aboutsummaryrefslogtreecommitdiff
path: root/src/core/NEON/kernels/convolution/depthwise/depthwise_2x2_3x3_2x2_fp32_fp32.cpp
diff options
context:
space:
mode:
authorGeorgios Pinitas <georgios.pinitas@arm.com>2019-04-02 15:27:52 +0100
committerGeorgios Pinitas <georgios.pinitas@arm.com>2019-04-09 11:58:01 +0000
commita4bba9c594c4022c9f85192bb8fd3593ad1a8d3c (patch)
tree0e79ebd7105411f6756e63d3ce23f16aaeb88566 /src/core/NEON/kernels/convolution/depthwise/depthwise_2x2_3x3_2x2_fp32_fp32.cpp
parent3418ba520dd6251738ba905df84a201121433ecd (diff)
downloadComputeLibrary-a4bba9c594c4022c9f85192bb8fd3593ad1a8d3c.tar.gz
COMPMID-1995: Fix 32-bit NEDepthwiseConvolution errors.
-Updates padding handling in assembly depthwise kernels. -Fixes 32-bit runs issues for depthwise convolution. Change-Id: I3fe6369397c1d13f5629dd34c068ce4af53c95cd Signed-off-by: Georgios Pinitas <georgios.pinitas@arm.com> Reviewed-on: https://review.mlplatform.org/c/939 Reviewed-by: Giuseppe Rossini <giuseppe.rossini@arm.com> Comments-Addressed: Arm Jenkins <bsgcomp@arm.com> Tested-by: Arm Jenkins <bsgcomp@arm.com>
Diffstat (limited to 'src/core/NEON/kernels/convolution/depthwise/depthwise_2x2_3x3_2x2_fp32_fp32.cpp')
-rw-r--r--src/core/NEON/kernels/convolution/depthwise/depthwise_2x2_3x3_2x2_fp32_fp32.cpp1515
1 files changed, 1515 insertions, 0 deletions
diff --git a/src/core/NEON/kernels/convolution/depthwise/depthwise_2x2_3x3_2x2_fp32_fp32.cpp b/src/core/NEON/kernels/convolution/depthwise/depthwise_2x2_3x3_2x2_fp32_fp32.cpp
index 4ac6276123..010dd81bce 100644
--- a/src/core/NEON/kernels/convolution/depthwise/depthwise_2x2_3x3_2x2_fp32_fp32.cpp
+++ b/src/core/NEON/kernels/convolution/depthwise/depthwise_2x2_3x3_2x2_fp32_fp32.cpp
@@ -431,6 +431,491 @@ void Conv::execute_tile<ActivationFunction::None>(
template <>
template <>
+// Depthwise convolution tile kernel, fp32 in / fp32 out, no activation.
+// Per this file's name: 3x3 kernel at 2x2 stride producing a 2x2 output tile.
+// Hand-scheduled AArch64 NEON inline assembly; loads and FMLAs are interleaved
+// to hide memory latency, so statement order is load-bearing — do not reorder.
+// Weight/bias layout at wbptr: one bias value then nine kernel weights per
+// channel group (ten q-loads at offsets #0..#144, pointer advanced by #160 per
+// vector iteration; s-loads at #0..#36, advanced by #40, in the scalar tail).
+void Conv::execute_tile<ActivationFunction::None>(
+ int n_channels,
+ const void *weight_bias_ptr,
+ const float *inptrs[Base::inner_tile_rows][Base::inner_tile_cols],
+ float *outptrs[Base::output_tile_rows][Base::output_tile_cols]
+)
+{
+ __asm __volatile(
+ // x23 = input byte offset, x24 = output byte offset,
+ // x26 = number of 4-channel blocks (n_channels / 4),
+ // x25 = leftover channels (n_channels % 3 bits, i.e. n_channels & 3).
+ "mov x23, xzr\n"
+ "mov x24, xzr\n"
+ "and x25, %[n_channels], #3\n"
+ "lsr x26, %[n_channels], #2\n"
+ "cbz x26, 4f\n"
+ // [1] Vector path preamble: load bias (q13) + nine weights, seed the four
+ // output-tile accumulators v10/v8/v9/v7 with the bias, start first inputs.
+ "1:\n"
+ "ldr q13, [%[wbptr]]\n"
+ "ldr x19, [%[inptrs], 0]\n"
+ "mov v10.16b, v13.16b\n"
+ "ldr q12, [%[wbptr], #16]\n"
+ "mov v8.16b, v13.16b\n"
+ "ldr q6, [%[wbptr], #32]\n"
+ "mov v9.16b, v13.16b\n"
+ "ldr q5, [%[wbptr], #48]\n"
+ "mov v7.16b, v13.16b\n"
+ "ldr q11, [%[wbptr], #64]\n"
+ "ldr q4, [%[wbptr], #80]\n"
+ "ldr x20, [%[inptrs], 40]\n"
+ "ldr q3, [%[wbptr], #96]\n"
+ "ldr x21, [%[inptrs], 80]\n"
+ "ldr q2, [%[wbptr], #112]\n"
+ "ldr x27, [%[inptrs], 120]\n"
+ "ldr q1, [%[wbptr], #128]\n"
+ "subs x26, x26, #1\n"
+ "ldr q0, [%[wbptr], #144]\n"
+ "ldr q14, [x19, x23]\n"
+ "fmla v10.4s, v14.4s, v12.4s\n"
+ "ldr q18, [x20, x23]\n"
+ "ldr q14, [x21, x23]\n"
+ "ldr x19, [%[inptrs], 8]\n"
+ "ldr q16, [x27, x23]\n"
+ "ldr x20, [%[inptrs], 48]\n"
+ "ldr q19, [x19, x23]\n"
+ "ldr x21, [%[inptrs], 88]\n"
+ "fmla v10.4s, v18.4s, v11.4s\n"
+ "ldr q15, [x20, x23]\n"
+ "ldr q18, [x21, x23]\n"
+ "ldr x19, [%[inptrs], 16]\n"
+ "ldr q13, [x19, x23]\n"
+ "fmla v10.4s, v19.4s, v6.4s\n"
+ "fmla v10.4s, v14.4s, v2.4s\n"
+ "beq 3f\n"
+ // [2] Main vector loop: finish the four accumulators, store them to the
+ // 2x2 output tile (outptrs 0/8/16/24), and overlap the reload of weights
+ // and first inputs for the next 4-channel block.
+ "2:\n"
+ "fmla v8.4s, v14.4s, v12.4s\n"
+ "ldr x20, [%[inptrs], 56]\n"
+ "fmla v10.4s, v15.4s, v4.4s\n"
+ "ldr x19, [%[inptrs], 24]\n"
+ "fmla v9.4s, v13.4s, v12.4s\n"
+ "ldr q14, [x20, x23]\n"
+ "ldr q17, [x19, x23]\n"
+ "ldr x22, [%[inptrs], 160]\n"
+ "fmla v8.4s, v16.4s, v11.4s\n"
+ "ldr x27, [%[inptrs], 128]\n"
+ "fmla v10.4s, v13.4s, v5.4s\n"
+ "ldr q15, [x22, x23]\n"
+ "fmla v9.4s, v14.4s, v11.4s\n"
+ "ldr q19, [x27, x23]\n"
+ "ldr x21, [%[inptrs], 96]\n"
+ "ldr x20, [%[inptrs], 64]\n"
+ "ldr x19, [%[inptrs], 32]\n"
+ "fmla v8.4s, v18.4s, v6.4s\n"
+ "ldr x22, [%[inptrs], 168]\n"
+ "fmla v10.4s, v18.4s, v1.4s\n"
+ "ldr q13, [x21, x23]\n"
+ "fmla v9.4s, v17.4s, v6.4s\n"
+ "ldr q18, [x20, x23]\n"
+ "fmla v7.4s, v13.4s, v12.4s\n"
+ "ldr q17, [x19, x23]\n"
+ "fmla v8.4s, v15.4s, v2.4s\n"
+ "ldr q15, [x22, x23]\n"
+ "fmla v10.4s, v14.4s, v3.4s\n"
+ "ldr x27, [%[inptrs], 136]\n"
+ "fmla v9.4s, v13.4s, v2.4s\n"
+ "ldr x21, [%[inptrs], 104]\n"
+ "ldr q16, [x27, x23]\n"
+ "ldr x20, [%[inptrs], 72]\n"
+ "fmla v8.4s, v19.4s, v4.4s\n"
+ "ldr q19, [x21, x23]\n"
+ "fmla v10.4s, v13.4s, v0.4s\n"
+ "ldr q12, [x20, x23]\n"
+ "fmla v9.4s, v18.4s, v4.4s\n"
+ "ldr x22, [%[inptrs], 176]\n"
+ "fmla v7.4s, v16.4s, v11.4s\n"
+ "ldr x27, [%[inptrs], 144]\n"
+ "fmla v8.4s, v13.4s, v5.4s\n"
+ "ldr q11, [x22, x23]\n"
+ "ldr q13, [x27, x23]\n"
+ "ldr x21, [%[inptrs], 112]\n"
+ "fmla v9.4s, v17.4s, v5.4s\n"
+ "ldr x22, [%[inptrs], 184]\n"
+ "fmla v7.4s, v19.4s, v6.4s\n"
+ "ldr q14, [x21, x23]\n"
+ "fmla v8.4s, v15.4s, v1.4s\n"
+ "ldr q17, [x22, x23]\n"
+ "ldr x27, [%[inptrs], 152]\n"
+ "ldr x22, [%[inptrs], 192]\n"
+ "ldr x21, [%[outptrs], 0]\n"
+ "fmla v9.4s, v19.4s, v1.4s\n"
+ "ldr x28, [%[outptrs], 16]\n"
+ "str q10, [x21, x24]\n"
+ "fmla v7.4s, v11.4s, v2.4s\n"
+ "fmla v8.4s, v16.4s, v3.4s\n"
+ "ldr q16, [x27, x23]\n"
+ "ldr q15, [x22, x23]\n"
+ "ldr x21, [%[outptrs], 8]\n"
+ "fmla v9.4s, v12.4s, v3.4s\n"
+ "add %[wbptr], %[wbptr], #160\n"
+ "fmla v7.4s, v13.4s, v4.4s\n"
+ "ldr q13, [%[wbptr]]\n"
+ "fmla v8.4s, v11.4s, v0.4s\n"
+ "ldr q12, [%[wbptr], #16]\n"
+ "mov v10.16b, v13.16b\n"
+ "ldr q6, [%[wbptr], #32]\n"
+ "fmla v9.4s, v14.4s, v0.4s\n"
+ "ldr q11, [%[wbptr], #64]\n"
+ "fmla v7.4s, v14.4s, v5.4s\n"
+ "ldr q4, [%[wbptr], #80]\n"
+ "str q8, [x28, x24]\n"
+ "add x23, x23, #16\n"
+ "mov v8.16b, v13.16b\n"
+ "ldr q2, [%[wbptr], #112]\n"
+ "str q9, [x21, x24]\n"
+ "ldr x28, [%[outptrs], 24]\n"
+ "fmla v7.4s, v17.4s, v1.4s\n"
+ "ldr q5, [%[wbptr], #48]\n"
+ "mov v9.16b, v13.16b\n"
+ "prfm pldl1keep, [%[wbptr], #64]\n"
+ "ldr x19, [%[inptrs], 0]\n"
+ "ldr x20, [%[inptrs], 40]\n"
+ "ldr x21, [%[inptrs], 80]\n"
+ "ldr x27, [%[inptrs], 120]\n"
+ "subs x26, x26, #1\n"
+ "fmla v7.4s, v16.4s, v3.4s\n"
+ "ldr q1, [%[wbptr], #128]\n"
+ "ldr q14, [x19, x23]\n"
+ "fmla v10.4s, v14.4s, v12.4s\n"
+ "ldr q18, [x20, x23]\n"
+ "ldr q14, [x21, x23]\n"
+ "ldr x19, [%[inptrs], 8]\n"
+ "fmla v7.4s, v15.4s, v0.4s\n"
+ "ldr q3, [%[wbptr], #96]\n"
+ "ldr q19, [x19, x23]\n"
+ "ldr x20, [%[inptrs], 48]\n"
+ "fmla v10.4s, v18.4s, v11.4s\n"
+ "ldr q16, [x27, x23]\n"
+ "ldr q15, [x20, x23]\n"
+ "ldr x19, [%[inptrs], 16]\n"
+ "str q7, [x28, x24]\n"
+ "ldr x21, [%[inptrs], 88]\n"
+ "mov v7.16b, v13.16b\n"
+ "ldr q0, [%[wbptr], #144]\n"
+ "fmla v10.4s, v19.4s, v6.4s\n"
+ "ldr q13, [x19, x23]\n"
+ "ldr q18, [x21, x23]\n"
+ "add x24, x24, #16\n"
+ "fmla v10.4s, v14.4s, v2.4s\n"
+ "bne 2b\n"
+ // [3] Vector tail: last 4-channel block — same computation as the loop
+ // body but without preloading the next iteration's weights/inputs.
+ "3:\n"
+ "fmla v8.4s, v14.4s, v12.4s\n"
+ "ldr x20, [%[inptrs], 56]\n"
+ "fmla v10.4s, v15.4s, v4.4s\n"
+ "ldr x19, [%[inptrs], 24]\n"
+ "fmla v9.4s, v13.4s, v12.4s\n"
+ "ldr q14, [x20, x23]\n"
+ "ldr q17, [x19, x23]\n"
+ "ldr x22, [%[inptrs], 160]\n"
+ "fmla v8.4s, v16.4s, v11.4s\n"
+ "ldr x27, [%[inptrs], 128]\n"
+ "fmla v10.4s, v13.4s, v5.4s\n"
+ "ldr q15, [x22, x23]\n"
+ "fmla v9.4s, v14.4s, v11.4s\n"
+ "ldr q19, [x27, x23]\n"
+ "ldr x21, [%[inptrs], 96]\n"
+ "ldr x20, [%[inptrs], 64]\n"
+ "ldr x19, [%[inptrs], 32]\n"
+ "fmla v8.4s, v18.4s, v6.4s\n"
+ "ldr x22, [%[inptrs], 168]\n"
+ "fmla v10.4s, v18.4s, v1.4s\n"
+ "ldr q13, [x21, x23]\n"
+ "fmla v9.4s, v17.4s, v6.4s\n"
+ "ldr q18, [x20, x23]\n"
+ "fmla v7.4s, v13.4s, v12.4s\n"
+ "ldr q17, [x19, x23]\n"
+ "fmla v8.4s, v15.4s, v2.4s\n"
+ "ldr q15, [x22, x23]\n"
+ "fmla v10.4s, v14.4s, v3.4s\n"
+ "ldr x27, [%[inptrs], 136]\n"
+ "fmla v9.4s, v13.4s, v2.4s\n"
+ "ldr x21, [%[inptrs], 104]\n"
+ "ldr q16, [x27, x23]\n"
+ "ldr x20, [%[inptrs], 72]\n"
+ "fmla v8.4s, v19.4s, v4.4s\n"
+ "ldr q19, [x21, x23]\n"
+ "fmla v10.4s, v13.4s, v0.4s\n"
+ "ldr q12, [x20, x23]\n"
+ "fmla v9.4s, v18.4s, v4.4s\n"
+ "ldr x22, [%[inptrs], 176]\n"
+ "fmla v7.4s, v16.4s, v11.4s\n"
+ "ldr x27, [%[inptrs], 144]\n"
+ "fmla v8.4s, v13.4s, v5.4s\n"
+ "ldr q11, [x22, x23]\n"
+ "ldr q13, [x27, x23]\n"
+ "ldr x21, [%[inptrs], 112]\n"
+ "fmla v9.4s, v17.4s, v5.4s\n"
+ "ldr x22, [%[inptrs], 184]\n"
+ "fmla v7.4s, v19.4s, v6.4s\n"
+ "ldr q14, [x21, x23]\n"
+ "fmla v8.4s, v15.4s, v1.4s\n"
+ "ldr q17, [x22, x23]\n"
+ "ldr x27, [%[inptrs], 152]\n"
+ "ldr x22, [%[inptrs], 192]\n"
+ "ldr x21, [%[outptrs], 0]\n"
+ "fmla v9.4s, v19.4s, v1.4s\n"
+ "ldr x28, [%[outptrs], 16]\n"
+ "str q10, [x21, x24]\n"
+ "fmla v7.4s, v11.4s, v2.4s\n"
+ "fmla v8.4s, v16.4s, v3.4s\n"
+ "ldr q16, [x27, x23]\n"
+ "ldr q15, [x22, x23]\n"
+ "ldr x21, [%[outptrs], 8]\n"
+ "fmla v9.4s, v12.4s, v3.4s\n"
+ "add %[wbptr], %[wbptr], #160\n"
+ "fmla v7.4s, v13.4s, v4.4s\n"
+ "prfm pldl1keep, [%[wbptr], #64]\n"
+ "fmla v8.4s, v11.4s, v0.4s\n"
+ "add x23, x23, #16\n"
+ "fmla v9.4s, v14.4s, v0.4s\n"
+ "fmla v7.4s, v14.4s, v5.4s\n"
+ "str q8, [x28, x24]\n"
+ "ldr x28, [%[outptrs], 24]\n"
+ "str q9, [x21, x24]\n"
+ "fmla v7.4s, v17.4s, v1.4s\n"
+ "fmla v7.4s, v16.4s, v3.4s\n"
+ "fmla v7.4s, v15.4s, v0.4s\n"
+ "str q7, [x28, x24]\n"
+ "add x24, x24, #16\n"
+ // [4] Scalar path: process the remaining (n_channels & 3) channels one at
+ // a time with s registers; same schedule as the vector path otherwise.
+ "4:\n"
+ "cbz x25, 7f\n"
+ "ldr s13, [%[wbptr]]\n"
+ "mov v10.16b, v13.16b\n"
+ "ldr s12, [%[wbptr], #4]\n"
+ "mov v8.16b, v13.16b\n"
+ "ldr s6, [%[wbptr], #8]\n"
+ "mov v9.16b, v13.16b\n"
+ "ldr s5, [%[wbptr], #12]\n"
+ "mov v7.16b, v13.16b\n"
+ "ldr s11, [%[wbptr], #16]\n"
+ "ldr s4, [%[wbptr], #20]\n"
+ "ldr x19, [%[inptrs], 0]\n"
+ "ldr s3, [%[wbptr], #24]\n"
+ "ldr x20, [%[inptrs], 40]\n"
+ "ldr s2, [%[wbptr], #28]\n"
+ "ldr x21, [%[inptrs], 80]\n"
+ "ldr s1, [%[wbptr], #32]\n"
+ "ldr x27, [%[inptrs], 120]\n"
+ "ldr s0, [%[wbptr], #36]\n"
+ "subs x25, x25, #1\n"
+ "ldr s14, [x19, x23]\n"
+ "ldr s18, [x20, x23]\n"
+ "fmla v10.4s, v14.4s, v12.4s\n"
+ "ldr s14, [x21, x23]\n"
+ "ldr s16, [x27, x23]\n"
+ "ldr x19, [%[inptrs], 8]\n"
+ "ldr x20, [%[inptrs], 48]\n"
+ "ldr x21, [%[inptrs], 88]\n"
+ "ldr s19, [x19, x23]\n"
+ "fmla v10.4s, v18.4s, v11.4s\n"
+ "ldr s15, [x20, x23]\n"
+ "ldr s18, [x21, x23]\n"
+ "ldr x19, [%[inptrs], 16]\n"
+ "ldr s13, [x19, x23]\n"
+ "fmla v10.4s, v19.4s, v6.4s\n"
+ "fmla v10.4s, v14.4s, v2.4s\n"
+ "beq 6f\n"
+ // [5] Main scalar loop: one channel per iteration.
+ "5:\n"
+ "fmla v8.4s, v14.4s, v12.4s\n"
+ "ldr x20, [%[inptrs], 56]\n"
+ "fmla v10.4s, v15.4s, v4.4s\n"
+ "ldr x19, [%[inptrs], 24]\n"
+ "fmla v9.4s, v13.4s, v12.4s\n"
+ "ldr s14, [x20, x23]\n"
+ "ldr s17, [x19, x23]\n"
+ "ldr x22, [%[inptrs], 160]\n"
+ "fmla v8.4s, v16.4s, v11.4s\n"
+ "ldr x27, [%[inptrs], 128]\n"
+ "fmla v10.4s, v13.4s, v5.4s\n"
+ "ldr s15, [x22, x23]\n"
+ "fmla v9.4s, v14.4s, v11.4s\n"
+ "ldr s19, [x27, x23]\n"
+ "ldr x21, [%[inptrs], 96]\n"
+ "ldr x20, [%[inptrs], 64]\n"
+ "ldr x19, [%[inptrs], 32]\n"
+ "fmla v8.4s, v18.4s, v6.4s\n"
+ "ldr x22, [%[inptrs], 168]\n"
+ "fmla v10.4s, v18.4s, v1.4s\n"
+ "ldr s13, [x21, x23]\n"
+ "fmla v9.4s, v17.4s, v6.4s\n"
+ "ldr s18, [x20, x23]\n"
+ "fmla v7.4s, v13.4s, v12.4s\n"
+ "ldr s17, [x19, x23]\n"
+ "fmla v8.4s, v15.4s, v2.4s\n"
+ "ldr s15, [x22, x23]\n"
+ "fmla v10.4s, v14.4s, v3.4s\n"
+ "ldr x27, [%[inptrs], 136]\n"
+ "fmla v9.4s, v13.4s, v2.4s\n"
+ "ldr x21, [%[inptrs], 104]\n"
+ "ldr s16, [x27, x23]\n"
+ "ldr x20, [%[inptrs], 72]\n"
+ "fmla v8.4s, v19.4s, v4.4s\n"
+ "ldr s19, [x21, x23]\n"
+ "fmla v10.4s, v13.4s, v0.4s\n"
+ "ldr s12, [x20, x23]\n"
+ "fmla v9.4s, v18.4s, v4.4s\n"
+ "ldr x22, [%[inptrs], 176]\n"
+ "fmla v7.4s, v16.4s, v11.4s\n"
+ "ldr x27, [%[inptrs], 144]\n"
+ "fmla v8.4s, v13.4s, v5.4s\n"
+ "ldr s11, [x22, x23]\n"
+ "ldr s13, [x27, x23]\n"
+ "ldr x21, [%[inptrs], 112]\n"
+ "fmla v9.4s, v17.4s, v5.4s\n"
+ "ldr x22, [%[inptrs], 184]\n"
+ "fmla v7.4s, v19.4s, v6.4s\n"
+ "ldr s14, [x21, x23]\n"
+ "fmla v8.4s, v15.4s, v1.4s\n"
+ "ldr s17, [x22, x23]\n"
+ "ldr x27, [%[inptrs], 152]\n"
+ "ldr x22, [%[inptrs], 192]\n"
+ "ldr x21, [%[outptrs], 0]\n"
+ "fmla v9.4s, v19.4s, v1.4s\n"
+ "ldr x28, [%[outptrs], 16]\n"
+ "str s10, [x21, x24]\n"
+ "fmla v7.4s, v11.4s, v2.4s\n"
+ "fmla v8.4s, v16.4s, v3.4s\n"
+ "ldr s16, [x27, x23]\n"
+ "ldr s15, [x22, x23]\n"
+ "ldr x21, [%[outptrs], 8]\n"
+ "fmla v9.4s, v12.4s, v3.4s\n"
+ "add %[wbptr], %[wbptr], #40\n"
+ "fmla v7.4s, v13.4s, v4.4s\n"
+ "ldr s13, [%[wbptr]]\n"
+ "fmla v8.4s, v11.4s, v0.4s\n"
+ "ldr s12, [%[wbptr], #4]\n"
+ "mov v10.16b, v13.16b\n"
+ "ldr s6, [%[wbptr], #8]\n"
+ "fmla v9.4s, v14.4s, v0.4s\n"
+ "ldr s11, [%[wbptr], #16]\n"
+ "fmla v7.4s, v14.4s, v5.4s\n"
+ "ldr s4, [%[wbptr], #20]\n"
+ "str s8, [x28, x24]\n"
+ "add x23, x23, #4\n"
+ "mov v8.16b, v13.16b\n"
+ "ldr s2, [%[wbptr], #28]\n"
+ "str s9, [x21, x24]\n"
+ "ldr x28, [%[outptrs], 24]\n"
+ "fmla v7.4s, v17.4s, v1.4s\n"
+ "ldr s5, [%[wbptr], #12]\n"
+ "mov v9.16b, v13.16b\n"
+ "prfm pldl1keep, [%[wbptr], #64]\n"
+ "ldr x19, [%[inptrs], 0]\n"
+ "ldr x20, [%[inptrs], 40]\n"
+ "ldr x21, [%[inptrs], 80]\n"
+ "ldr x27, [%[inptrs], 120]\n"
+ "subs x25, x25, #1\n"
+ "fmla v7.4s, v16.4s, v3.4s\n"
+ "ldr s1, [%[wbptr], #32]\n"
+ "ldr s14, [x19, x23]\n"
+ "fmla v10.4s, v14.4s, v12.4s\n"
+ "ldr s18, [x20, x23]\n"
+ "ldr s14, [x21, x23]\n"
+ "ldr x19, [%[inptrs], 8]\n"
+ "fmla v7.4s, v15.4s, v0.4s\n"
+ "ldr s3, [%[wbptr], #24]\n"
+ "ldr s19, [x19, x23]\n"
+ "ldr x20, [%[inptrs], 48]\n"
+ "fmla v10.4s, v18.4s, v11.4s\n"
+ "ldr s16, [x27, x23]\n"
+ "ldr s15, [x20, x23]\n"
+ "ldr x19, [%[inptrs], 16]\n"
+ "str s7, [x28, x24]\n"
+ "ldr x21, [%[inptrs], 88]\n"
+ "mov v7.16b, v13.16b\n"
+ "ldr s0, [%[wbptr], #36]\n"
+ "fmla v10.4s, v19.4s, v6.4s\n"
+ "ldr s13, [x19, x23]\n"
+ "ldr s18, [x21, x23]\n"
+ "add x24, x24, #4\n"
+ "fmla v10.4s, v14.4s, v2.4s\n"
+ "bne 5b\n"
+ // [6] Scalar tail: final channel, no next-iteration preload.
+ "6:\n"
+ "fmla v8.4s, v14.4s, v12.4s\n"
+ "ldr x20, [%[inptrs], 56]\n"
+ "fmla v10.4s, v15.4s, v4.4s\n"
+ "ldr x19, [%[inptrs], 24]\n"
+ "fmla v9.4s, v13.4s, v12.4s\n"
+ "ldr s14, [x20, x23]\n"
+ "ldr s17, [x19, x23]\n"
+ "ldr x22, [%[inptrs], 160]\n"
+ "fmla v8.4s, v16.4s, v11.4s\n"
+ "ldr x27, [%[inptrs], 128]\n"
+ "fmla v10.4s, v13.4s, v5.4s\n"
+ "ldr s15, [x22, x23]\n"
+ "fmla v9.4s, v14.4s, v11.4s\n"
+ "ldr s19, [x27, x23]\n"
+ "ldr x21, [%[inptrs], 96]\n"
+ "ldr x20, [%[inptrs], 64]\n"
+ "ldr x19, [%[inptrs], 32]\n"
+ "fmla v8.4s, v18.4s, v6.4s\n"
+ "ldr x22, [%[inptrs], 168]\n"
+ "fmla v10.4s, v18.4s, v1.4s\n"
+ "ldr s13, [x21, x23]\n"
+ "fmla v9.4s, v17.4s, v6.4s\n"
+ "ldr s18, [x20, x23]\n"
+ "fmla v7.4s, v13.4s, v12.4s\n"
+ "ldr s17, [x19, x23]\n"
+ "fmla v8.4s, v15.4s, v2.4s\n"
+ "ldr s15, [x22, x23]\n"
+ "fmla v10.4s, v14.4s, v3.4s\n"
+ "ldr x27, [%[inptrs], 136]\n"
+ "fmla v9.4s, v13.4s, v2.4s\n"
+ "ldr x21, [%[inptrs], 104]\n"
+ "ldr s16, [x27, x23]\n"
+ "ldr x20, [%[inptrs], 72]\n"
+ "fmla v8.4s, v19.4s, v4.4s\n"
+ "ldr s19, [x21, x23]\n"
+ "fmla v10.4s, v13.4s, v0.4s\n"
+ "ldr s12, [x20, x23]\n"
+ "fmla v9.4s, v18.4s, v4.4s\n"
+ "ldr x22, [%[inptrs], 176]\n"
+ "fmla v7.4s, v16.4s, v11.4s\n"
+ "ldr x27, [%[inptrs], 144]\n"
+ "fmla v8.4s, v13.4s, v5.4s\n"
+ "ldr s11, [x22, x23]\n"
+ "ldr s13, [x27, x23]\n"
+ "ldr x21, [%[inptrs], 112]\n"
+ "fmla v9.4s, v17.4s, v5.4s\n"
+ "ldr x22, [%[inptrs], 184]\n"
+ "fmla v7.4s, v19.4s, v6.4s\n"
+ "ldr s14, [x21, x23]\n"
+ "fmla v8.4s, v15.4s, v1.4s\n"
+ "ldr s17, [x22, x23]\n"
+ "ldr x27, [%[inptrs], 152]\n"
+ "ldr x22, [%[inptrs], 192]\n"
+ "ldr x21, [%[outptrs], 0]\n"
+ "fmla v9.4s, v19.4s, v1.4s\n"
+ "ldr x28, [%[outptrs], 16]\n"
+ "str s10, [x21, x24]\n"
+ "fmla v7.4s, v11.4s, v2.4s\n"
+ "fmla v8.4s, v16.4s, v3.4s\n"
+ "ldr s16, [x27, x23]\n"
+ "ldr s15, [x22, x23]\n"
+ "ldr x21, [%[outptrs], 8]\n"
+ "fmla v9.4s, v12.4s, v3.4s\n"
+ "add %[wbptr], %[wbptr], #40\n"
+ "fmla v7.4s, v13.4s, v4.4s\n"
+ "prfm pldl1keep, [%[wbptr], #64]\n"
+ "fmla v8.4s, v11.4s, v0.4s\n"
+ "add x23, x23, #4\n"
+ "fmla v9.4s, v14.4s, v0.4s\n"
+ "fmla v7.4s, v14.4s, v5.4s\n"
+ "str s8, [x28, x24]\n"
+ "ldr x28, [%[outptrs], 24]\n"
+ "str s9, [x21, x24]\n"
+ "fmla v7.4s, v17.4s, v1.4s\n"
+ "fmla v7.4s, v16.4s, v3.4s\n"
+ "fmla v7.4s, v15.4s, v0.4s\n"
+ "str s7, [x28, x24]\n"
+ "add x24, x24, #4\n"
+ "7:\n"
+ // x19-x28 are callee-saved under AAPCS64; listing them as clobbers makes
+ // the compiler preserve them around this asm block.
+ : [wbptr] "+r" (weight_bias_ptr)
+ : [inptrs] "r" (inptrs), [n_channels] "r" ((long) n_channels), [outptrs] "r" (outptrs)
+ : "cc", "v0", "v1", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "memory"
+ );
+}
+
+template <>
+template <>
void Conv::execute_tile<ActivationFunction::ReLU>(
int n_channels,
const void *weight_bias_ptr,
@@ -850,6 +1335,511 @@ void Conv::execute_tile<ActivationFunction::ReLU>(
template <>
template <>
+// Depthwise convolution tile kernel, fp32 in / fp32 out, with ReLU.
+// Same structure as the None variant: a 4-channel vector path (q registers)
+// then a scalar tail (s registers). ReLU is applied as fmax against a zeroed
+// register (movi v10, #0) before each store. Instruction interleaving is
+// latency-hiding by design — do not reorder.
+void Conv::execute_tile<ActivationFunction::ReLU>(
+ int n_channels,
+ const void *weight_bias_ptr,
+ const float *inptrs[Base::inner_tile_rows][Base::inner_tile_cols],
+ float *outptrs[Base::output_tile_rows][Base::output_tile_cols]
+)
+{
+ __asm __volatile(
+ // x22 = input byte offset, x26 = output byte offset,
+ // x24 = number of 4-channel blocks, x23 = leftover channels (n_channels & 3).
+ "mov x22, xzr\n"
+ "mov x26, xzr\n"
+ "and x23, %[n_channels], #3\n"
+ "lsr x24, %[n_channels], #2\n"
+ "cbz x24, 4f\n"
+ // [1] Vector preamble: load bias (q14) + nine weights, seed the four
+ // output-tile accumulators v3/v1/v2/v0 with the bias.
+ "1:\n"
+ "ldr q14, [%[wbptr]]\n"
+ "ldr x19, [%[inptrs], 0]\n"
+ "mov v3.16b, v14.16b\n"
+ "ldr q13, [%[wbptr], #16]\n"
+ "mov v1.16b, v14.16b\n"
+ "ldr q11, [%[wbptr], #32]\n"
+ "mov v2.16b, v14.16b\n"
+ "ldr q4, [%[wbptr], #48]\n"
+ "mov v0.16b, v14.16b\n"
+ "ldr q12, [%[wbptr], #64]\n"
+ "ldr q9, [%[wbptr], #80]\n"
+ "ldr x20, [%[inptrs], 40]\n"
+ "ldr q8, [%[wbptr], #96]\n"
+ "ldr x21, [%[inptrs], 80]\n"
+ "ldr q7, [%[wbptr], #112]\n"
+ "ldr x25, [%[inptrs], 120]\n"
+ "ldr q6, [%[wbptr], #128]\n"
+ "subs x24, x24, #1\n"
+ "ldr q5, [%[wbptr], #144]\n"
+ "ldr q15, [x19, x22]\n"
+ "fmla v3.4s, v15.4s, v13.4s\n"
+ "ldr q17, [x20, x22]\n"
+ "ldr q16, [x21, x22]\n"
+ "ldr x19, [%[inptrs], 8]\n"
+ "ldr q15, [x25, x22]\n"
+ "ldr x20, [%[inptrs], 48]\n"
+ "ldr q10, [x19, x22]\n"
+ "ldr x21, [%[inptrs], 88]\n"
+ "fmla v3.4s, v17.4s, v12.4s\n"
+ "ldr q17, [x20, x22]\n"
+ "ldr q14, [x21, x22]\n"
+ "ldr x19, [%[inptrs], 16]\n"
+ "ldr q18, [x19, x22]\n"
+ "fmla v3.4s, v10.4s, v11.4s\n"
+ "fmla v3.4s, v16.4s, v7.4s\n"
+ "beq 3f\n"
+ // [2] Main vector loop: finish the accumulators, clamp with
+ // fmax(x, 0) (v10 zeroed via movi), store the 2x2 tile, and overlap the
+ // reload of weights/inputs for the next 4-channel block.
+ "2:\n"
+ "fmla v1.4s, v16.4s, v13.4s\n"
+ "ldr x20, [%[inptrs], 56]\n"
+ "fmla v3.4s, v17.4s, v9.4s\n"
+ "ldr x19, [%[inptrs], 24]\n"
+ "fmla v2.4s, v18.4s, v13.4s\n"
+ "ldr q16, [x20, x22]\n"
+ "movi v10.16b, #0\n"
+ "ldr q17, [x19, x22]\n"
+ "fmla v1.4s, v15.4s, v12.4s\n"
+ "ldr x27, [%[inptrs], 160]\n"
+ "fmla v3.4s, v18.4s, v4.4s\n"
+ "ldr x25, [%[inptrs], 128]\n"
+ "fmla v2.4s, v16.4s, v12.4s\n"
+ "ldr q18, [x27, x22]\n"
+ "ldr q15, [x25, x22]\n"
+ "ldr x21, [%[inptrs], 96]\n"
+ "fmla v1.4s, v14.4s, v11.4s\n"
+ "ldr x20, [%[inptrs], 64]\n"
+ "fmla v3.4s, v14.4s, v6.4s\n"
+ "ldr q14, [x21, x22]\n"
+ "fmla v2.4s, v17.4s, v11.4s\n"
+ "ldr q17, [x20, x22]\n"
+ "fmla v0.4s, v14.4s, v13.4s\n"
+ "ldr x19, [%[inptrs], 32]\n"
+ "fmla v1.4s, v18.4s, v7.4s\n"
+ "ldr x27, [%[inptrs], 168]\n"
+ "fmla v3.4s, v16.4s, v8.4s\n"
+ "ldr q18, [x19, x22]\n"
+ "fmla v2.4s, v14.4s, v7.4s\n"
+ "ldr q13, [x27, x22]\n"
+ "ldr x25, [%[inptrs], 136]\n"
+ "ldr x21, [%[inptrs], 104]\n"
+ "ldr x20, [%[inptrs], 72]\n"
+ "fmla v1.4s, v15.4s, v9.4s\n"
+ "ldr x27, [%[inptrs], 176]\n"
+ "fmla v3.4s, v14.4s, v5.4s\n"
+ "ldr q16, [x25, x22]\n"
+ "fmla v2.4s, v17.4s, v9.4s\n"
+ "ldr q17, [x21, x22]\n"
+ "fmla v0.4s, v16.4s, v12.4s\n"
+ "ldr q12, [x20, x22]\n"
+ "fmla v1.4s, v14.4s, v4.4s\n"
+ "ldr q15, [x27, x22]\n"
+ "fmax v3.4s, v3.4s, v10.4s\n"
+ "ldr x25, [%[inptrs], 144]\n"
+ "fmla v2.4s, v18.4s, v4.4s\n"
+ "ldr x21, [%[inptrs], 112]\n"
+ "fmla v0.4s, v17.4s, v11.4s\n"
+ "ldr q14, [x25, x22]\n"
+ "fmla v1.4s, v13.4s, v6.4s\n"
+ "ldr q11, [x21, x22]\n"
+ "ldr x27, [%[inptrs], 184]\n"
+ "ldr x25, [%[inptrs], 152]\n"
+ "ldr x21, [%[outptrs], 0]\n"
+ "fmla v2.4s, v17.4s, v6.4s\n"
+ "ldr x28, [%[outptrs], 16]\n"
+ "str q3, [x21, x26]\n"
+ "fmla v0.4s, v15.4s, v7.4s\n"
+ "fmla v1.4s, v16.4s, v8.4s\n"
+ "ldr q18, [x27, x22]\n"
+ "ldr q17, [x25, x22]\n"
+ "ldr x27, [%[inptrs], 192]\n"
+ "fmla v2.4s, v12.4s, v8.4s\n"
+ "ldr x21, [%[outptrs], 8]\n"
+ "fmla v0.4s, v14.4s, v9.4s\n"
+ "ldr q16, [x27, x22]\n"
+ "fmla v1.4s, v15.4s, v5.4s\n"
+ "add %[wbptr], %[wbptr], #160\n"
+ "ldr q14, [%[wbptr]]\n"
+ "add x22, x22, #16\n"
+ "fmla v2.4s, v11.4s, v5.4s\n"
+ "ldr q13, [%[wbptr], #16]\n"
+ "fmla v0.4s, v11.4s, v4.4s\n"
+ "ldr q11, [%[wbptr], #32]\n"
+ "fmax v1.4s, v1.4s, v10.4s\n"
+ "ldr q12, [%[wbptr], #64]\n"
+ "mov v3.16b, v14.16b\n"
+ "ldr q9, [%[wbptr], #80]\n"
+ "fmax v2.4s, v2.4s, v10.4s\n"
+ "ldr q7, [%[wbptr], #112]\n"
+ "str q1, [x28, x26]\n"
+ "fmla v0.4s, v18.4s, v6.4s\n"
+ "mov v1.16b, v14.16b\n"
+ "ldr q4, [%[wbptr], #48]\n"
+ "str q2, [x21, x26]\n"
+ "ldr x28, [%[outptrs], 24]\n"
+ "mov v2.16b, v14.16b\n"
+ "prfm pldl1keep, [%[wbptr], #64]\n"
+ "fmla v0.4s, v17.4s, v8.4s\n"
+ "ldr q6, [%[wbptr], #128]\n"
+ "ldr x19, [%[inptrs], 0]\n"
+ "ldr x20, [%[inptrs], 40]\n"
+ "ldr x21, [%[inptrs], 80]\n"
+ "ldr x25, [%[inptrs], 120]\n"
+ "subs x24, x24, #1\n"
+ "ldr q15, [x19, x22]\n"
+ "fmla v0.4s, v16.4s, v5.4s\n"
+ "ldr q8, [%[wbptr], #96]\n"
+ "fmla v3.4s, v15.4s, v13.4s\n"
+ "ldr q17, [x20, x22]\n"
+ "ldr q16, [x21, x22]\n"
+ "ldr x19, [%[inptrs], 8]\n"
+ "ldr q15, [x25, x22]\n"
+ "ldr x20, [%[inptrs], 48]\n"
+ "fmax v0.4s, v0.4s, v10.4s\n"
+ "ldr q5, [%[wbptr], #144]\n"
+ "fmla v3.4s, v17.4s, v12.4s\n"
+ "ldr q10, [x19, x22]\n"
+ "ldr q17, [x20, x22]\n"
+ "ldr x19, [%[inptrs], 16]\n"
+ "str q0, [x28, x26]\n"
+ "ldr x21, [%[inptrs], 88]\n"
+ "mov v0.16b, v14.16b\n"
+ "ldr q18, [x19, x22]\n"
+ "fmla v3.4s, v10.4s, v11.4s\n"
+ "ldr q14, [x21, x22]\n"
+ "add x26, x26, #16\n"
+ "fmla v3.4s, v16.4s, v7.4s\n"
+ "bne 2b\n"
+ // [3] Vector tail: last 4-channel block (no next-iteration preload).
+ "3:\n"
+ "fmla v1.4s, v16.4s, v13.4s\n"
+ "ldr x20, [%[inptrs], 56]\n"
+ "fmla v3.4s, v17.4s, v9.4s\n"
+ "ldr x19, [%[inptrs], 24]\n"
+ "fmla v2.4s, v18.4s, v13.4s\n"
+ "ldr q16, [x20, x22]\n"
+ "movi v10.16b, #0\n"
+ "ldr q17, [x19, x22]\n"
+ "fmla v1.4s, v15.4s, v12.4s\n"
+ "ldr x27, [%[inptrs], 160]\n"
+ "fmla v3.4s, v18.4s, v4.4s\n"
+ "ldr x25, [%[inptrs], 128]\n"
+ "fmla v2.4s, v16.4s, v12.4s\n"
+ "ldr q18, [x27, x22]\n"
+ "ldr q15, [x25, x22]\n"
+ "ldr x21, [%[inptrs], 96]\n"
+ "fmla v1.4s, v14.4s, v11.4s\n"
+ "ldr x20, [%[inptrs], 64]\n"
+ "fmla v3.4s, v14.4s, v6.4s\n"
+ "ldr q14, [x21, x22]\n"
+ "fmla v2.4s, v17.4s, v11.4s\n"
+ "ldr q17, [x20, x22]\n"
+ "fmla v0.4s, v14.4s, v13.4s\n"
+ "ldr x19, [%[inptrs], 32]\n"
+ "fmla v1.4s, v18.4s, v7.4s\n"
+ "ldr x27, [%[inptrs], 168]\n"
+ "fmla v3.4s, v16.4s, v8.4s\n"
+ "ldr q18, [x19, x22]\n"
+ "fmla v2.4s, v14.4s, v7.4s\n"
+ "ldr q13, [x27, x22]\n"
+ "ldr x25, [%[inptrs], 136]\n"
+ "ldr x21, [%[inptrs], 104]\n"
+ "ldr x20, [%[inptrs], 72]\n"
+ "fmla v1.4s, v15.4s, v9.4s\n"
+ "ldr x27, [%[inptrs], 176]\n"
+ "fmla v3.4s, v14.4s, v5.4s\n"
+ "ldr q16, [x25, x22]\n"
+ "fmla v2.4s, v17.4s, v9.4s\n"
+ "ldr q17, [x21, x22]\n"
+ "fmla v0.4s, v16.4s, v12.4s\n"
+ "ldr q12, [x20, x22]\n"
+ "fmla v1.4s, v14.4s, v4.4s\n"
+ "ldr q15, [x27, x22]\n"
+ "fmax v3.4s, v3.4s, v10.4s\n"
+ "ldr x25, [%[inptrs], 144]\n"
+ "fmla v2.4s, v18.4s, v4.4s\n"
+ "ldr x21, [%[inptrs], 112]\n"
+ "fmla v0.4s, v17.4s, v11.4s\n"
+ "ldr q14, [x25, x22]\n"
+ "fmla v1.4s, v13.4s, v6.4s\n"
+ "ldr q11, [x21, x22]\n"
+ "ldr x27, [%[inptrs], 184]\n"
+ "ldr x25, [%[inptrs], 152]\n"
+ "ldr x21, [%[outptrs], 0]\n"
+ "fmla v2.4s, v17.4s, v6.4s\n"
+ "ldr x28, [%[outptrs], 16]\n"
+ "str q3, [x21, x26]\n"
+ "fmla v0.4s, v15.4s, v7.4s\n"
+ "fmla v1.4s, v16.4s, v8.4s\n"
+ "ldr q18, [x27, x22]\n"
+ "ldr q17, [x25, x22]\n"
+ "ldr x27, [%[inptrs], 192]\n"
+ "fmla v2.4s, v12.4s, v8.4s\n"
+ "ldr x21, [%[outptrs], 8]\n"
+ "fmla v0.4s, v14.4s, v9.4s\n"
+ "ldr q16, [x27, x22]\n"
+ "fmla v1.4s, v15.4s, v5.4s\n"
+ "add %[wbptr], %[wbptr], #160\n"
+ "prfm pldl1keep, [%[wbptr], #64]\n"
+ "add x22, x22, #16\n"
+ "fmla v2.4s, v11.4s, v5.4s\n"
+ "fmla v0.4s, v11.4s, v4.4s\n"
+ "fmax v1.4s, v1.4s, v10.4s\n"
+ "fmax v2.4s, v2.4s, v10.4s\n"
+ "str q1, [x28, x26]\n"
+ "fmla v0.4s, v18.4s, v6.4s\n"
+ "ldr x28, [%[outptrs], 24]\n"
+ "str q2, [x21, x26]\n"
+ "fmla v0.4s, v17.4s, v8.4s\n"
+ "fmla v0.4s, v16.4s, v5.4s\n"
+ "fmax v0.4s, v0.4s, v10.4s\n"
+ "str q0, [x28, x26]\n"
+ "add x26, x26, #16\n"
+ // [4] Scalar path: remaining (n_channels & 3) channels, one per
+ // iteration, using s registers; same schedule and ReLU clamp.
+ "4:\n"
+ "cbz x23, 7f\n"
+ "ldr s14, [%[wbptr]]\n"
+ "mov v3.16b, v14.16b\n"
+ "ldr s13, [%[wbptr], #4]\n"
+ "mov v1.16b, v14.16b\n"
+ "ldr s11, [%[wbptr], #8]\n"
+ "mov v2.16b, v14.16b\n"
+ "ldr s4, [%[wbptr], #12]\n"
+ "mov v0.16b, v14.16b\n"
+ "ldr s12, [%[wbptr], #16]\n"
+ "ldr s9, [%[wbptr], #20]\n"
+ "ldr x19, [%[inptrs], 0]\n"
+ "ldr s8, [%[wbptr], #24]\n"
+ "ldr x20, [%[inptrs], 40]\n"
+ "ldr s7, [%[wbptr], #28]\n"
+ "ldr x21, [%[inptrs], 80]\n"
+ "ldr s6, [%[wbptr], #32]\n"
+ "ldr x25, [%[inptrs], 120]\n"
+ "ldr s5, [%[wbptr], #36]\n"
+ "subs x23, x23, #1\n"
+ "ldr s15, [x19, x22]\n"
+ "ldr s17, [x20, x22]\n"
+ "fmla v3.4s, v15.4s, v13.4s\n"
+ "ldr s16, [x21, x22]\n"
+ "ldr s15, [x25, x22]\n"
+ "ldr x19, [%[inptrs], 8]\n"
+ "ldr x20, [%[inptrs], 48]\n"
+ "ldr x21, [%[inptrs], 88]\n"
+ "ldr s10, [x19, x22]\n"
+ "fmla v3.4s, v17.4s, v12.4s\n"
+ "ldr s17, [x20, x22]\n"
+ "ldr s14, [x21, x22]\n"
+ "ldr x19, [%[inptrs], 16]\n"
+ "ldr s18, [x19, x22]\n"
+ "fmla v3.4s, v10.4s, v11.4s\n"
+ "fmla v3.4s, v16.4s, v7.4s\n"
+ "beq 6f\n"
+ // [5] Main scalar loop: one channel per iteration.
+ "5:\n"
+ "fmla v1.4s, v16.4s, v13.4s\n"
+ "ldr x20, [%[inptrs], 56]\n"
+ "fmla v3.4s, v17.4s, v9.4s\n"
+ "ldr x19, [%[inptrs], 24]\n"
+ "fmla v2.4s, v18.4s, v13.4s\n"
+ "ldr s16, [x20, x22]\n"
+ "movi v10.16b, #0\n"
+ "ldr s17, [x19, x22]\n"
+ "fmla v1.4s, v15.4s, v12.4s\n"
+ "ldr x27, [%[inptrs], 160]\n"
+ "fmla v3.4s, v18.4s, v4.4s\n"
+ "ldr x25, [%[inptrs], 128]\n"
+ "fmla v2.4s, v16.4s, v12.4s\n"
+ "ldr s18, [x27, x22]\n"
+ "ldr s15, [x25, x22]\n"
+ "ldr x21, [%[inptrs], 96]\n"
+ "fmla v1.4s, v14.4s, v11.4s\n"
+ "ldr x20, [%[inptrs], 64]\n"
+ "fmla v3.4s, v14.4s, v6.4s\n"
+ "ldr s14, [x21, x22]\n"
+ "fmla v2.4s, v17.4s, v11.4s\n"
+ "ldr s17, [x20, x22]\n"
+ "fmla v0.4s, v14.4s, v13.4s\n"
+ "ldr x19, [%[inptrs], 32]\n"
+ "fmla v1.4s, v18.4s, v7.4s\n"
+ "ldr x27, [%[inptrs], 168]\n"
+ "fmla v3.4s, v16.4s, v8.4s\n"
+ "ldr s18, [x19, x22]\n"
+ "fmla v2.4s, v14.4s, v7.4s\n"
+ "ldr s13, [x27, x22]\n"
+ "ldr x25, [%[inptrs], 136]\n"
+ "ldr x21, [%[inptrs], 104]\n"
+ "ldr x20, [%[inptrs], 72]\n"
+ "fmla v1.4s, v15.4s, v9.4s\n"
+ "ldr x27, [%[inptrs], 176]\n"
+ "fmla v3.4s, v14.4s, v5.4s\n"
+ "ldr s16, [x25, x22]\n"
+ "fmla v2.4s, v17.4s, v9.4s\n"
+ "ldr s17, [x21, x22]\n"
+ "fmla v0.4s, v16.4s, v12.4s\n"
+ "ldr s12, [x20, x22]\n"
+ "fmla v1.4s, v14.4s, v4.4s\n"
+ "ldr s15, [x27, x22]\n"
+ "fmax v3.4s, v3.4s, v10.4s\n"
+ "ldr x25, [%[inptrs], 144]\n"
+ "fmla v2.4s, v18.4s, v4.4s\n"
+ "ldr x21, [%[inptrs], 112]\n"
+ "fmla v0.4s, v17.4s, v11.4s\n"
+ "ldr s14, [x25, x22]\n"
+ "fmla v1.4s, v13.4s, v6.4s\n"
+ "ldr s11, [x21, x22]\n"
+ "ldr x27, [%[inptrs], 184]\n"
+ "ldr x25, [%[inptrs], 152]\n"
+ "ldr x21, [%[outptrs], 0]\n"
+ "fmla v2.4s, v17.4s, v6.4s\n"
+ "ldr x28, [%[outptrs], 16]\n"
+ "str s3, [x21, x26]\n"
+ "fmla v0.4s, v15.4s, v7.4s\n"
+ "fmla v1.4s, v16.4s, v8.4s\n"
+ "ldr s18, [x27, x22]\n"
+ "ldr s17, [x25, x22]\n"
+ "ldr x27, [%[inptrs], 192]\n"
+ "fmla v2.4s, v12.4s, v8.4s\n"
+ "ldr x21, [%[outptrs], 8]\n"
+ "fmla v0.4s, v14.4s, v9.4s\n"
+ "ldr s16, [x27, x22]\n"
+ "fmla v1.4s, v15.4s, v5.4s\n"
+ "add %[wbptr], %[wbptr], #40\n"
+ "ldr s14, [%[wbptr]]\n"
+ "add x22, x22, #4\n"
+ "fmla v2.4s, v11.4s, v5.4s\n"
+ "ldr s13, [%[wbptr], #4]\n"
+ "fmla v0.4s, v11.4s, v4.4s\n"
+ "ldr s11, [%[wbptr], #8]\n"
+ "fmax v1.4s, v1.4s, v10.4s\n"
+ "ldr s12, [%[wbptr], #16]\n"
+ "mov v3.16b, v14.16b\n"
+ "ldr s9, [%[wbptr], #20]\n"
+ "fmax v2.4s, v2.4s, v10.4s\n"
+ "ldr s7, [%[wbptr], #28]\n"
+ "str s1, [x28, x26]\n"
+ "fmla v0.4s, v18.4s, v6.4s\n"
+ "mov v1.16b, v14.16b\n"
+ "ldr s4, [%[wbptr], #12]\n"
+ "str s2, [x21, x26]\n"
+ "ldr x28, [%[outptrs], 24]\n"
+ "mov v2.16b, v14.16b\n"
+ "prfm pldl1keep, [%[wbptr], #64]\n"
+ "fmla v0.4s, v17.4s, v8.4s\n"
+ "ldr s6, [%[wbptr], #32]\n"
+ "ldr x19, [%[inptrs], 0]\n"
+ "ldr x20, [%[inptrs], 40]\n"
+ "ldr x21, [%[inptrs], 80]\n"
+ "ldr x25, [%[inptrs], 120]\n"
+ "subs x23, x23, #1\n"
+ "ldr s15, [x19, x22]\n"
+ "fmla v0.4s, v16.4s, v5.4s\n"
+ "ldr s8, [%[wbptr], #24]\n"
+ "fmla v3.4s, v15.4s, v13.4s\n"
+ "ldr s17, [x20, x22]\n"
+ "ldr s16, [x21, x22]\n"
+ "ldr x19, [%[inptrs], 8]\n"
+ "ldr s15, [x25, x22]\n"
+ "ldr x20, [%[inptrs], 48]\n"
+ "fmax v0.4s, v0.4s, v10.4s\n"
+ "ldr s5, [%[wbptr], #36]\n"
+ "fmla v3.4s, v17.4s, v12.4s\n"
+ "ldr s10, [x19, x22]\n"
+ "ldr s17, [x20, x22]\n"
+ "ldr x19, [%[inptrs], 16]\n"
+ "str s0, [x28, x26]\n"
+ "ldr x21, [%[inptrs], 88]\n"
+ "mov v0.16b, v14.16b\n"
+ "ldr s18, [x19, x22]\n"
+ "fmla v3.4s, v10.4s, v11.4s\n"
+ "ldr s14, [x21, x22]\n"
+ "add x26, x26, #4\n"
+ "fmla v3.4s, v16.4s, v7.4s\n"
+ "bne 5b\n"
+ // [6] Scalar tail: final channel, no next-iteration preload.
+ "6:\n"
+ "fmla v1.4s, v16.4s, v13.4s\n"
+ "ldr x20, [%[inptrs], 56]\n"
+ "fmla v3.4s, v17.4s, v9.4s\n"
+ "ldr x19, [%[inptrs], 24]\n"
+ "fmla v2.4s, v18.4s, v13.4s\n"
+ "ldr s16, [x20, x22]\n"
+ "movi v10.16b, #0\n"
+ "ldr s17, [x19, x22]\n"
+ "fmla v1.4s, v15.4s, v12.4s\n"
+ "ldr x27, [%[inptrs], 160]\n"
+ "fmla v3.4s, v18.4s, v4.4s\n"
+ "ldr x25, [%[inptrs], 128]\n"
+ "fmla v2.4s, v16.4s, v12.4s\n"
+ "ldr s18, [x27, x22]\n"
+ "ldr s15, [x25, x22]\n"
+ "ldr x21, [%[inptrs], 96]\n"
+ "fmla v1.4s, v14.4s, v11.4s\n"
+ "ldr x20, [%[inptrs], 64]\n"
+ "fmla v3.4s, v14.4s, v6.4s\n"
+ "ldr s14, [x21, x22]\n"
+ "fmla v2.4s, v17.4s, v11.4s\n"
+ "ldr s17, [x20, x22]\n"
+ "fmla v0.4s, v14.4s, v13.4s\n"
+ "ldr x19, [%[inptrs], 32]\n"
+ "fmla v1.4s, v18.4s, v7.4s\n"
+ "ldr x27, [%[inptrs], 168]\n"
+ "fmla v3.4s, v16.4s, v8.4s\n"
+ "ldr s18, [x19, x22]\n"
+ "fmla v2.4s, v14.4s, v7.4s\n"
+ "ldr s13, [x27, x22]\n"
+ "ldr x25, [%[inptrs], 136]\n"
+ "ldr x21, [%[inptrs], 104]\n"
+ "ldr x20, [%[inptrs], 72]\n"
+ "fmla v1.4s, v15.4s, v9.4s\n"
+ "ldr x27, [%[inptrs], 176]\n"
+ "fmla v3.4s, v14.4s, v5.4s\n"
+ "ldr s16, [x25, x22]\n"
+ "fmla v2.4s, v17.4s, v9.4s\n"
+ "ldr s17, [x21, x22]\n"
+ "fmla v0.4s, v16.4s, v12.4s\n"
+ "ldr s12, [x20, x22]\n"
+ "fmla v1.4s, v14.4s, v4.4s\n"
+ "ldr s15, [x27, x22]\n"
+ "fmax v3.4s, v3.4s, v10.4s\n"
+ "ldr x25, [%[inptrs], 144]\n"
+ "fmla v2.4s, v18.4s, v4.4s\n"
+ "ldr x21, [%[inptrs], 112]\n"
+ "fmla v0.4s, v17.4s, v11.4s\n"
+ "ldr s14, [x25, x22]\n"
+ "fmla v1.4s, v13.4s, v6.4s\n"
+ "ldr s11, [x21, x22]\n"
+ "ldr x27, [%[inptrs], 184]\n"
+ "ldr x25, [%[inptrs], 152]\n"
+ "ldr x21, [%[outptrs], 0]\n"
+ "fmla v2.4s, v17.4s, v6.4s\n"
+ "ldr x28, [%[outptrs], 16]\n"
+ "str s3, [x21, x26]\n"
+ "fmla v0.4s, v15.4s, v7.4s\n"
+ "fmla v1.4s, v16.4s, v8.4s\n"
+ "ldr s18, [x27, x22]\n"
+ "ldr s17, [x25, x22]\n"
+ "ldr x27, [%[inptrs], 192]\n"
+ "fmla v2.4s, v12.4s, v8.4s\n"
+ "ldr x21, [%[outptrs], 8]\n"
+ "fmla v0.4s, v14.4s, v9.4s\n"
+ "ldr s16, [x27, x22]\n"
+ "fmla v1.4s, v15.4s, v5.4s\n"
+ "add %[wbptr], %[wbptr], #40\n"
+ "prfm pldl1keep, [%[wbptr], #64]\n"
+ "add x22, x22, #4\n"
+ "fmla v2.4s, v11.4s, v5.4s\n"
+ "fmla v0.4s, v11.4s, v4.4s\n"
+ "fmax v1.4s, v1.4s, v10.4s\n"
+ "fmax v2.4s, v2.4s, v10.4s\n"
+ "str s1, [x28, x26]\n"
+ "fmla v0.4s, v18.4s, v6.4s\n"
+ "ldr x28, [%[outptrs], 24]\n"
+ "str s2, [x21, x26]\n"
+ "fmla v0.4s, v17.4s, v8.4s\n"
+ "fmla v0.4s, v16.4s, v5.4s\n"
+ "fmax v0.4s, v0.4s, v10.4s\n"
+ "str s0, [x28, x26]\n"
+ "add x26, x26, #4\n"
+ "7:\n"
+ // x19-x28 are callee-saved under AAPCS64; listing them as clobbers makes
+ // the compiler preserve them around this asm block.
+ : [wbptr] "+r" (weight_bias_ptr)
+ : [inptrs] "r" (inptrs), [n_channels] "r" ((long) n_channels), [outptrs] "r" (outptrs)
+ : "cc", "v0", "v1", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "memory"
+ );
+}
+
+template <>
+template <>
void Conv::execute_tile<ActivationFunction::ReLU6>(
int n_channels,
const void *weight_bias_ptr,
@@ -1287,6 +2277,531 @@ void Conv::execute_tile<ActivationFunction::ReLU6>(
);
}
+// 2x2-output-tile, 3x3-kernel, 2x2-stride depthwise convolution
+// (fp32 in / fp32 out) with fused ReLU6 activation, written as
+// hand-scheduled AArch64 inline assembly.
+//
+// n_channels       number of channels to process; handled four at a time
+//                  on the vector (q-register) path, with a scalar
+//                  (s-register) tail for n_channels % 4.
+// weight_bias_ptr  packed bias followed by the nine 3x3 weights per
+//                  channel group: advanced by #160 bytes (10 x 16) per
+//                  vector iteration and #40 bytes (10 x 4) per scalar one.
+// inptrs           table of input pointers; 8-byte entries at offsets
+//                  0..192 are loaded from it below (5x5 inner tile).
+// outptrs          table of output pointers; entries at offsets
+//                  0, 8, 16, 24 (2x2 output tile).
+template <>
+template <>
+void Conv::execute_tile<ActivationFunction::ReLU6>(
+  int n_channels,
+  const void *weight_bias_ptr,
+  const float *inptrs[Base::inner_tile_rows][Base::inner_tile_cols],
+  float *outptrs[Base::output_tile_rows][Base::output_tile_cols]
+)
+{
+  __asm __volatile(
+    // Register roles: x27 = input byte offset, x28 = output byte offset,
+    // x25 = vector (4-channel) iteration count, x26 = scalar remainder
+    // count (n_channels % 4).
+    "mov x27, xzr\n"
+    "mov x28, xzr\n"
+    "and x26, %[n_channels], #3\n"
+    "lsr x25, %[n_channels], #2\n"
+    "cbz x25, 4f\n"
+    // 1: vector preamble - load the bias (v15) and the nine weights,
+    // seed the four tile accumulators (v8, v3, v2, v4) with the bias,
+    // and begin accumulating the first output element (v8).
+    "1:\n"
+    "ldr q15, [%[wbptr]]\n"
+    "ldr x21, [%[inptrs], 0]\n"
+    "mov v8.16b, v15.16b\n"
+    "ldr q14, [%[wbptr], #16]\n"
+    "mov v3.16b, v15.16b\n"
+    "ldr q10, [%[wbptr], #32]\n"
+    "mov v2.16b, v15.16b\n"
+    "ldr q7, [%[wbptr], #48]\n"
+    "mov v4.16b, v15.16b\n"
+    "ldr q13, [%[wbptr], #64]\n"
+    "ldr q5, [%[wbptr], #80]\n"
+    "ldr x22, [%[inptrs], 40]\n"
+    "ldr q0, [%[wbptr], #96]\n"
+    "ldr x20, [%[inptrs], 80]\n"
+    "ldr q9, [%[wbptr], #112]\n"
+    "ldr x23, [%[inptrs], 120]\n"
+    "ldr q6, [%[wbptr], #128]\n"
+    "subs x25, x25, #1\n"
+    "ldr q1, [%[wbptr], #144]\n"
+    "ldr q17, [x21, x27]\n"
+    "fmla v8.4s, v17.4s, v14.4s\n"
+    "ldr q18, [x22, x27]\n"
+    "ldr q16, [x20, x27]\n"
+    "ldr x21, [%[inptrs], 8]\n"
+    "ldr q17, [x23, x27]\n"
+    "ldr x22, [%[inptrs], 48]\n"
+    "ldr q11, [x21, x27]\n"
+    "ldr x20, [%[inptrs], 88]\n"
+    "fmla v8.4s, v18.4s, v13.4s\n"
+    "ldr q19, [x22, x27]\n"
+    "ldr q15, [x20, x27]\n"
+    "ldr x21, [%[inptrs], 16]\n"
+    "ldr q12, [x21, x27]\n"
+    "fmla v8.4s, v11.4s, v10.4s\n"
+    "fmla v8.4s, v16.4s, v9.4s\n"
+    "beq 3f\n"
+    // 2: main vector loop - finish the four accumulators for the current
+    // 4-channel group, clamp each to [0, 6] (ReLU6), store the 2x2 output
+    // tile, and software-pipeline the bias/weight/input loads for the
+    // next group before branching back.
+    "2:\n"
+    "fmla v3.4s, v16.4s, v14.4s\n"
+    "ldr x22, [%[inptrs], 56]\n"
+    "fmla v8.4s, v19.4s, v5.4s\n"
+    "ldr x21, [%[inptrs], 24]\n"
+    "fmla v2.4s, v12.4s, v14.4s\n"
+    "ldr q16, [x22, x27]\n"
+    // v11 = 0.0f and (below) v12 = 6.0f: the ReLU6 clamp bounds.
+    "movi v11.16b, #0\n"
+    "ldr q18, [x21, x27]\n"
+    "fmla v3.4s, v17.4s, v13.4s\n"
+    "ldr x20, [%[inptrs], 160]\n"
+    "fmla v8.4s, v12.4s, v7.4s\n"
+    "ldr x23, [%[inptrs], 128]\n"
+    "fmla v2.4s, v16.4s, v13.4s\n"
+    "ldr q19, [x20, x27]\n"
+    "fmov v12.4s, #6.0\n"
+    "ldr q17, [x23, x27]\n"
+    "fmla v3.4s, v15.4s, v10.4s\n"
+    "ldr x20, [%[inptrs], 96]\n"
+    "fmla v8.4s, v15.4s, v6.4s\n"
+    "ldr x22, [%[inptrs], 64]\n"
+    "fmla v2.4s, v18.4s, v10.4s\n"
+    "ldr q15, [x20, x27]\n"
+    "fmla v4.4s, v15.4s, v14.4s\n"
+    "ldr q18, [x22, x27]\n"
+    "fmla v3.4s, v19.4s, v9.4s\n"
+    "ldr x21, [%[inptrs], 32]\n"
+    "fmla v8.4s, v16.4s, v0.4s\n"
+    "ldr x20, [%[inptrs], 168]\n"
+    "fmla v2.4s, v15.4s, v9.4s\n"
+    "ldr q19, [x21, x27]\n"
+    "ldr q16, [x20, x27]\n"
+    "ldr x23, [%[inptrs], 136]\n"
+    "fmla v3.4s, v17.4s, v5.4s\n"
+    "ldr x20, [%[inptrs], 104]\n"
+    "fmla v8.4s, v15.4s, v1.4s\n"
+    "ldr q14, [x23, x27]\n"
+    "fmla v2.4s, v18.4s, v5.4s\n"
+    "ldr q17, [x20, x27]\n"
+    "fmla v4.4s, v14.4s, v13.4s\n"
+    "ldr x22, [%[inptrs], 72]\n"
+    "fmla v3.4s, v15.4s, v7.4s\n"
+    "ldr x20, [%[inptrs], 176]\n"
+    "fmax v8.4s, v8.4s, v11.4s\n"
+    "ldr q18, [x22, x27]\n"
+    "fmla v2.4s, v19.4s, v7.4s\n"
+    "ldr q13, [x20, x27]\n"
+    "fmla v4.4s, v17.4s, v10.4s\n"
+    "ldr x23, [%[inptrs], 144]\n"
+    "fmla v3.4s, v16.4s, v6.4s\n"
+    "ldr x20, [%[inptrs], 112]\n"
+    "fmin v8.4s, v8.4s, v12.4s\n"
+    "ldr q10, [x23, x27]\n"
+    "fmla v2.4s, v17.4s, v6.4s\n"
+    "ldr q15, [x20, x27]\n"
+    "fmla v4.4s, v13.4s, v9.4s\n"
+    "ldr x20, [%[inptrs], 184]\n"
+    "fmla v3.4s, v14.4s, v0.4s\n"
+    "ldr x23, [%[inptrs], 152]\n"
+    "ldr q9, [x20, x27]\n"
+    "ldr x22, [%[outptrs], 0]\n"
+    "fmla v2.4s, v18.4s, v0.4s\n"
+    "ldr q19, [x23, x27]\n"
+    "str q8, [x22, x28]\n"
+    "fmla v4.4s, v10.4s, v5.4s\n"
+    "fmla v3.4s, v13.4s, v1.4s\n"
+    "ldr x20, [%[inptrs], 192]\n"
+    "ldr x22, [%[outptrs], 8]\n"
+    "ldr x24, [%[outptrs], 16]\n"
+    "add %[wbptr], %[wbptr], #160\n"
+    "fmla v2.4s, v15.4s, v1.4s\n"
+    "ldr q16, [x20, x27]\n"
+    "fmla v4.4s, v15.4s, v7.4s\n"
+    "ldr q15, [%[wbptr]]\n"
+    "fmax v3.4s, v3.4s, v11.4s\n"
+    "ldr q14, [%[wbptr], #16]\n"
+    "mov v8.16b, v15.16b\n"
+    "ldr q10, [%[wbptr], #32]\n"
+    "fmax v2.4s, v2.4s, v11.4s\n"
+    "ldr q13, [%[wbptr], #64]\n"
+    "fmla v4.4s, v9.4s, v6.4s\n"
+    "ldr q7, [%[wbptr], #48]\n"
+    "fmin v3.4s, v3.4s, v12.4s\n"
+    "ldr q5, [%[wbptr], #80]\n"
+    "fmin v2.4s, v2.4s, v12.4s\n"
+    "ldr q9, [%[wbptr], #112]\n"
+    "prfm pldl1keep, [%[wbptr], #64]\n"
+    "add x27, x27, #16\n"
+    "str q3, [x24, x28]\n"
+    "fmla v4.4s, v19.4s, v0.4s\n"
+    "str q2, [x22, x28]\n"
+    "mov v3.16b, v15.16b\n"
+    "mov v2.16b, v15.16b\n"
+    "ldr q6, [%[wbptr], #128]\n"
+    "ldr x24, [%[outptrs], 24]\n"
+    "ldr x21, [%[inptrs], 0]\n"
+    "ldr x22, [%[inptrs], 40]\n"
+    "fmla v4.4s, v16.4s, v1.4s\n"
+    "ldr q0, [%[wbptr], #96]\n"
+    "ldr q17, [x21, x27]\n"
+    "ldr x20, [%[inptrs], 80]\n"
+    "fmla v8.4s, v17.4s, v14.4s\n"
+    "ldr q18, [x22, x27]\n"
+    "ldr q16, [x20, x27]\n"
+    "ldr x21, [%[inptrs], 8]\n"
+    "fmax v4.4s, v4.4s, v11.4s\n"
+    "ldr q1, [%[wbptr], #144]\n"
+    "ldr q11, [x21, x27]\n"
+    "ldr x22, [%[inptrs], 48]\n"
+    "fmla v8.4s, v18.4s, v13.4s\n"
+    "ldr x21, [%[inptrs], 16]\n"
+    "fmin v4.4s, v4.4s, v12.4s\n"
+    "ldr q19, [x22, x27]\n"
+    "ldr q12, [x21, x27]\n"
+    "ldr x23, [%[inptrs], 120]\n"
+    "ldr x20, [%[inptrs], 88]\n"
+    "subs x25, x25, #1\n"
+    "str q4, [x24, x28]\n"
+    "mov v4.16b, v15.16b\n"
+    "ldr q17, [x23, x27]\n"
+    "fmla v8.4s, v11.4s, v10.4s\n"
+    "ldr q15, [x20, x27]\n"
+    "add x28, x28, #16\n"
+    "fmla v8.4s, v16.4s, v9.4s\n"
+    "bne 2b\n"
+    // 3: vector tail - same computation as loop 2 for the final channel
+    // group, but without pre-loading the next iteration's bias/weights.
+    "3:\n"
+    "fmla v3.4s, v16.4s, v14.4s\n"
+    "ldr x22, [%[inptrs], 56]\n"
+    "fmla v8.4s, v19.4s, v5.4s\n"
+    "ldr x21, [%[inptrs], 24]\n"
+    "fmla v2.4s, v12.4s, v14.4s\n"
+    "ldr q16, [x22, x27]\n"
+    "movi v11.16b, #0\n"
+    "ldr q18, [x21, x27]\n"
+    "fmla v3.4s, v17.4s, v13.4s\n"
+    "ldr x20, [%[inptrs], 160]\n"
+    "fmla v8.4s, v12.4s, v7.4s\n"
+    "ldr x23, [%[inptrs], 128]\n"
+    "fmla v2.4s, v16.4s, v13.4s\n"
+    "ldr q19, [x20, x27]\n"
+    "fmov v12.4s, #6.0\n"
+    "ldr q17, [x23, x27]\n"
+    "fmla v3.4s, v15.4s, v10.4s\n"
+    "ldr x20, [%[inptrs], 96]\n"
+    "fmla v8.4s, v15.4s, v6.4s\n"
+    "ldr x22, [%[inptrs], 64]\n"
+    "fmla v2.4s, v18.4s, v10.4s\n"
+    "ldr q15, [x20, x27]\n"
+    "fmla v4.4s, v15.4s, v14.4s\n"
+    "ldr q18, [x22, x27]\n"
+    "fmla v3.4s, v19.4s, v9.4s\n"
+    "ldr x21, [%[inptrs], 32]\n"
+    "fmla v8.4s, v16.4s, v0.4s\n"
+    "ldr x20, [%[inptrs], 168]\n"
+    "fmla v2.4s, v15.4s, v9.4s\n"
+    "ldr q19, [x21, x27]\n"
+    "ldr q16, [x20, x27]\n"
+    "ldr x23, [%[inptrs], 136]\n"
+    "fmla v3.4s, v17.4s, v5.4s\n"
+    "ldr x20, [%[inptrs], 104]\n"
+    "fmla v8.4s, v15.4s, v1.4s\n"
+    "ldr q14, [x23, x27]\n"
+    "fmla v2.4s, v18.4s, v5.4s\n"
+    "ldr q17, [x20, x27]\n"
+    "fmla v4.4s, v14.4s, v13.4s\n"
+    "ldr x22, [%[inptrs], 72]\n"
+    "fmla v3.4s, v15.4s, v7.4s\n"
+    "ldr x20, [%[inptrs], 176]\n"
+    "fmax v8.4s, v8.4s, v11.4s\n"
+    "ldr q18, [x22, x27]\n"
+    "fmla v2.4s, v19.4s, v7.4s\n"
+    "ldr q13, [x20, x27]\n"
+    "fmla v4.4s, v17.4s, v10.4s\n"
+    "ldr x23, [%[inptrs], 144]\n"
+    "fmla v3.4s, v16.4s, v6.4s\n"
+    "ldr x20, [%[inptrs], 112]\n"
+    "fmin v8.4s, v8.4s, v12.4s\n"
+    "ldr q10, [x23, x27]\n"
+    "fmla v2.4s, v17.4s, v6.4s\n"
+    "ldr q15, [x20, x27]\n"
+    "fmla v4.4s, v13.4s, v9.4s\n"
+    "ldr x20, [%[inptrs], 184]\n"
+    "fmla v3.4s, v14.4s, v0.4s\n"
+    "ldr x23, [%[inptrs], 152]\n"
+    "ldr q9, [x20, x27]\n"
+    "ldr x22, [%[outptrs], 0]\n"
+    "fmla v2.4s, v18.4s, v0.4s\n"
+    "ldr q19, [x23, x27]\n"
+    "str q8, [x22, x28]\n"
+    "fmla v4.4s, v10.4s, v5.4s\n"
+    "fmla v3.4s, v13.4s, v1.4s\n"
+    "ldr x20, [%[inptrs], 192]\n"
+    "ldr x22, [%[outptrs], 8]\n"
+    "ldr x24, [%[outptrs], 16]\n"
+    "add %[wbptr], %[wbptr], #160\n"
+    "fmla v2.4s, v15.4s, v1.4s\n"
+    "ldr q16, [x20, x27]\n"
+    "fmla v4.4s, v15.4s, v7.4s\n"
+    "prfm pldl1keep, [%[wbptr], #64]\n"
+    "fmax v3.4s, v3.4s, v11.4s\n"
+    "add x27, x27, #16\n"
+    "fmax v2.4s, v2.4s, v11.4s\n"
+    "fmla v4.4s, v9.4s, v6.4s\n"
+    "fmin v3.4s, v3.4s, v12.4s\n"
+    "fmin v2.4s, v2.4s, v12.4s\n"
+    "str q3, [x24, x28]\n"
+    "fmla v4.4s, v19.4s, v0.4s\n"
+    "str q2, [x22, x28]\n"
+    "ldr x24, [%[outptrs], 24]\n"
+    "fmla v4.4s, v16.4s, v1.4s\n"
+    "fmax v4.4s, v4.4s, v11.4s\n"
+    "fmin v4.4s, v4.4s, v12.4s\n"
+    "str q4, [x24, x28]\n"
+    "add x28, x28, #16\n"
+    // 4: scalar remainder entry - skip to the end if n_channels % 4 == 0;
+    // otherwise load bias/weights one float (s-register) at a time and
+    // seed the accumulators, mirroring the vector preamble at label 1.
+    "4:\n"
+    "cbz x26, 7f\n"
+    "ldr s15, [%[wbptr]]\n"
+    "mov v8.16b, v15.16b\n"
+    "ldr s14, [%[wbptr], #4]\n"
+    "mov v3.16b, v15.16b\n"
+    "ldr s10, [%[wbptr], #8]\n"
+    "mov v2.16b, v15.16b\n"
+    "ldr s7, [%[wbptr], #12]\n"
+    "mov v4.16b, v15.16b\n"
+    "ldr s13, [%[wbptr], #16]\n"
+    "ldr s5, [%[wbptr], #20]\n"
+    "ldr x21, [%[inptrs], 0]\n"
+    "ldr s0, [%[wbptr], #24]\n"
+    "ldr x22, [%[inptrs], 40]\n"
+    "ldr s9, [%[wbptr], #28]\n"
+    "ldr x20, [%[inptrs], 80]\n"
+    "ldr s6, [%[wbptr], #32]\n"
+    "ldr x23, [%[inptrs], 120]\n"
+    "ldr s1, [%[wbptr], #36]\n"
+    "subs x26, x26, #1\n"
+    "ldr s17, [x21, x27]\n"
+    "ldr s18, [x22, x27]\n"
+    "fmla v8.4s, v17.4s, v14.4s\n"
+    "ldr s16, [x20, x27]\n"
+    "ldr s17, [x23, x27]\n"
+    "ldr x21, [%[inptrs], 8]\n"
+    "ldr x22, [%[inptrs], 48]\n"
+    "ldr x20, [%[inptrs], 88]\n"
+    "ldr s11, [x21, x27]\n"
+    "fmla v8.4s, v18.4s, v13.4s\n"
+    "ldr s19, [x22, x27]\n"
+    "ldr s15, [x20, x27]\n"
+    "ldr x21, [%[inptrs], 16]\n"
+    "ldr s12, [x21, x27]\n"
+    "fmla v8.4s, v11.4s, v10.4s\n"
+    "fmla v8.4s, v16.4s, v9.4s\n"
+    "beq 6f\n"
+    // 5: scalar main loop - one channel per iteration (4-byte strides,
+    // #40-byte weight advance); otherwise identical in structure to the
+    // vector loop at label 2, including the ReLU6 clamp.
+    "5:\n"
+    "fmla v3.4s, v16.4s, v14.4s\n"
+    "ldr x22, [%[inptrs], 56]\n"
+    "fmla v8.4s, v19.4s, v5.4s\n"
+    "ldr x21, [%[inptrs], 24]\n"
+    "fmla v2.4s, v12.4s, v14.4s\n"
+    "ldr s16, [x22, x27]\n"
+    "movi v11.16b, #0\n"
+    "ldr s18, [x21, x27]\n"
+    "fmla v3.4s, v17.4s, v13.4s\n"
+    "ldr x20, [%[inptrs], 160]\n"
+    "fmla v8.4s, v12.4s, v7.4s\n"
+    "ldr x23, [%[inptrs], 128]\n"
+    "fmla v2.4s, v16.4s, v13.4s\n"
+    "ldr s19, [x20, x27]\n"
+    "fmov v12.4s, #6.0\n"
+    "ldr s17, [x23, x27]\n"
+    "fmla v3.4s, v15.4s, v10.4s\n"
+    "ldr x20, [%[inptrs], 96]\n"
+    "fmla v8.4s, v15.4s, v6.4s\n"
+    "ldr x22, [%[inptrs], 64]\n"
+    "fmla v2.4s, v18.4s, v10.4s\n"
+    "ldr s15, [x20, x27]\n"
+    "fmla v4.4s, v15.4s, v14.4s\n"
+    "ldr s18, [x22, x27]\n"
+    "fmla v3.4s, v19.4s, v9.4s\n"
+    "ldr x21, [%[inptrs], 32]\n"
+    "fmla v8.4s, v16.4s, v0.4s\n"
+    "ldr x20, [%[inptrs], 168]\n"
+    "fmla v2.4s, v15.4s, v9.4s\n"
+    "ldr s19, [x21, x27]\n"
+    "ldr s16, [x20, x27]\n"
+    "ldr x23, [%[inptrs], 136]\n"
+    "fmla v3.4s, v17.4s, v5.4s\n"
+    "ldr x20, [%[inptrs], 104]\n"
+    "fmla v8.4s, v15.4s, v1.4s\n"
+    "ldr s14, [x23, x27]\n"
+    "fmla v2.4s, v18.4s, v5.4s\n"
+    "ldr s17, [x20, x27]\n"
+    "fmla v4.4s, v14.4s, v13.4s\n"
+    "ldr x22, [%[inptrs], 72]\n"
+    "fmla v3.4s, v15.4s, v7.4s\n"
+    "ldr x20, [%[inptrs], 176]\n"
+    "fmax v8.4s, v8.4s, v11.4s\n"
+    "ldr s18, [x22, x27]\n"
+    "fmla v2.4s, v19.4s, v7.4s\n"
+    "ldr s13, [x20, x27]\n"
+    "fmla v4.4s, v17.4s, v10.4s\n"
+    "ldr x23, [%[inptrs], 144]\n"
+    "fmla v3.4s, v16.4s, v6.4s\n"
+    "ldr x20, [%[inptrs], 112]\n"
+    "fmin v8.4s, v8.4s, v12.4s\n"
+    "ldr s10, [x23, x27]\n"
+    "fmla v2.4s, v17.4s, v6.4s\n"
+    "ldr s15, [x20, x27]\n"
+    "fmla v4.4s, v13.4s, v9.4s\n"
+    "ldr x20, [%[inptrs], 184]\n"
+    "fmla v3.4s, v14.4s, v0.4s\n"
+    "ldr x23, [%[inptrs], 152]\n"
+    "ldr s9, [x20, x27]\n"
+    "ldr x22, [%[outptrs], 0]\n"
+    "fmla v2.4s, v18.4s, v0.4s\n"
+    "ldr s19, [x23, x27]\n"
+    "str s8, [x22, x28]\n"
+    "fmla v4.4s, v10.4s, v5.4s\n"
+    "fmla v3.4s, v13.4s, v1.4s\n"
+    "ldr x20, [%[inptrs], 192]\n"
+    "ldr x22, [%[outptrs], 8]\n"
+    "ldr x24, [%[outptrs], 16]\n"
+    "add %[wbptr], %[wbptr], #40\n"
+    "fmla v2.4s, v15.4s, v1.4s\n"
+    "ldr s16, [x20, x27]\n"
+    "fmla v4.4s, v15.4s, v7.4s\n"
+    "ldr s15, [%[wbptr]]\n"
+    "fmax v3.4s, v3.4s, v11.4s\n"
+    "ldr s14, [%[wbptr], #4]\n"
+    "mov v8.16b, v15.16b\n"
+    "ldr s10, [%[wbptr], #8]\n"
+    "fmax v2.4s, v2.4s, v11.4s\n"
+    "ldr s13, [%[wbptr], #16]\n"
+    "fmla v4.4s, v9.4s, v6.4s\n"
+    "ldr s7, [%[wbptr], #12]\n"
+    "fmin v3.4s, v3.4s, v12.4s\n"
+    "ldr s5, [%[wbptr], #20]\n"
+    "fmin v2.4s, v2.4s, v12.4s\n"
+    "ldr s9, [%[wbptr], #28]\n"
+    "prfm pldl1keep, [%[wbptr], #64]\n"
+    "add x27, x27, #4\n"
+    "str s3, [x24, x28]\n"
+    "fmla v4.4s, v19.4s, v0.4s\n"
+    "str s2, [x22, x28]\n"
+    "mov v3.16b, v15.16b\n"
+    "mov v2.16b, v15.16b\n"
+    "ldr s6, [%[wbptr], #32]\n"
+    "ldr x24, [%[outptrs], 24]\n"
+    "ldr x21, [%[inptrs], 0]\n"
+    "ldr x22, [%[inptrs], 40]\n"
+    "fmla v4.4s, v16.4s, v1.4s\n"
+    "ldr s0, [%[wbptr], #24]\n"
+    "ldr s17, [x21, x27]\n"
+    "ldr x20, [%[inptrs], 80]\n"
+    "fmla v8.4s, v17.4s, v14.4s\n"
+    "ldr s18, [x22, x27]\n"
+    "ldr s16, [x20, x27]\n"
+    "ldr x21, [%[inptrs], 8]\n"
+    "fmax v4.4s, v4.4s, v11.4s\n"
+    "ldr s1, [%[wbptr], #36]\n"
+    "ldr s11, [x21, x27]\n"
+    "ldr x22, [%[inptrs], 48]\n"
+    "fmla v8.4s, v18.4s, v13.4s\n"
+    "ldr x21, [%[inptrs], 16]\n"
+    "fmin v4.4s, v4.4s, v12.4s\n"
+    "ldr s19, [x22, x27]\n"
+    "ldr s12, [x21, x27]\n"
+    "ldr x23, [%[inptrs], 120]\n"
+    "ldr x20, [%[inptrs], 88]\n"
+    "subs x26, x26, #1\n"
+    "str s4, [x24, x28]\n"
+    "mov v4.16b, v15.16b\n"
+    "ldr s17, [x23, x27]\n"
+    "fmla v8.4s, v11.4s, v10.4s\n"
+    "ldr s15, [x20, x27]\n"
+    "add x28, x28, #4\n"
+    "fmla v8.4s, v16.4s, v9.4s\n"
+    "bne 5b\n"
+    // 6: scalar tail - last channel; same as the vector tail at label 3
+    // but with 4-byte strides and no next-iteration reloads.
+    "6:\n"
+    "fmla v3.4s, v16.4s, v14.4s\n"
+    "ldr x22, [%[inptrs], 56]\n"
+    "fmla v8.4s, v19.4s, v5.4s\n"
+    "ldr x21, [%[inptrs], 24]\n"
+    "fmla v2.4s, v12.4s, v14.4s\n"
+    "ldr s16, [x22, x27]\n"
+    "movi v11.16b, #0\n"
+    "ldr s18, [x21, x27]\n"
+    "fmla v3.4s, v17.4s, v13.4s\n"
+    "ldr x20, [%[inptrs], 160]\n"
+    "fmla v8.4s, v12.4s, v7.4s\n"
+    "ldr x23, [%[inptrs], 128]\n"
+    "fmla v2.4s, v16.4s, v13.4s\n"
+    "ldr s19, [x20, x27]\n"
+    "fmov v12.4s, #6.0\n"
+    "ldr s17, [x23, x27]\n"
+    "fmla v3.4s, v15.4s, v10.4s\n"
+    "ldr x20, [%[inptrs], 96]\n"
+    "fmla v8.4s, v15.4s, v6.4s\n"
+    "ldr x22, [%[inptrs], 64]\n"
+    "fmla v2.4s, v18.4s, v10.4s\n"
+    "ldr s15, [x20, x27]\n"
+    "fmla v4.4s, v15.4s, v14.4s\n"
+    "ldr s18, [x22, x27]\n"
+    "fmla v3.4s, v19.4s, v9.4s\n"
+    "ldr x21, [%[inptrs], 32]\n"
+    "fmla v8.4s, v16.4s, v0.4s\n"
+    "ldr x20, [%[inptrs], 168]\n"
+    "fmla v2.4s, v15.4s, v9.4s\n"
+    "ldr s19, [x21, x27]\n"
+    "ldr s16, [x20, x27]\n"
+    "ldr x23, [%[inptrs], 136]\n"
+    "fmla v3.4s, v17.4s, v5.4s\n"
+    "ldr x20, [%[inptrs], 104]\n"
+    "fmla v8.4s, v15.4s, v1.4s\n"
+    "ldr s14, [x23, x27]\n"
+    "fmla v2.4s, v18.4s, v5.4s\n"
+    "ldr s17, [x20, x27]\n"
+    "fmla v4.4s, v14.4s, v13.4s\n"
+    "ldr x22, [%[inptrs], 72]\n"
+    "fmla v3.4s, v15.4s, v7.4s\n"
+    "ldr x20, [%[inptrs], 176]\n"
+    "fmax v8.4s, v8.4s, v11.4s\n"
+    "ldr s18, [x22, x27]\n"
+    "fmla v2.4s, v19.4s, v7.4s\n"
+    "ldr s13, [x20, x27]\n"
+    "fmla v4.4s, v17.4s, v10.4s\n"
+    "ldr x23, [%[inptrs], 144]\n"
+    "fmla v3.4s, v16.4s, v6.4s\n"
+    "ldr x20, [%[inptrs], 112]\n"
+    "fmin v8.4s, v8.4s, v12.4s\n"
+    "ldr s10, [x23, x27]\n"
+    "fmla v2.4s, v17.4s, v6.4s\n"
+    "ldr s15, [x20, x27]\n"
+    "fmla v4.4s, v13.4s, v9.4s\n"
+    "ldr x20, [%[inptrs], 184]\n"
+    "fmla v3.4s, v14.4s, v0.4s\n"
+    "ldr x23, [%[inptrs], 152]\n"
+    "ldr s9, [x20, x27]\n"
+    "ldr x22, [%[outptrs], 0]\n"
+    "fmla v2.4s, v18.4s, v0.4s\n"
+    "ldr s19, [x23, x27]\n"
+    "str s8, [x22, x28]\n"
+    "fmla v4.4s, v10.4s, v5.4s\n"
+    "fmla v3.4s, v13.4s, v1.4s\n"
+    "ldr x20, [%[inptrs], 192]\n"
+    "ldr x22, [%[outptrs], 8]\n"
+    "ldr x24, [%[outptrs], 16]\n"
+    "add %[wbptr], %[wbptr], #40\n"
+    "fmla v2.4s, v15.4s, v1.4s\n"
+    "ldr s16, [x20, x27]\n"
+    "fmla v4.4s, v15.4s, v7.4s\n"
+    "prfm pldl1keep, [%[wbptr], #64]\n"
+    "fmax v3.4s, v3.4s, v11.4s\n"
+    "add x27, x27, #4\n"
+    "fmax v2.4s, v2.4s, v11.4s\n"
+    "fmla v4.4s, v9.4s, v6.4s\n"
+    "fmin v3.4s, v3.4s, v12.4s\n"
+    "fmin v2.4s, v2.4s, v12.4s\n"
+    "str s3, [x24, x28]\n"
+    "fmla v4.4s, v19.4s, v0.4s\n"
+    "str s2, [x22, x28]\n"
+    "ldr x24, [%[outptrs], 24]\n"
+    "fmla v4.4s, v16.4s, v1.4s\n"
+    "fmax v4.4s, v4.4s, v11.4s\n"
+    "fmin v4.4s, v4.4s, v12.4s\n"
+    "str s4, [x24, x28]\n"
+    "add x28, x28, #4\n"
+    // 7: all channels done.
+    "7:\n"
+    : [wbptr] "+r" (weight_bias_ptr)
+    : [inptrs] "r" (inptrs), [outptrs] "r" (outptrs), [n_channels] "r" ((long) n_channels)
+    : "cc", "v0", "v1", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "memory"
+  );
+}
+
#endif // __aarch64__
template class DepthwiseConvolution<2, 2, 3, 3, 2, 2, float, float, float>;