aboutsummaryrefslogtreecommitdiff
path: root/src/core/NEON/kernels/convolution/depthwise/depthwise_2x2_3x3_2x2_fp32_fp32.cpp
diff options
context:
space:
mode:
authorGeorgios Pinitas <georgios.pinitas@arm.com>2019-03-11 14:03:23 +0000
committerGeorgios Pinitas <georgios.pinitas@arm.com>2019-03-29 09:54:53 +0000
commit47d39dc615d1dee2482bc84699802165a9778ac8 (patch)
tree87f2fdb4f4957be7ff1c043be6328e4154cdf9e1 /src/core/NEON/kernels/convolution/depthwise/depthwise_2x2_3x3_2x2_fp32_fp32.cpp
parent2d2551ed3934f071eb6a65f5b776301454bc147a (diff)
downloadComputeLibrary-47d39dc615d1dee2482bc84699802165a9778ac8.tar.gz
COMPMID-1975: Update depthwise convolution.
Change-Id: Iad58672be35710a7ec2e918653d6d529709387e8 Signed-off-by: Georgios Pinitas <georgios.pinitas@arm.com> Reviewed-on: https://review.mlplatform.org/c/898 Tested-by: Arm Jenkins <bsgcomp@arm.com> Reviewed-by: Giuseppe Rossini <giuseppe.rossini@arm.com> Comments-Addressed: Arm Jenkins <bsgcomp@arm.com> Reviewed-by: Gian Marco Iodice <gianmarco.iodice@arm.com>
Diffstat (limited to 'src/core/NEON/kernels/convolution/depthwise/depthwise_2x2_3x3_2x2_fp32_fp32.cpp')
-rw-r--r--src/core/NEON/kernels/convolution/depthwise/depthwise_2x2_3x3_2x2_fp32_fp32.cpp1318
1 files changed, 1257 insertions, 61 deletions
diff --git a/src/core/NEON/kernels/convolution/depthwise/depthwise_2x2_3x3_2x2_fp32_fp32.cpp b/src/core/NEON/kernels/convolution/depthwise/depthwise_2x2_3x3_2x2_fp32_fp32.cpp
index 9ce43f949b..4ac6276123 100644
--- a/src/core/NEON/kernels/convolution/depthwise/depthwise_2x2_3x3_2x2_fp32_fp32.cpp
+++ b/src/core/NEON/kernels/convolution/depthwise/depthwise_2x2_3x3_2x2_fp32_fp32.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018 ARM Limited.
+ * Copyright (c) 2018-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -25,74 +25,1270 @@
namespace depthwise
{
-using Conv = DepthwiseConvolution<2, 2, 3, 3, 2, 2, float, float>;
-using ConvImpl = DepthwiseConvolutionImpl<2, 2, 3, 3, 2, 2, float, float>;
-template <>
-const Conv::TileFn Conv::tilefn_unpadded = ConvImpl::template process_tile<true, 0, 0, 0, 0, 0, 0>;
+using namespace neon_convolution_kernels;
+using Conv = DepthwiseConvolution<2, 2, 3, 3, 2, 2, float, float, float>;
+#ifdef __aarch64__
template <>
-const Conv::TileFn Conv::tilefn_top[n_in_pad_top_fns] = {
- ConvImpl::template process_tile<true, 0, 0, 0, 0, 0, 0>,
- ConvImpl::template process_tile<true, 1, 0, 0, 0, 0, 0>,
-};
-
template <>
-const Conv::TileFn Conv::tilefn_left[n_in_pad_left_fns] = {
- ConvImpl::template process_tile<true, 0, 0, 0, 0, 0, 0>,
- ConvImpl::template process_tile<true, 0, 1, 0, 0, 0, 0>,
-};
+void Conv::execute_tile<ActivationFunction::None>(
+ int n_channels,
+ const void *weight_bias_ptr,
+ const float *input,
+ const unsigned int input_row_stride,
+ const unsigned int input_col_stride,
+ float *output,
+ const unsigned int output_row_stride,
+ const unsigned int output_col_stride
+)
+{
+ __asm __volatile(
+ "add x23, %[inptr0], %[input_row_stride]\n"
+ "add x19, %[input_col_stride1], %[input_col_stride1]\n"
+ "add x22, %[outptr0], %[output_row_stride]\n"
+ "add x24, x23, %[input_row_stride]\n"
+ "add x20, x19, %[input_col_stride1]\n"
+ "and x27, %[n_channels], #3\n"
+ "add x25, x24, %[input_row_stride]\n"
+ "add x21, x20, %[input_col_stride1]\n"
+ "lsr x28, %[n_channels], #2\n"
+ "add x26, x25, %[input_row_stride]\n"
+ "cbz x28, 4f\n"
+ "1:\n"
+ "ldr q14, [%[wbptr]]\n"
+ "subs x28, x28, #1\n"
+ "mov v12.16b, v14.16b\n"
+ "ldr q8, [%[wbptr], #16]\n"
+ "mov v10.16b, v14.16b\n"
+ "ldr q7, [%[wbptr], #32]\n"
+ "mov v11.16b, v14.16b\n"
+ "ldr q6, [%[wbptr], #48]\n"
+ "mov v9.16b, v14.16b\n"
+ "ldr q5, [%[wbptr], #64]\n"
+ "ldr q4, [%[wbptr], #80]\n"
+ "ldr q3, [%[wbptr], #96]\n"
+ "ldr q2, [%[wbptr], #112]\n"
+ "ldr q1, [%[wbptr], #128]\n"
+ "ldr q0, [%[wbptr], #144]\n"
+ "ldr q15, [%[inptr0]]\n"
+ "fmla v12.4s, v15.4s, v8.4s\n"
+ "ldr q20, [x23]\n"
+ "ldr q13, [%[inptr0], %[input_col_stride1]]\n"
+ "ldr q17, [x24]\n"
+ "fmla v10.4s, v17.4s, v8.4s\n"
+ "ldr q16, [x23, %[input_col_stride1]]\n"
+ "fmla v12.4s, v20.4s, v5.4s\n"
+ "ldr q18, [%[inptr0], x19]\n"
+ "ldr q14, [x25]\n"
+ "ldr q15, [x24, %[input_col_stride1]]\n"
+ "fmla v12.4s, v13.4s, v7.4s\n"
+ "fmla v12.4s, v17.4s, v2.4s\n"
+ "fmla v12.4s, v16.4s, v4.4s\n"
+ "fmla v12.4s, v18.4s, v6.4s\n"
+ "beq 3f\n"
+ "2:\n"
+ "fmla v11.4s, v18.4s, v8.4s\n"
+ "ldr q19, [x23, x19]\n"
+ "fmla v10.4s, v14.4s, v5.4s\n"
+ "ldr q20, [%[inptr0], x20]\n"
+ "fmla v12.4s, v15.4s, v1.4s\n"
+ "ldr q14, [x26]\n"
+ "fmla v11.4s, v19.4s, v5.4s\n"
+ "ldr q13, [x25, %[input_col_stride1]]\n"
+ "fmla v10.4s, v15.4s, v7.4s\n"
+ "ldr q17, [x24, x19]\n"
+ "fmla v12.4s, v19.4s, v3.4s\n"
+ "ldr q19, [x23, x20]\n"
+ "fmla v11.4s, v20.4s, v7.4s\n"
+ "ldr q18, [%[inptr0], x21]\n"
+ "fmla v10.4s, v14.4s, v2.4s\n"
+ "ldr q16, [x26, %[input_col_stride1]]\n"
+ "fmla v12.4s, v17.4s, v0.4s\n"
+ "ldr q14, [x25, x19]\n"
+ "fmla v11.4s, v17.4s, v2.4s\n"
+ "ldr q15, [x24, x20]\n"
+ "fmla v10.4s, v13.4s, v4.4s\n"
+ "ldr q13, [x23, x21]\n"
+ "str q12, [%[outptr0]]\n"
+ "fmla v9.4s, v17.4s, v8.4s\n"
+ "fmla v11.4s, v19.4s, v4.4s\n"
+ "ldr q12, [x26, x19]\n"
+ "fmla v10.4s, v17.4s, v6.4s\n"
+ "ldr q20, [x25, x20]\n"
+ "fmla v9.4s, v14.4s, v5.4s\n"
+ "ldr q17, [x24, x21]\n"
+ "fmla v11.4s, v18.4s, v6.4s\n"
+ "ldr q19, [x26, x20]\n"
+ "fmla v10.4s, v16.4s, v1.4s\n"
+ "ldr q18, [x25, x21]\n"
+ "fmla v9.4s, v15.4s, v7.4s\n"
+ "ldr q16, [x26, x21]\n"
+ "fmla v11.4s, v15.4s, v1.4s\n"
+ "add %[wbptr], %[wbptr], #160\n"
+ "fmla v10.4s, v14.4s, v3.4s\n"
+ "ldr q14, [%[wbptr]]\n"
+ "fmla v9.4s, v12.4s, v2.4s\n"
+ "ldr q8, [%[wbptr], #16]\n"
+ "fmla v11.4s, v13.4s, v3.4s\n"
+ "ldr q7, [%[wbptr], #32]\n"
+ "fmla v10.4s, v12.4s, v0.4s\n"
+ "ldr q5, [%[wbptr], #64]\n"
+ "fmla v9.4s, v20.4s, v4.4s\n"
+ "ldr q2, [%[wbptr], #112]\n"
+ "fmla v11.4s, v17.4s, v0.4s\n"
+ "prfm pldl1keep, [%[wbptr], #64]\n"
+ "str q10, [x22]\n"
+ "mov v12.16b, v14.16b\n"
+ "fmla v9.4s, v17.4s, v6.4s\n"
+ "ldr q4, [%[wbptr], #80]\n"
+ "str q11, [%[outptr0], %[output_col_stride1]]\n"
+ "mov v10.16b, v14.16b\n"
+ "mov v11.16b, v14.16b\n"
+ "add %[inptr0], %[inptr0], #16\n"
+ "fmla v9.4s, v19.4s, v1.4s\n"
+ "ldr q6, [%[wbptr], #48]\n"
+ "ldr q15, [%[inptr0]]\n"
+ "add x23, x23, #16\n"
+ "fmla v12.4s, v15.4s, v8.4s\n"
+ "ldr q20, [x23]\n"
+ "fmla v9.4s, v18.4s, v3.4s\n"
+ "ldr q1, [%[wbptr], #128]\n"
+ "ldr q13, [%[inptr0], %[input_col_stride1]]\n"
+ "add x24, x24, #16\n"
+ "fmla v12.4s, v20.4s, v5.4s\n"
+ "ldr q17, [x24]\n"
+ "fmla v9.4s, v16.4s, v0.4s\n"
+ "ldr q3, [%[wbptr], #96]\n"
+ "fmla v10.4s, v17.4s, v8.4s\n"
+ "ldr q16, [x23, %[input_col_stride1]]\n"
+ "fmla v12.4s, v13.4s, v7.4s\n"
+ "ldr q18, [%[inptr0], x19]\n"
+ "str q9, [x22, %[output_col_stride1]]\n"
+ "add x25, x25, #16\n"
+ "mov v9.16b, v14.16b\n"
+ "ldr q0, [%[wbptr], #144]\n"
+ "fmla v12.4s, v17.4s, v2.4s\n"
+ "ldr q14, [x25]\n"
+ "ldr q15, [x24, %[input_col_stride1]]\n"
+ "add x26, x26, #16\n"
+ "add %[outptr0], %[outptr0], #16\n"
+ "add x22, x22, #16\n"
+ "subs x28, x28, #1\n"
+ "fmla v12.4s, v16.4s, v4.4s\n"
+ "fmla v12.4s, v18.4s, v6.4s\n"
+ "bne 2b\n"
+ "3:\n"
+ "fmla v11.4s, v18.4s, v8.4s\n"
+ "ldr q19, [x23, x19]\n"
+ "fmla v10.4s, v14.4s, v5.4s\n"
+ "ldr q20, [%[inptr0], x20]\n"
+ "fmla v12.4s, v15.4s, v1.4s\n"
+ "ldr q14, [x26]\n"
+ "fmla v11.4s, v19.4s, v5.4s\n"
+ "ldr q13, [x25, %[input_col_stride1]]\n"
+ "fmla v10.4s, v15.4s, v7.4s\n"
+ "ldr q17, [x24, x19]\n"
+ "fmla v12.4s, v19.4s, v3.4s\n"
+ "ldr q19, [x23, x20]\n"
+ "fmla v11.4s, v20.4s, v7.4s\n"
+ "ldr q18, [%[inptr0], x21]\n"
+ "fmla v10.4s, v14.4s, v2.4s\n"
+ "ldr q16, [x26, %[input_col_stride1]]\n"
+ "fmla v12.4s, v17.4s, v0.4s\n"
+ "ldr q14, [x25, x19]\n"
+ "fmla v11.4s, v17.4s, v2.4s\n"
+ "ldr q15, [x24, x20]\n"
+ "fmla v10.4s, v13.4s, v4.4s\n"
+ "ldr q13, [x23, x21]\n"
+ "str q12, [%[outptr0]]\n"
+ "fmla v9.4s, v17.4s, v8.4s\n"
+ "fmla v11.4s, v19.4s, v4.4s\n"
+ "ldr q12, [x26, x19]\n"
+ "fmla v10.4s, v17.4s, v6.4s\n"
+ "ldr q20, [x25, x20]\n"
+ "fmla v9.4s, v14.4s, v5.4s\n"
+ "ldr q17, [x24, x21]\n"
+ "fmla v11.4s, v18.4s, v6.4s\n"
+ "ldr q19, [x26, x20]\n"
+ "fmla v10.4s, v16.4s, v1.4s\n"
+ "ldr q18, [x25, x21]\n"
+ "fmla v9.4s, v15.4s, v7.4s\n"
+ "ldr q16, [x26, x21]\n"
+ "fmla v11.4s, v15.4s, v1.4s\n"
+ "add %[wbptr], %[wbptr], #160\n"
+ "fmla v10.4s, v14.4s, v3.4s\n"
+ "prfm pldl1keep, [%[wbptr], #64]\n"
+ "fmla v9.4s, v12.4s, v2.4s\n"
+ "add %[inptr0], %[inptr0], #16\n"
+ "fmla v11.4s, v13.4s, v3.4s\n"
+ "add x23, x23, #16\n"
+ "fmla v10.4s, v12.4s, v0.4s\n"
+ "add x24, x24, #16\n"
+ "fmla v9.4s, v20.4s, v4.4s\n"
+ "add x25, x25, #16\n"
+ "fmla v11.4s, v17.4s, v0.4s\n"
+ "add x26, x26, #16\n"
+ "str q10, [x22]\n"
+ "fmla v9.4s, v17.4s, v6.4s\n"
+ "str q11, [%[outptr0], %[output_col_stride1]]\n"
+ "add %[outptr0], %[outptr0], #16\n"
+ "fmla v9.4s, v19.4s, v1.4s\n"
+ "fmla v9.4s, v18.4s, v3.4s\n"
+ "fmla v9.4s, v16.4s, v0.4s\n"
+ "str q9, [x22, %[output_col_stride1]]\n"
+ "add x22, x22, #16\n"
+ "4:\n"
+ "cbz x27, 7f\n"
+ "ldr s14, [%[wbptr]]\n"
+ "mov v12.16b, v14.16b\n"
+ "ldr s8, [%[wbptr], #4]\n"
+ "mov v10.16b, v14.16b\n"
+ "ldr s7, [%[wbptr], #8]\n"
+ "mov v11.16b, v14.16b\n"
+ "ldr s6, [%[wbptr], #12]\n"
+ "mov v9.16b, v14.16b\n"
+ "ldr s5, [%[wbptr], #16]\n"
+ "ldr s4, [%[wbptr], #20]\n"
+ "subs x27, x27, #1\n"
+ "ldr s3, [%[wbptr], #24]\n"
+ "ldr s2, [%[wbptr], #28]\n"
+ "ldr s1, [%[wbptr], #32]\n"
+ "ldr s0, [%[wbptr], #36]\n"
+ "ldr s15, [%[inptr0]]\n"
+ "ldr s20, [x23]\n"
+ "fmla v12.4s, v15.4s, v8.4s\n"
+ "ldr s13, [%[inptr0], %[input_col_stride1]]\n"
+ "ldr s17, [x24]\n"
+ "ldr s16, [x23, %[input_col_stride1]]\n"
+ "fmla v10.4s, v17.4s, v8.4s\n"
+ "ldr s18, [%[inptr0], x19]\n"
+ "fmla v12.4s, v20.4s, v5.4s\n"
+ "ldr s14, [x25]\n"
+ "ldr s15, [x24, %[input_col_stride1]]\n"
+ "fmla v12.4s, v13.4s, v7.4s\n"
+ "fmla v12.4s, v17.4s, v2.4s\n"
+ "fmla v12.4s, v16.4s, v4.4s\n"
+ "fmla v12.4s, v18.4s, v6.4s\n"
+ "beq 6f\n"
+ "5:\n"
+ "fmla v11.4s, v18.4s, v8.4s\n"
+ "ldr s19, [x23, x19]\n"
+ "fmla v10.4s, v14.4s, v5.4s\n"
+ "ldr s20, [%[inptr0], x20]\n"
+ "fmla v12.4s, v15.4s, v1.4s\n"
+ "ldr s14, [x26]\n"
+ "fmla v11.4s, v19.4s, v5.4s\n"
+ "ldr s13, [x25, %[input_col_stride1]]\n"
+ "fmla v10.4s, v15.4s, v7.4s\n"
+ "ldr s17, [x24, x19]\n"
+ "fmla v12.4s, v19.4s, v3.4s\n"
+ "ldr s19, [x23, x20]\n"
+ "fmla v11.4s, v20.4s, v7.4s\n"
+ "ldr s18, [%[inptr0], x21]\n"
+ "fmla v10.4s, v14.4s, v2.4s\n"
+ "ldr s16, [x26, %[input_col_stride1]]\n"
+ "fmla v12.4s, v17.4s, v0.4s\n"
+ "ldr s14, [x25, x19]\n"
+ "fmla v11.4s, v17.4s, v2.4s\n"
+ "ldr s15, [x24, x20]\n"
+ "fmla v10.4s, v13.4s, v4.4s\n"
+ "ldr s13, [x23, x21]\n"
+ "str s12, [%[outptr0]]\n"
+ "fmla v9.4s, v17.4s, v8.4s\n"
+ "fmla v11.4s, v19.4s, v4.4s\n"
+ "ldr s12, [x26, x19]\n"
+ "fmla v10.4s, v17.4s, v6.4s\n"
+ "ldr s20, [x25, x20]\n"
+ "fmla v9.4s, v14.4s, v5.4s\n"
+ "ldr s17, [x24, x21]\n"
+ "fmla v11.4s, v18.4s, v6.4s\n"
+ "ldr s19, [x26, x20]\n"
+ "fmla v10.4s, v16.4s, v1.4s\n"
+ "ldr s18, [x25, x21]\n"
+ "fmla v9.4s, v15.4s, v7.4s\n"
+ "ldr s16, [x26, x21]\n"
+ "fmla v11.4s, v15.4s, v1.4s\n"
+ "add %[wbptr], %[wbptr], #40\n"
+ "fmla v10.4s, v14.4s, v3.4s\n"
+ "ldr s14, [%[wbptr]]\n"
+ "fmla v9.4s, v12.4s, v2.4s\n"
+ "ldr s8, [%[wbptr], #4]\n"
+ "fmla v11.4s, v13.4s, v3.4s\n"
+ "ldr s7, [%[wbptr], #8]\n"
+ "fmla v10.4s, v12.4s, v0.4s\n"
+ "ldr s5, [%[wbptr], #16]\n"
+ "fmla v9.4s, v20.4s, v4.4s\n"
+ "ldr s2, [%[wbptr], #28]\n"
+ "fmla v11.4s, v17.4s, v0.4s\n"
+ "prfm pldl1keep, [%[wbptr], #64]\n"
+ "str s10, [x22]\n"
+ "mov v12.16b, v14.16b\n"
+ "fmla v9.4s, v17.4s, v6.4s\n"
+ "ldr s4, [%[wbptr], #20]\n"
+ "str s11, [%[outptr0], %[output_col_stride1]]\n"
+ "mov v10.16b, v14.16b\n"
+ "mov v11.16b, v14.16b\n"
+ "add %[inptr0], %[inptr0], #4\n"
+ "fmla v9.4s, v19.4s, v1.4s\n"
+ "ldr s6, [%[wbptr], #12]\n"
+ "ldr s15, [%[inptr0]]\n"
+ "add x23, x23, #4\n"
+ "fmla v12.4s, v15.4s, v8.4s\n"
+ "ldr s20, [x23]\n"
+ "fmla v9.4s, v18.4s, v3.4s\n"
+ "ldr s1, [%[wbptr], #32]\n"
+ "ldr s13, [%[inptr0], %[input_col_stride1]]\n"
+ "add x24, x24, #4\n"
+ "fmla v12.4s, v20.4s, v5.4s\n"
+ "ldr s17, [x24]\n"
+ "fmla v9.4s, v16.4s, v0.4s\n"
+ "ldr s3, [%[wbptr], #24]\n"
+ "fmla v10.4s, v17.4s, v8.4s\n"
+ "ldr s16, [x23, %[input_col_stride1]]\n"
+ "fmla v12.4s, v13.4s, v7.4s\n"
+ "ldr s18, [%[inptr0], x19]\n"
+ "str s9, [x22, %[output_col_stride1]]\n"
+ "add x25, x25, #4\n"
+ "mov v9.16b, v14.16b\n"
+ "ldr s0, [%[wbptr], #36]\n"
+ "fmla v12.4s, v17.4s, v2.4s\n"
+ "ldr s14, [x25]\n"
+ "ldr s15, [x24, %[input_col_stride1]]\n"
+ "add x26, x26, #4\n"
+ "add %[outptr0], %[outptr0], #4\n"
+ "add x22, x22, #4\n"
+ "subs x27, x27, #1\n"
+ "fmla v12.4s, v16.4s, v4.4s\n"
+ "fmla v12.4s, v18.4s, v6.4s\n"
+ "bne 5b\n"
+ "6:\n"
+ "fmla v11.4s, v18.4s, v8.4s\n"
+ "ldr s19, [x23, x19]\n"
+ "fmla v10.4s, v14.4s, v5.4s\n"
+ "ldr s20, [%[inptr0], x20]\n"
+ "fmla v12.4s, v15.4s, v1.4s\n"
+ "ldr s14, [x26]\n"
+ "fmla v11.4s, v19.4s, v5.4s\n"
+ "ldr s13, [x25, %[input_col_stride1]]\n"
+ "fmla v10.4s, v15.4s, v7.4s\n"
+ "ldr s17, [x24, x19]\n"
+ "fmla v12.4s, v19.4s, v3.4s\n"
+ "ldr s19, [x23, x20]\n"
+ "fmla v11.4s, v20.4s, v7.4s\n"
+ "ldr s18, [%[inptr0], x21]\n"
+ "fmla v10.4s, v14.4s, v2.4s\n"
+ "ldr s16, [x26, %[input_col_stride1]]\n"
+ "fmla v12.4s, v17.4s, v0.4s\n"
+ "ldr s14, [x25, x19]\n"
+ "fmla v11.4s, v17.4s, v2.4s\n"
+ "ldr s15, [x24, x20]\n"
+ "fmla v10.4s, v13.4s, v4.4s\n"
+ "ldr s13, [x23, x21]\n"
+ "str s12, [%[outptr0]]\n"
+ "fmla v9.4s, v17.4s, v8.4s\n"
+ "fmla v11.4s, v19.4s, v4.4s\n"
+ "ldr s12, [x26, x19]\n"
+ "fmla v10.4s, v17.4s, v6.4s\n"
+ "ldr s20, [x25, x20]\n"
+ "fmla v9.4s, v14.4s, v5.4s\n"
+ "ldr s17, [x24, x21]\n"
+ "fmla v11.4s, v18.4s, v6.4s\n"
+ "ldr s19, [x26, x20]\n"
+ "fmla v10.4s, v16.4s, v1.4s\n"
+ "ldr s18, [x25, x21]\n"
+ "fmla v9.4s, v15.4s, v7.4s\n"
+ "ldr s16, [x26, x21]\n"
+ "fmla v11.4s, v15.4s, v1.4s\n"
+ "add %[wbptr], %[wbptr], #40\n"
+ "fmla v10.4s, v14.4s, v3.4s\n"
+ "prfm pldl1keep, [%[wbptr], #64]\n"
+ "fmla v9.4s, v12.4s, v2.4s\n"
+ "add %[inptr0], %[inptr0], #4\n"
+ "fmla v11.4s, v13.4s, v3.4s\n"
+ "add x23, x23, #4\n"
+ "fmla v10.4s, v12.4s, v0.4s\n"
+ "add x24, x24, #4\n"
+ "fmla v9.4s, v20.4s, v4.4s\n"
+ "add x25, x25, #4\n"
+ "fmla v11.4s, v17.4s, v0.4s\n"
+ "add x26, x26, #4\n"
+ "str s10, [x22]\n"
+ "fmla v9.4s, v17.4s, v6.4s\n"
+ "str s11, [%[outptr0], %[output_col_stride1]]\n"
+ "add %[outptr0], %[outptr0], #4\n"
+ "fmla v9.4s, v19.4s, v1.4s\n"
+ "fmla v9.4s, v18.4s, v3.4s\n"
+ "fmla v9.4s, v16.4s, v0.4s\n"
+ "str s9, [x22, %[output_col_stride1]]\n"
+ "add x22, x22, #4\n"
+ "7:\n"
+ : [outptr0] "+r" (output), [inptr0] "+r" (input), [wbptr] "+r" (weight_bias_ptr)
+ : [n_channels] "r" ((long) n_channels), [output_row_stride] "r" (output_row_stride * sizeof(float)), [output_col_stride1] "r" (output_col_stride * sizeof(float)), [input_row_stride] "r" (input_row_stride * sizeof(float)), [input_col_stride1] "r" (input_col_stride * sizeof(float))
+ : "cc", "v0", "v1", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v2", "v20", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "memory"
+ );
+}
template <>
-const Conv::TileFn Conv::tilefn_bottom[n_in_pad_bottom_fns][n_out_pad_bottom_fns] = {
- {
- ConvImpl::template process_tile<true, 0, 0, 0, 0, 0, 0>,
- ConvImpl::template process_tile<true, 0, 0, 0, 0, 1, 0>,
- },
- {
- ConvImpl::template process_tile<true, 0, 0, 1, 0, 0, 0>,
- ConvImpl::template process_tile<true, 0, 0, 1, 0, 1, 0>,
- },
- {
- ConvImpl::template process_tile<true, 0, 0, 2, 0, 0, 0>,
- ConvImpl::template process_tile<true, 0, 0, 2, 0, 1, 0>,
- },
- {
- ConvImpl::template process_tile<true, 0, 0, 3, 0, 0, 0>,
- ConvImpl::template process_tile<true, 0, 0, 3, 0, 1, 0>,
- },
- {
- ConvImpl::template process_tile<true, 0, 0, 4, 0, 0, 0>,
- ConvImpl::template process_tile<true, 0, 0, 4, 0, 1, 0>,
- },
-};
-
template <>
-const Conv::TileFn Conv::tilefn_right[n_in_pad_right_fns][n_out_pad_right_fns] = {
- {
- ConvImpl::template process_tile<true, 0, 0, 0, 0, 0, 0>,
- ConvImpl::template process_tile<true, 0, 0, 0, 0, 0, 1>,
- },
- {
- ConvImpl::template process_tile<true, 0, 0, 0, 1, 0, 0>,
- ConvImpl::template process_tile<true, 0, 0, 0, 1, 0, 1>,
- },
- {
- ConvImpl::template process_tile<true, 0, 0, 0, 2, 0, 0>,
- ConvImpl::template process_tile<true, 0, 0, 0, 2, 0, 1>,
- },
- {
- ConvImpl::template process_tile<true, 0, 0, 0, 3, 0, 0>,
- ConvImpl::template process_tile<true, 0, 0, 0, 3, 0, 1>,
- },
- {
- ConvImpl::template process_tile<true, 0, 0, 0, 4, 0, 0>,
- ConvImpl::template process_tile<true, 0, 0, 0, 4, 0, 1>,
- },
-};
+void Conv::execute_tile<ActivationFunction::ReLU>(
+ int n_channels,
+ const void *weight_bias_ptr,
+ const float *input,
+ const unsigned int input_row_stride,
+ const unsigned int input_col_stride,
+ float *output,
+ const unsigned int output_row_stride,
+ const unsigned int output_col_stride
+)
+{
+ __asm __volatile(
+ "add x24, %[inptr0], %[input_row_stride]\n"
+ "add x27, %[input_col_stride1], %[input_col_stride1]\n"
+ "add x19, %[outptr0], %[output_row_stride]\n"
+ "add x25, x24, %[input_row_stride]\n"
+ "add x23, x27, %[input_col_stride1]\n"
+ "and x20, %[n_channels], #3\n"
+ "add x28, x25, %[input_row_stride]\n"
+ "add x22, x23, %[input_col_stride1]\n"
+ "lsr x21, %[n_channels], #2\n"
+ "add x26, x28, %[input_row_stride]\n"
+ "cbz x21, 4f\n"
+ "1:\n"
+ "ldr q16, [%[wbptr]]\n"
+ "subs x21, x21, #1\n"
+ "mov v3.16b, v16.16b\n"
+ "ldr q4, [%[wbptr], #16]\n"
+ "mov v1.16b, v16.16b\n"
+ "ldr q5, [%[wbptr], #32]\n"
+ "mov v2.16b, v16.16b\n"
+ "ldr q12, [%[wbptr], #48]\n"
+ "mov v0.16b, v16.16b\n"
+ "ldr q11, [%[wbptr], #64]\n"
+ "ldr q10, [%[wbptr], #80]\n"
+ "ldr q6, [%[wbptr], #96]\n"
+ "ldr q9, [%[wbptr], #112]\n"
+ "ldr q8, [%[wbptr], #128]\n"
+ "ldr q7, [%[wbptr], #144]\n"
+ "ldr q21, [%[inptr0]]\n"
+ "fmla v3.4s, v21.4s, v4.4s\n"
+ "ldr q23, [x24]\n"
+ "ldr q19, [%[inptr0], %[input_col_stride1]]\n"
+ "ldr q14, [x25]\n"
+ "fmla v1.4s, v14.4s, v4.4s\n"
+ "ldr q13, [x24, %[input_col_stride1]]\n"
+ "fmla v3.4s, v23.4s, v11.4s\n"
+ "ldr q18, [%[inptr0], x27]\n"
+ "ldr q15, [x28]\n"
+ "ldr q22, [x25, %[input_col_stride1]]\n"
+ "fmla v3.4s, v19.4s, v5.4s\n"
+ "fmla v3.4s, v14.4s, v9.4s\n"
+ "beq 3f\n"
+ "2:\n"
+ "fmla v3.4s, v13.4s, v10.4s\n"
+ "ldr q17, [x24, x27]\n"
+ "fmla v2.4s, v18.4s, v4.4s\n"
+ "ldr q20, [%[inptr0], x23]\n"
+ "fmla v1.4s, v15.4s, v11.4s\n"
+ "ldr q19, [x26]\n"
+ "fmla v3.4s, v18.4s, v12.4s\n"
+ "ldr q13, [x28, %[input_col_stride1]]\n"
+ "fmla v2.4s, v17.4s, v11.4s\n"
+ "ldr q14, [x25, x27]\n"
+ "fmla v1.4s, v22.4s, v5.4s\n"
+ "ldr q15, [x24, x23]\n"
+ "fmla v3.4s, v22.4s, v8.4s\n"
+ "ldr q16, [%[inptr0], x22]\n"
+ "fmla v2.4s, v20.4s, v5.4s\n"
+ "ldr q20, [x26, %[input_col_stride1]]\n"
+ "fmla v1.4s, v19.4s, v9.4s\n"
+ "ldr q19, [x28, x27]\n"
+ "fmla v3.4s, v17.4s, v6.4s\n"
+ "ldr q21, [x25, x23]\n"
+ "fmla v2.4s, v14.4s, v9.4s\n"
+ "ldr q22, [x24, x22]\n"
+ "fmla v1.4s, v13.4s, v10.4s\n"
+ "ldr q23, [x26, x27]\n"
+ "fmla v3.4s, v14.4s, v7.4s\n"
+ "ldr q18, [x28, x23]\n"
+ "fmla v0.4s, v14.4s, v4.4s\n"
+ "ldr q13, [x25, x22]\n"
+ "fmla v1.4s, v14.4s, v12.4s\n"
+ "ldr q14, [x26, x23]\n"
+ "fmla v2.4s, v15.4s, v10.4s\n"
+ "ldr q17, [x28, x22]\n"
+ "fmla v0.4s, v19.4s, v11.4s\n"
+ "ldr q15, [x26, x22]\n"
+ "fmla v1.4s, v20.4s, v8.4s\n"
+ "add %[wbptr], %[wbptr], #160\n"
+ "fmla v2.4s, v16.4s, v12.4s\n"
+ "ldr q16, [%[wbptr]]\n"
+ "fmla v0.4s, v21.4s, v5.4s\n"
+ "ldr q4, [%[wbptr], #16]\n"
+ "fmla v1.4s, v19.4s, v6.4s\n"
+ "ldr q11, [%[wbptr], #64]\n"
+ "fmla v2.4s, v21.4s, v8.4s\n"
+ "prfm pldl1keep, [%[wbptr], #64]\n"
+ "fmla v0.4s, v23.4s, v9.4s\n"
+ "ldr q5, [%[wbptr], #32]\n"
+ "fmla v1.4s, v23.4s, v7.4s\n"
+ "add %[inptr0], %[inptr0], #16\n"
+ "fmla v2.4s, v22.4s, v6.4s\n"
+ "ldr q21, [%[inptr0]]\n"
+ "fmla v0.4s, v18.4s, v10.4s\n"
+ "ldr q9, [%[wbptr], #112]\n"
+ "movi v20.16b, #0\n"
+ "ldr q19, [%[inptr0], %[input_col_stride1]]\n"
+ "fmla v2.4s, v13.4s, v7.4s\n"
+ "ldr q18, [%[inptr0], x27]\n"
+ "fmla v0.4s, v13.4s, v12.4s\n"
+ "ldr q10, [%[wbptr], #80]\n"
+ "fmax v3.4s, v3.4s, v20.4s\n"
+ "add x24, x24, #16\n"
+ "fmax v2.4s, v2.4s, v20.4s\n"
+ "ldr q23, [x24]\n"
+ "str q3, [%[outptr0]]\n"
+ "fmla v0.4s, v14.4s, v8.4s\n"
+ "str q2, [%[outptr0], %[output_col_stride1]]\n"
+ "fmax v1.4s, v1.4s, v20.4s\n"
+ "mov v3.16b, v16.16b\n"
+ "ldr q12, [%[wbptr], #48]\n"
+ "str q1, [x19]\n"
+ "fmla v0.4s, v17.4s, v6.4s\n"
+ "mov v1.16b, v16.16b\n"
+ "ldr q8, [%[wbptr], #128]\n"
+ "mov v2.16b, v16.16b\n"
+ "ldr q13, [x24, %[input_col_stride1]]\n"
+ "fmla v0.4s, v15.4s, v7.4s\n"
+ "ldr q6, [%[wbptr], #96]\n"
+ "fmla v3.4s, v21.4s, v4.4s\n"
+ "add x25, x25, #16\n"
+ "ldr q14, [x25]\n"
+ "add x28, x28, #16\n"
+ "fmax v0.4s, v0.4s, v20.4s\n"
+ "ldr q7, [%[wbptr], #144]\n"
+ "fmla v3.4s, v23.4s, v11.4s\n"
+ "ldr q15, [x28]\n"
+ "str q0, [x19, %[output_col_stride1]]\n"
+ "fmla v1.4s, v14.4s, v4.4s\n"
+ "mov v0.16b, v16.16b\n"
+ "ldr q22, [x25, %[input_col_stride1]]\n"
+ "fmla v3.4s, v19.4s, v5.4s\n"
+ "add x26, x26, #16\n"
+ "add %[outptr0], %[outptr0], #16\n"
+ "add x19, x19, #16\n"
+ "subs x21, x21, #1\n"
+ "fmla v3.4s, v14.4s, v9.4s\n"
+ "bne 2b\n"
+ "3:\n"
+ "fmla v3.4s, v13.4s, v10.4s\n"
+ "ldr q17, [x24, x27]\n"
+ "fmla v2.4s, v18.4s, v4.4s\n"
+ "ldr q20, [%[inptr0], x23]\n"
+ "fmla v1.4s, v15.4s, v11.4s\n"
+ "ldr q19, [x26]\n"
+ "fmla v3.4s, v18.4s, v12.4s\n"
+ "ldr q13, [x28, %[input_col_stride1]]\n"
+ "fmla v2.4s, v17.4s, v11.4s\n"
+ "ldr q14, [x25, x27]\n"
+ "fmla v1.4s, v22.4s, v5.4s\n"
+ "ldr q15, [x24, x23]\n"
+ "fmla v3.4s, v22.4s, v8.4s\n"
+ "ldr q16, [%[inptr0], x22]\n"
+ "fmla v2.4s, v20.4s, v5.4s\n"
+ "ldr q20, [x26, %[input_col_stride1]]\n"
+ "fmla v1.4s, v19.4s, v9.4s\n"
+ "ldr q19, [x28, x27]\n"
+ "fmla v3.4s, v17.4s, v6.4s\n"
+ "ldr q21, [x25, x23]\n"
+ "fmla v2.4s, v14.4s, v9.4s\n"
+ "ldr q22, [x24, x22]\n"
+ "fmla v1.4s, v13.4s, v10.4s\n"
+ "ldr q23, [x26, x27]\n"
+ "fmla v3.4s, v14.4s, v7.4s\n"
+ "ldr q18, [x28, x23]\n"
+ "fmla v0.4s, v14.4s, v4.4s\n"
+ "ldr q13, [x25, x22]\n"
+ "fmla v1.4s, v14.4s, v12.4s\n"
+ "ldr q14, [x26, x23]\n"
+ "fmla v2.4s, v15.4s, v10.4s\n"
+ "ldr q17, [x28, x22]\n"
+ "fmla v0.4s, v19.4s, v11.4s\n"
+ "ldr q15, [x26, x22]\n"
+ "fmla v1.4s, v20.4s, v8.4s\n"
+ "add %[wbptr], %[wbptr], #160\n"
+ "fmla v2.4s, v16.4s, v12.4s\n"
+ "prfm pldl1keep, [%[wbptr], #64]\n"
+ "fmla v0.4s, v21.4s, v5.4s\n"
+ "add %[inptr0], %[inptr0], #16\n"
+ "fmla v1.4s, v19.4s, v6.4s\n"
+ "add x24, x24, #16\n"
+ "fmla v2.4s, v21.4s, v8.4s\n"
+ "add x25, x25, #16\n"
+ "fmla v0.4s, v23.4s, v9.4s\n"
+ "add x28, x28, #16\n"
+ "fmla v1.4s, v23.4s, v7.4s\n"
+ "add x26, x26, #16\n"
+ "fmla v2.4s, v22.4s, v6.4s\n"
+ "movi v20.16b, #0\n"
+ "fmla v0.4s, v18.4s, v10.4s\n"
+ "fmax v3.4s, v3.4s, v20.4s\n"
+ "fmla v2.4s, v13.4s, v7.4s\n"
+ "fmax v1.4s, v1.4s, v20.4s\n"
+ "str q3, [%[outptr0]]\n"
+ "fmla v0.4s, v13.4s, v12.4s\n"
+ "str q1, [x19]\n"
+ "fmax v2.4s, v2.4s, v20.4s\n"
+ "fmla v0.4s, v14.4s, v8.4s\n"
+ "str q2, [%[outptr0], %[output_col_stride1]]\n"
+ "add %[outptr0], %[outptr0], #16\n"
+ "fmla v0.4s, v17.4s, v6.4s\n"
+ "fmla v0.4s, v15.4s, v7.4s\n"
+ "fmax v0.4s, v0.4s, v20.4s\n"
+ "str q0, [x19, %[output_col_stride1]]\n"
+ "add x19, x19, #16\n"
+ "4:\n"
+ "cbz x20, 7f\n"
+ "ldr s16, [%[wbptr]]\n"
+ "mov v3.16b, v16.16b\n"
+ "ldr s4, [%[wbptr], #4]\n"
+ "mov v1.16b, v16.16b\n"
+ "ldr s5, [%[wbptr], #8]\n"
+ "mov v2.16b, v16.16b\n"
+ "ldr s12, [%[wbptr], #12]\n"
+ "mov v0.16b, v16.16b\n"
+ "ldr s11, [%[wbptr], #16]\n"
+ "ldr s10, [%[wbptr], #20]\n"
+ "subs x20, x20, #1\n"
+ "ldr s6, [%[wbptr], #24]\n"
+ "ldr s9, [%[wbptr], #28]\n"
+ "ldr s8, [%[wbptr], #32]\n"
+ "ldr s7, [%[wbptr], #36]\n"
+ "ldr s21, [%[inptr0]]\n"
+ "ldr s23, [x24]\n"
+ "fmla v3.4s, v21.4s, v4.4s\n"
+ "ldr s19, [%[inptr0], %[input_col_stride1]]\n"
+ "ldr s14, [x25]\n"
+ "ldr s13, [x24, %[input_col_stride1]]\n"
+ "fmla v1.4s, v14.4s, v4.4s\n"
+ "ldr s18, [%[inptr0], x27]\n"
+ "fmla v3.4s, v23.4s, v11.4s\n"
+ "ldr s15, [x28]\n"
+ "ldr s22, [x25, %[input_col_stride1]]\n"
+ "fmla v3.4s, v19.4s, v5.4s\n"
+ "fmla v3.4s, v14.4s, v9.4s\n"
+ "beq 6f\n"
+ "5:\n"
+ "fmla v3.4s, v13.4s, v10.4s\n"
+ "ldr s17, [x24, x27]\n"
+ "fmla v2.4s, v18.4s, v4.4s\n"
+ "ldr s20, [%[inptr0], x23]\n"
+ "fmla v1.4s, v15.4s, v11.4s\n"
+ "ldr s19, [x26]\n"
+ "fmla v3.4s, v18.4s, v12.4s\n"
+ "ldr s13, [x28, %[input_col_stride1]]\n"
+ "fmla v2.4s, v17.4s, v11.4s\n"
+ "ldr s14, [x25, x27]\n"
+ "fmla v1.4s, v22.4s, v5.4s\n"
+ "ldr s15, [x24, x23]\n"
+ "fmla v3.4s, v22.4s, v8.4s\n"
+ "ldr s16, [%[inptr0], x22]\n"
+ "fmla v2.4s, v20.4s, v5.4s\n"
+ "ldr s20, [x26, %[input_col_stride1]]\n"
+ "fmla v1.4s, v19.4s, v9.4s\n"
+ "ldr s19, [x28, x27]\n"
+ "fmla v3.4s, v17.4s, v6.4s\n"
+ "ldr s21, [x25, x23]\n"
+ "fmla v2.4s, v14.4s, v9.4s\n"
+ "ldr s22, [x24, x22]\n"
+ "fmla v1.4s, v13.4s, v10.4s\n"
+ "ldr s23, [x26, x27]\n"
+ "fmla v3.4s, v14.4s, v7.4s\n"
+ "ldr s18, [x28, x23]\n"
+ "fmla v0.4s, v14.4s, v4.4s\n"
+ "ldr s13, [x25, x22]\n"
+ "fmla v1.4s, v14.4s, v12.4s\n"
+ "ldr s14, [x26, x23]\n"
+ "fmla v2.4s, v15.4s, v10.4s\n"
+ "ldr s17, [x28, x22]\n"
+ "fmla v0.4s, v19.4s, v11.4s\n"
+ "ldr s15, [x26, x22]\n"
+ "fmla v1.4s, v20.4s, v8.4s\n"
+ "add %[wbptr], %[wbptr], #40\n"
+ "fmla v2.4s, v16.4s, v12.4s\n"
+ "ldr s16, [%[wbptr]]\n"
+ "fmla v0.4s, v21.4s, v5.4s\n"
+ "ldr s4, [%[wbptr], #4]\n"
+ "fmla v1.4s, v19.4s, v6.4s\n"
+ "ldr s11, [%[wbptr], #16]\n"
+ "fmla v2.4s, v21.4s, v8.4s\n"
+ "prfm pldl1keep, [%[wbptr], #64]\n"
+ "fmla v0.4s, v23.4s, v9.4s\n"
+ "ldr s5, [%[wbptr], #8]\n"
+ "fmla v1.4s, v23.4s, v7.4s\n"
+ "add %[inptr0], %[inptr0], #4\n"
+ "fmla v2.4s, v22.4s, v6.4s\n"
+ "ldr s21, [%[inptr0]]\n"
+ "fmla v0.4s, v18.4s, v10.4s\n"
+ "ldr s9, [%[wbptr], #28]\n"
+ "movi v20.16b, #0\n"
+ "ldr s19, [%[inptr0], %[input_col_stride1]]\n"
+ "fmla v2.4s, v13.4s, v7.4s\n"
+ "ldr s18, [%[inptr0], x27]\n"
+ "fmla v0.4s, v13.4s, v12.4s\n"
+ "ldr s10, [%[wbptr], #20]\n"
+ "fmax v3.4s, v3.4s, v20.4s\n"
+ "add x24, x24, #4\n"
+ "fmax v2.4s, v2.4s, v20.4s\n"
+ "ldr s23, [x24]\n"
+ "str s3, [%[outptr0]]\n"
+ "fmla v0.4s, v14.4s, v8.4s\n"
+ "str s2, [%[outptr0], %[output_col_stride1]]\n"
+ "fmax v1.4s, v1.4s, v20.4s\n"
+ "mov v3.16b, v16.16b\n"
+ "ldr s12, [%[wbptr], #12]\n"
+ "str s1, [x19]\n"
+ "fmla v0.4s, v17.4s, v6.4s\n"
+ "mov v1.16b, v16.16b\n"
+ "ldr s8, [%[wbptr], #32]\n"
+ "mov v2.16b, v16.16b\n"
+ "ldr s13, [x24, %[input_col_stride1]]\n"
+ "fmla v0.4s, v15.4s, v7.4s\n"
+ "ldr s6, [%[wbptr], #24]\n"
+ "fmla v3.4s, v21.4s, v4.4s\n"
+ "add x25, x25, #4\n"
+ "ldr s14, [x25]\n"
+ "add x28, x28, #4\n"
+ "fmax v0.4s, v0.4s, v20.4s\n"
+ "ldr s7, [%[wbptr], #36]\n"
+ "fmla v3.4s, v23.4s, v11.4s\n"
+ "ldr s15, [x28]\n"
+ "str s0, [x19, %[output_col_stride1]]\n"
+ "fmla v1.4s, v14.4s, v4.4s\n"
+ "mov v0.16b, v16.16b\n"
+ "ldr s22, [x25, %[input_col_stride1]]\n"
+ "fmla v3.4s, v19.4s, v5.4s\n"
+ "add x26, x26, #4\n"
+ "add %[outptr0], %[outptr0], #4\n"
+ "add x19, x19, #4\n"
+ "subs x20, x20, #1\n"
+ "fmla v3.4s, v14.4s, v9.4s\n"
+ "bne 5b\n"
+ "6:\n"
+ "fmla v3.4s, v13.4s, v10.4s\n"
+ "ldr s17, [x24, x27]\n"
+ "fmla v2.4s, v18.4s, v4.4s\n"
+ "ldr s20, [%[inptr0], x23]\n"
+ "fmla v1.4s, v15.4s, v11.4s\n"
+ "ldr s19, [x26]\n"
+ "fmla v3.4s, v18.4s, v12.4s\n"
+ "ldr s13, [x28, %[input_col_stride1]]\n"
+ "fmla v2.4s, v17.4s, v11.4s\n"
+ "ldr s14, [x25, x27]\n"
+ "fmla v1.4s, v22.4s, v5.4s\n"
+ "ldr s15, [x24, x23]\n"
+ "fmla v3.4s, v22.4s, v8.4s\n"
+ "ldr s16, [%[inptr0], x22]\n"
+ "fmla v2.4s, v20.4s, v5.4s\n"
+ "ldr s20, [x26, %[input_col_stride1]]\n"
+ "fmla v1.4s, v19.4s, v9.4s\n"
+ "ldr s19, [x28, x27]\n"
+ "fmla v3.4s, v17.4s, v6.4s\n"
+ "ldr s21, [x25, x23]\n"
+ "fmla v2.4s, v14.4s, v9.4s\n"
+ "ldr s22, [x24, x22]\n"
+ "fmla v1.4s, v13.4s, v10.4s\n"
+ "ldr s23, [x26, x27]\n"
+ "fmla v3.4s, v14.4s, v7.4s\n"
+ "ldr s18, [x28, x23]\n"
+ "fmla v0.4s, v14.4s, v4.4s\n"
+ "ldr s13, [x25, x22]\n"
+ "fmla v1.4s, v14.4s, v12.4s\n"
+ "ldr s14, [x26, x23]\n"
+ "fmla v2.4s, v15.4s, v10.4s\n"
+ "ldr s17, [x28, x22]\n"
+ "fmla v0.4s, v19.4s, v11.4s\n"
+ "ldr s15, [x26, x22]\n"
+ "fmla v1.4s, v20.4s, v8.4s\n"
+ "add %[wbptr], %[wbptr], #40\n"
+ "fmla v2.4s, v16.4s, v12.4s\n"
+ "prfm pldl1keep, [%[wbptr], #64]\n"
+ "fmla v0.4s, v21.4s, v5.4s\n"
+ "add %[inptr0], %[inptr0], #4\n"
+ "fmla v1.4s, v19.4s, v6.4s\n"
+ "add x24, x24, #4\n"
+ "fmla v2.4s, v21.4s, v8.4s\n"
+ "add x25, x25, #4\n"
+ "fmla v0.4s, v23.4s, v9.4s\n"
+ "add x28, x28, #4\n"
+ "fmla v1.4s, v23.4s, v7.4s\n"
+ "add x26, x26, #4\n"
+ "fmla v2.4s, v22.4s, v6.4s\n"
+ "movi v20.16b, #0\n"
+ "fmla v0.4s, v18.4s, v10.4s\n"
+ "fmax v3.4s, v3.4s, v20.4s\n"
+ "fmla v2.4s, v13.4s, v7.4s\n"
+ "fmax v1.4s, v1.4s, v20.4s\n"
+ "str s3, [%[outptr0]]\n"
+ "fmla v0.4s, v13.4s, v12.4s\n"
+ "str s1, [x19]\n"
+ "fmax v2.4s, v2.4s, v20.4s\n"
+ "fmla v0.4s, v14.4s, v8.4s\n"
+ "str s2, [%[outptr0], %[output_col_stride1]]\n"
+ "add %[outptr0], %[outptr0], #4\n"
+ "fmla v0.4s, v17.4s, v6.4s\n"
+ "fmla v0.4s, v15.4s, v7.4s\n"
+ "fmax v0.4s, v0.4s, v20.4s\n"
+ "str s0, [x19, %[output_col_stride1]]\n"
+ "add x19, x19, #4\n"
+ "7:\n"
+ : [wbptr] "+r" (weight_bias_ptr), [outptr0] "+r" (output), [inptr0] "+r" (input)
+ : [n_channels] "r" ((long) n_channels), [output_col_stride1] "r" (output_col_stride * sizeof(float)), [input_col_stride1] "r" (input_col_stride * sizeof(float)), [input_row_stride] "r" (input_row_stride * sizeof(float)), [output_row_stride] "r" (output_row_stride * sizeof(float))
+ : "cc", "v0", "v1", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v2", "v20", "v21", "v22", "v23", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "memory"
+ );
+}
template <>
-const Conv::TileFn Conv::tilefn_generic = ConvImpl::template process_tile<false>;
+// Tile kernel for the 2x2-output / 3x3-kernel / 2x2-stride fp32 depthwise
+// convolution with ReLU6 activation.  Each of the four output accumulators
+// (v5, v11, v12, v10) is initialised from the bias, accumulates nine
+// weight*input FMLAs, and is clamped to [0, 6] — fmax against the zero
+// vector v20, fmin against v22 = splat(6.0) — before being stored.
+// weight_bias_ptr points at packed per-channel data: one bias vector
+// followed by the nine kernel taps (hence the #160 / #40 pointer advances
+// for the 4-channel and 1-channel passes respectively).  The row/column
+// strides are element counts; they are converted to byte strides in the
+// operand list at the bottom of the asm block.
+template <>
+void Conv::execute_tile<ActivationFunction::ReLU6>(
+  int n_channels,
+  const void *weight_bias_ptr,
+  const float *input,
+  const unsigned int input_row_stride,
+  const unsigned int input_col_stride,
+  float *output,
+  const unsigned int output_row_stride,
+  const unsigned int output_col_stride
+)
+{
+  __asm __volatile(
+ // x21/x28/x27/x22 walk successive input rows; x23/x26/x25 are multiples of
+ // the input column stride; x24 is the second output row.
+ "add x21, %[inptr0], %[input_row_stride]\n"
+ "add x23, %[input_col_stride1], %[input_col_stride1]\n"
+ "add x24, %[outptr0], %[output_row_stride]\n"
+ "add x28, x21, %[input_row_stride]\n"
+ "add x26, x23, %[input_col_stride1]\n"
+ "and x19, %[n_channels], #3\n"
+ "add x27, x28, %[input_row_stride]\n"
+ "add x25, x26, %[input_col_stride1]\n"
+ "lsr x20, %[n_channels], #2\n"
+ "add x22, x27, %[input_row_stride]\n"
+ // Skip the vectorised loop entirely when fewer than 4 channels remain.
+ "cbz x20, 4f\n"
+ // Vector section: x20 = n_channels / 4 passes, 4 channels per pass via
+ // 128-bit q-register loads.  q14 = bias, q0..q9 = the nine kernel taps.
+ "1:\n"
+ "ldr q14, [%[wbptr]]\n"
+ "subs x20, x20, #1\n"
+ "mov v5.16b, v14.16b\n"
+ "ldr q0, [%[wbptr], #16]\n"
+ "mov v11.16b, v14.16b\n"
+ "ldr q1, [%[wbptr], #32]\n"
+ "mov v12.16b, v14.16b\n"
+ "ldr q2, [%[wbptr], #48]\n"
+ "mov v10.16b, v14.16b\n"
+ "ldr q6, [%[wbptr], #64]\n"
+ "ldr q3, [%[wbptr], #80]\n"
+ "ldr q7, [%[wbptr], #96]\n"
+ "ldr q4, [%[wbptr], #112]\n"
+ "ldr q8, [%[wbptr], #128]\n"
+ "ldr q9, [%[wbptr], #144]\n"
+ "ldr q19, [%[inptr0]]\n"
+ "fmla v5.4s, v19.4s, v0.4s\n"
+ "ldr q15, [x21]\n"
+ "ldr q21, [%[inptr0], %[input_col_stride1]]\n"
+ "ldr q16, [x28]\n"
+ "fmla v11.4s, v16.4s, v0.4s\n"
+ "ldr q23, [x21, %[input_col_stride1]]\n"
+ "fmla v5.4s, v15.4s, v6.4s\n"
+ "ldr q18, [%[inptr0], x23]\n"
+ "ldr q17, [x27]\n"
+ "ldr q13, [x28, %[input_col_stride1]]\n"
+ "fmla v5.4s, v21.4s, v1.4s\n"
+ "fmla v5.4s, v16.4s, v4.4s\n"
+ "beq 3f\n"
+ // Steady-state vector loop: finish this pass's accumulations while
+ // re-loading the (advanced) weights and the next pass's leading inputs.
+ "2:\n"
+ "fmla v5.4s, v23.4s, v3.4s\n"
+ "ldr q21, [x21, x23]\n"
+ "fmla v12.4s, v18.4s, v0.4s\n"
+ "ldr q20, [%[inptr0], x26]\n"
+ "fmla v11.4s, v17.4s, v6.4s\n"
+ "ldr q19, [x22]\n"
+ "fmla v5.4s, v18.4s, v2.4s\n"
+ "ldr q15, [x27, %[input_col_stride1]]\n"
+ "fmla v12.4s, v21.4s, v6.4s\n"
+ "ldr q16, [x28, x23]\n"
+ "fmla v11.4s, v13.4s, v1.4s\n"
+ "ldr q17, [x21, x26]\n"
+ "fmla v5.4s, v13.4s, v8.4s\n"
+ "ldr q14, [%[inptr0], x25]\n"
+ "fmla v12.4s, v20.4s, v1.4s\n"
+ "ldr q20, [x22, %[input_col_stride1]]\n"
+ "fmla v11.4s, v19.4s, v4.4s\n"
+ "ldr q19, [x27, x23]\n"
+ "fmla v5.4s, v21.4s, v7.4s\n"
+ "ldr q22, [x28, x26]\n"
+ "fmla v12.4s, v16.4s, v4.4s\n"
+ "ldr q21, [x21, x25]\n"
+ "fmla v11.4s, v15.4s, v3.4s\n"
+ "ldr q23, [x22, x23]\n"
+ "fmla v5.4s, v16.4s, v9.4s\n"
+ "ldr q18, [x27, x26]\n"
+ "fmla v10.4s, v16.4s, v0.4s\n"
+ "ldr q15, [x28, x25]\n"
+ "fmla v11.4s, v16.4s, v2.4s\n"
+ "ldr q16, [x22, x26]\n"
+ "fmla v12.4s, v17.4s, v3.4s\n"
+ "ldr q17, [x27, x25]\n"
+ "fmla v10.4s, v19.4s, v6.4s\n"
+ "ldr q13, [x22, x25]\n"
+ "fmla v11.4s, v20.4s, v8.4s\n"
+ // Advance to the next channel group's packed weights (10 vectors * 16 B).
+ "add %[wbptr], %[wbptr], #160\n"
+ "fmla v12.4s, v14.4s, v2.4s\n"
+ "ldr q14, [%[wbptr]]\n"
+ "fmla v10.4s, v22.4s, v1.4s\n"
+ "ldr q0, [%[wbptr], #16]\n"
+ "fmla v11.4s, v19.4s, v7.4s\n"
+ "ldr q6, [%[wbptr], #64]\n"
+ "fmla v12.4s, v22.4s, v8.4s\n"
+ "prfm pldl1keep, [%[wbptr], #64]\n"
+ "fmla v10.4s, v23.4s, v4.4s\n"
+ "ldr q1, [%[wbptr], #32]\n"
+ "fmla v11.4s, v23.4s, v9.4s\n"
+ "add %[inptr0], %[inptr0], #16\n"
+ "fmla v12.4s, v21.4s, v7.4s\n"
+ "ldr q19, [%[inptr0]]\n"
+ "fmla v10.4s, v18.4s, v3.4s\n"
+ "ldr q4, [%[wbptr], #112]\n"
+ "movi v20.16b, #0\n"
+ "ldr q21, [%[inptr0], %[input_col_stride1]]\n"
+ "fmla v12.4s, v15.4s, v9.4s\n"
+ "ldr q18, [%[inptr0], x23]\n"
+ "fmla v10.4s, v15.4s, v2.4s\n"
+ "ldr q3, [%[wbptr], #80]\n"
+ // ReLU6 bounds: v20 = 0, v22 = 6.0 (per lane).
+ "fmov v22.4s, #6.0\n"
+ "add x21, x21, #16\n"
+ "fmax v5.4s, v5.4s, v20.4s\n"
+ "ldr q15, [x21]\n"
+ "fmla v10.4s, v16.4s, v8.4s\n"
+ "ldr q2, [%[wbptr], #48]\n"
+ "fmin v5.4s, v5.4s, v22.4s\n"
+ "ldr q23, [x21, %[input_col_stride1]]\n"
+ "fmax v12.4s, v12.4s, v20.4s\n"
+ "add x28, x28, #16\n"
+ "str q5, [%[outptr0]]\n"
+ "fmla v10.4s, v17.4s, v7.4s\n"
+ "fmin v12.4s, v12.4s, v22.4s\n"
+ "ldr q8, [%[wbptr], #128]\n"
+ "fmax v11.4s, v11.4s, v20.4s\n"
+ "ldr q16, [x28]\n"
+ "str q12, [%[outptr0], %[output_col_stride1]]\n"
+ "fmla v10.4s, v13.4s, v9.4s\n"
+ "fmin v11.4s, v11.4s, v22.4s\n"
+ "ldr q7, [%[wbptr], #96]\n"
+ "mov v5.16b, v14.16b\n"
+ "ldr q13, [x28, %[input_col_stride1]]\n"
+ "str q11, [x24]\n"
+ "fmax v10.4s, v10.4s, v20.4s\n"
+ "mov v11.16b, v14.16b\n"
+ "ldr q9, [%[wbptr], #144]\n"
+ "fmin v10.4s, v10.4s, v22.4s\n"
+ "add x27, x27, #16\n"
+ "mov v12.16b, v14.16b\n"
+ "ldr q17, [x27]\n"
+ "str q10, [x24, %[output_col_stride1]]\n"
+ "fmla v5.4s, v19.4s, v0.4s\n"
+ "mov v10.16b, v14.16b\n"
+ "add x22, x22, #16\n"
+ "fmla v11.4s, v16.4s, v0.4s\n"
+ "add %[outptr0], %[outptr0], #16\n"
+ "fmla v5.4s, v15.4s, v6.4s\n"
+ "add x24, x24, #16\n"
+ "subs x20, x20, #1\n"
+ "fmla v5.4s, v21.4s, v1.4s\n"
+ "fmla v5.4s, v16.4s, v4.4s\n"
+ "bne 2b\n"
+ // Final vector pass: complete the accumulations, clamp to [0, 6] and
+ // store the 2x2 output tile; only pointer bumps remain (no re-loads of
+ // the next pass's weights/inputs).
+ "3:\n"
+ "fmla v5.4s, v23.4s, v3.4s\n"
+ "ldr q21, [x21, x23]\n"
+ "fmla v12.4s, v18.4s, v0.4s\n"
+ "ldr q20, [%[inptr0], x26]\n"
+ "fmla v11.4s, v17.4s, v6.4s\n"
+ "ldr q19, [x22]\n"
+ "fmla v5.4s, v18.4s, v2.4s\n"
+ "ldr q15, [x27, %[input_col_stride1]]\n"
+ "fmla v12.4s, v21.4s, v6.4s\n"
+ "ldr q16, [x28, x23]\n"
+ "fmla v11.4s, v13.4s, v1.4s\n"
+ "ldr q17, [x21, x26]\n"
+ "fmla v5.4s, v13.4s, v8.4s\n"
+ "ldr q14, [%[inptr0], x25]\n"
+ "fmla v12.4s, v20.4s, v1.4s\n"
+ "ldr q20, [x22, %[input_col_stride1]]\n"
+ "fmla v11.4s, v19.4s, v4.4s\n"
+ "ldr q19, [x27, x23]\n"
+ "fmla v5.4s, v21.4s, v7.4s\n"
+ "ldr q22, [x28, x26]\n"
+ "fmla v12.4s, v16.4s, v4.4s\n"
+ "ldr q21, [x21, x25]\n"
+ "fmla v11.4s, v15.4s, v3.4s\n"
+ "ldr q23, [x22, x23]\n"
+ "fmla v5.4s, v16.4s, v9.4s\n"
+ "ldr q18, [x27, x26]\n"
+ "fmla v10.4s, v16.4s, v0.4s\n"
+ "ldr q15, [x28, x25]\n"
+ "fmla v11.4s, v16.4s, v2.4s\n"
+ "ldr q16, [x22, x26]\n"
+ "fmla v12.4s, v17.4s, v3.4s\n"
+ "ldr q17, [x27, x25]\n"
+ "fmla v10.4s, v19.4s, v6.4s\n"
+ "ldr q13, [x22, x25]\n"
+ "fmla v11.4s, v20.4s, v8.4s\n"
+ "add %[wbptr], %[wbptr], #160\n"
+ "fmla v12.4s, v14.4s, v2.4s\n"
+ "prfm pldl1keep, [%[wbptr], #64]\n"
+ "fmla v10.4s, v22.4s, v1.4s\n"
+ "add %[inptr0], %[inptr0], #16\n"
+ "fmla v11.4s, v19.4s, v7.4s\n"
+ "add x21, x21, #16\n"
+ "fmla v12.4s, v22.4s, v8.4s\n"
+ "add x28, x28, #16\n"
+ "fmla v10.4s, v23.4s, v4.4s\n"
+ "add x27, x27, #16\n"
+ "fmla v11.4s, v23.4s, v9.4s\n"
+ "add x22, x22, #16\n"
+ "fmla v12.4s, v21.4s, v7.4s\n"
+ "movi v20.16b, #0\n"
+ "fmla v10.4s, v18.4s, v3.4s\n"
+ "fmov v22.4s, #6.0\n"
+ "fmax v5.4s, v5.4s, v20.4s\n"
+ "fmax v11.4s, v11.4s, v20.4s\n"
+ "fmla v12.4s, v15.4s, v9.4s\n"
+ "fmla v10.4s, v15.4s, v2.4s\n"
+ "fmin v5.4s, v5.4s, v22.4s\n"
+ "fmin v11.4s, v11.4s, v22.4s\n"
+ "fmax v12.4s, v12.4s, v20.4s\n"
+ "str q5, [%[outptr0]]\n"
+ "str q11, [x24]\n"
+ "fmla v10.4s, v16.4s, v8.4s\n"
+ "fmin v12.4s, v12.4s, v22.4s\n"
+ "str q12, [%[outptr0], %[output_col_stride1]]\n"
+ "fmla v10.4s, v17.4s, v7.4s\n"
+ "add %[outptr0], %[outptr0], #16\n"
+ "fmla v10.4s, v13.4s, v9.4s\n"
+ "fmax v10.4s, v10.4s, v20.4s\n"
+ "fmin v10.4s, v10.4s, v22.4s\n"
+ "str q10, [x24, %[output_col_stride1]]\n"
+ "add x24, x24, #16\n"
+ // Scalar tail: x19 = n_channels & 3 leftover channels, processed one
+ // float at a time with s-register loads/stores (same tap order as above,
+ // weight offsets are now 4-byte and wbptr advances by #40 per channel).
+ "4:\n"
+ "cbz x19, 7f\n"
+ "ldr s14, [%[wbptr]]\n"
+ "mov v5.16b, v14.16b\n"
+ "ldr s0, [%[wbptr], #4]\n"
+ "mov v11.16b, v14.16b\n"
+ "ldr s1, [%[wbptr], #8]\n"
+ "mov v12.16b, v14.16b\n"
+ "ldr s2, [%[wbptr], #12]\n"
+ "mov v10.16b, v14.16b\n"
+ "ldr s6, [%[wbptr], #16]\n"
+ "ldr s3, [%[wbptr], #20]\n"
+ "subs x19, x19, #1\n"
+ "ldr s7, [%[wbptr], #24]\n"
+ "ldr s4, [%[wbptr], #28]\n"
+ "ldr s8, [%[wbptr], #32]\n"
+ "ldr s9, [%[wbptr], #36]\n"
+ "ldr s19, [%[inptr0]]\n"
+ "ldr s15, [x21]\n"
+ "fmla v5.4s, v19.4s, v0.4s\n"
+ "ldr s21, [%[inptr0], %[input_col_stride1]]\n"
+ "ldr s16, [x28]\n"
+ "ldr s23, [x21, %[input_col_stride1]]\n"
+ "fmla v11.4s, v16.4s, v0.4s\n"
+ "ldr s18, [%[inptr0], x23]\n"
+ "fmla v5.4s, v15.4s, v6.4s\n"
+ "ldr s17, [x27]\n"
+ "ldr s13, [x28, %[input_col_stride1]]\n"
+ "fmla v5.4s, v21.4s, v1.4s\n"
+ "fmla v5.4s, v16.4s, v4.4s\n"
+ "beq 6f\n"
+ // Steady-state scalar loop (mirrors label 2 at single-channel width).
+ "5:\n"
+ "fmla v5.4s, v23.4s, v3.4s\n"
+ "ldr s21, [x21, x23]\n"
+ "fmla v12.4s, v18.4s, v0.4s\n"
+ "ldr s20, [%[inptr0], x26]\n"
+ "fmla v11.4s, v17.4s, v6.4s\n"
+ "ldr s19, [x22]\n"
+ "fmla v5.4s, v18.4s, v2.4s\n"
+ "ldr s15, [x27, %[input_col_stride1]]\n"
+ "fmla v12.4s, v21.4s, v6.4s\n"
+ "ldr s16, [x28, x23]\n"
+ "fmla v11.4s, v13.4s, v1.4s\n"
+ "ldr s17, [x21, x26]\n"
+ "fmla v5.4s, v13.4s, v8.4s\n"
+ "ldr s14, [%[inptr0], x25]\n"
+ "fmla v12.4s, v20.4s, v1.4s\n"
+ "ldr s20, [x22, %[input_col_stride1]]\n"
+ "fmla v11.4s, v19.4s, v4.4s\n"
+ "ldr s19, [x27, x23]\n"
+ "fmla v5.4s, v21.4s, v7.4s\n"
+ "ldr s22, [x28, x26]\n"
+ "fmla v12.4s, v16.4s, v4.4s\n"
+ "ldr s21, [x21, x25]\n"
+ "fmla v11.4s, v15.4s, v3.4s\n"
+ "ldr s23, [x22, x23]\n"
+ "fmla v5.4s, v16.4s, v9.4s\n"
+ "ldr s18, [x27, x26]\n"
+ "fmla v10.4s, v16.4s, v0.4s\n"
+ "ldr s15, [x28, x25]\n"
+ "fmla v11.4s, v16.4s, v2.4s\n"
+ "ldr s16, [x22, x26]\n"
+ "fmla v12.4s, v17.4s, v3.4s\n"
+ "ldr s17, [x27, x25]\n"
+ "fmla v10.4s, v19.4s, v6.4s\n"
+ "ldr s13, [x22, x25]\n"
+ "fmla v11.4s, v20.4s, v8.4s\n"
+ "add %[wbptr], %[wbptr], #40\n"
+ "fmla v12.4s, v14.4s, v2.4s\n"
+ "ldr s14, [%[wbptr]]\n"
+ "fmla v10.4s, v22.4s, v1.4s\n"
+ "ldr s0, [%[wbptr], #4]\n"
+ "fmla v11.4s, v19.4s, v7.4s\n"
+ "ldr s6, [%[wbptr], #16]\n"
+ "fmla v12.4s, v22.4s, v8.4s\n"
+ "prfm pldl1keep, [%[wbptr], #64]\n"
+ "fmla v10.4s, v23.4s, v4.4s\n"
+ "ldr s1, [%[wbptr], #8]\n"
+ "fmla v11.4s, v23.4s, v9.4s\n"
+ "add %[inptr0], %[inptr0], #4\n"
+ "fmla v12.4s, v21.4s, v7.4s\n"
+ "ldr s19, [%[inptr0]]\n"
+ "fmla v10.4s, v18.4s, v3.4s\n"
+ "ldr s4, [%[wbptr], #28]\n"
+ "movi v20.16b, #0\n"
+ "ldr s21, [%[inptr0], %[input_col_stride1]]\n"
+ "fmla v12.4s, v15.4s, v9.4s\n"
+ "ldr s18, [%[inptr0], x23]\n"
+ "fmla v10.4s, v15.4s, v2.4s\n"
+ "ldr s3, [%[wbptr], #20]\n"
+ "fmov v22.4s, #6.0\n"
+ "add x21, x21, #4\n"
+ "fmax v5.4s, v5.4s, v20.4s\n"
+ "ldr s15, [x21]\n"
+ "fmla v10.4s, v16.4s, v8.4s\n"
+ "ldr s2, [%[wbptr], #12]\n"
+ "fmin v5.4s, v5.4s, v22.4s\n"
+ "ldr s23, [x21, %[input_col_stride1]]\n"
+ "fmax v12.4s, v12.4s, v20.4s\n"
+ "add x28, x28, #4\n"
+ "str s5, [%[outptr0]]\n"
+ "fmla v10.4s, v17.4s, v7.4s\n"
+ "fmin v12.4s, v12.4s, v22.4s\n"
+ "ldr s8, [%[wbptr], #32]\n"
+ "fmax v11.4s, v11.4s, v20.4s\n"
+ "ldr s16, [x28]\n"
+ "str s12, [%[outptr0], %[output_col_stride1]]\n"
+ "fmla v10.4s, v13.4s, v9.4s\n"
+ "fmin v11.4s, v11.4s, v22.4s\n"
+ "ldr s7, [%[wbptr], #24]\n"
+ "mov v5.16b, v14.16b\n"
+ "ldr s13, [x28, %[input_col_stride1]]\n"
+ "str s11, [x24]\n"
+ "fmax v10.4s, v10.4s, v20.4s\n"
+ "mov v11.16b, v14.16b\n"
+ "ldr s9, [%[wbptr], #36]\n"
+ "fmin v10.4s, v10.4s, v22.4s\n"
+ "add x27, x27, #4\n"
+ "mov v12.16b, v14.16b\n"
+ "ldr s17, [x27]\n"
+ "str s10, [x24, %[output_col_stride1]]\n"
+ "fmla v5.4s, v19.4s, v0.4s\n"
+ "mov v10.16b, v14.16b\n"
+ "add x22, x22, #4\n"
+ "fmla v11.4s, v16.4s, v0.4s\n"
+ "add %[outptr0], %[outptr0], #4\n"
+ "fmla v5.4s, v15.4s, v6.4s\n"
+ "add x24, x24, #4\n"
+ "subs x19, x19, #1\n"
+ "fmla v5.4s, v21.4s, v1.4s\n"
+ "fmla v5.4s, v16.4s, v4.4s\n"
+ "bne 5b\n"
+ // Last scalar channel: finish the accumulations, clamp and store.
+ "6:\n"
+ "fmla v5.4s, v23.4s, v3.4s\n"
+ "ldr s21, [x21, x23]\n"
+ "fmla v12.4s, v18.4s, v0.4s\n"
+ "ldr s20, [%[inptr0], x26]\n"
+ "fmla v11.4s, v17.4s, v6.4s\n"
+ "ldr s19, [x22]\n"
+ "fmla v5.4s, v18.4s, v2.4s\n"
+ "ldr s15, [x27, %[input_col_stride1]]\n"
+ "fmla v12.4s, v21.4s, v6.4s\n"
+ "ldr s16, [x28, x23]\n"
+ "fmla v11.4s, v13.4s, v1.4s\n"
+ "ldr s17, [x21, x26]\n"
+ "fmla v5.4s, v13.4s, v8.4s\n"
+ "ldr s14, [%[inptr0], x25]\n"
+ "fmla v12.4s, v20.4s, v1.4s\n"
+ "ldr s20, [x22, %[input_col_stride1]]\n"
+ "fmla v11.4s, v19.4s, v4.4s\n"
+ "ldr s19, [x27, x23]\n"
+ "fmla v5.4s, v21.4s, v7.4s\n"
+ "ldr s22, [x28, x26]\n"
+ "fmla v12.4s, v16.4s, v4.4s\n"
+ "ldr s21, [x21, x25]\n"
+ "fmla v11.4s, v15.4s, v3.4s\n"
+ "ldr s23, [x22, x23]\n"
+ "fmla v5.4s, v16.4s, v9.4s\n"
+ "ldr s18, [x27, x26]\n"
+ "fmla v10.4s, v16.4s, v0.4s\n"
+ "ldr s15, [x28, x25]\n"
+ "fmla v11.4s, v16.4s, v2.4s\n"
+ "ldr s16, [x22, x26]\n"
+ "fmla v12.4s, v17.4s, v3.4s\n"
+ "ldr s17, [x27, x25]\n"
+ "fmla v10.4s, v19.4s, v6.4s\n"
+ "ldr s13, [x22, x25]\n"
+ "fmla v11.4s, v20.4s, v8.4s\n"
+ "add %[wbptr], %[wbptr], #40\n"
+ "fmla v12.4s, v14.4s, v2.4s\n"
+ "prfm pldl1keep, [%[wbptr], #64]\n"
+ "fmla v10.4s, v22.4s, v1.4s\n"
+ "add %[inptr0], %[inptr0], #4\n"
+ "fmla v11.4s, v19.4s, v7.4s\n"
+ "add x21, x21, #4\n"
+ "fmla v12.4s, v22.4s, v8.4s\n"
+ "add x28, x28, #4\n"
+ "fmla v10.4s, v23.4s, v4.4s\n"
+ "add x27, x27, #4\n"
+ "fmla v11.4s, v23.4s, v9.4s\n"
+ "add x22, x22, #4\n"
+ "fmla v12.4s, v21.4s, v7.4s\n"
+ "movi v20.16b, #0\n"
+ "fmla v10.4s, v18.4s, v3.4s\n"
+ "fmov v22.4s, #6.0\n"
+ "fmax v5.4s, v5.4s, v20.4s\n"
+ "fmax v11.4s, v11.4s, v20.4s\n"
+ "fmla v12.4s, v15.4s, v9.4s\n"
+ "fmla v10.4s, v15.4s, v2.4s\n"
+ "fmin v5.4s, v5.4s, v22.4s\n"
+ "fmin v11.4s, v11.4s, v22.4s\n"
+ "fmax v12.4s, v12.4s, v20.4s\n"
+ "str s5, [%[outptr0]]\n"
+ "str s11, [x24]\n"
+ "fmla v10.4s, v16.4s, v8.4s\n"
+ "fmin v12.4s, v12.4s, v22.4s\n"
+ "str s12, [%[outptr0], %[output_col_stride1]]\n"
+ "fmla v10.4s, v17.4s, v7.4s\n"
+ "add %[outptr0], %[outptr0], #4\n"
+ "fmla v10.4s, v13.4s, v9.4s\n"
+ "fmax v10.4s, v10.4s, v20.4s\n"
+ "fmin v10.4s, v10.4s, v22.4s\n"
+ "str s10, [x24, %[output_col_stride1]]\n"
+ "add x24, x24, #4\n"
+ "7:\n"
+ // Read-write operands advance through the data; stride inputs are
+ // converted from element counts to byte strides here.
+ : [outptr0] "+r" (output), [inptr0] "+r" (input), [wbptr] "+r" (weight_bias_ptr)
+ : [input_row_stride] "r" (input_row_stride * sizeof(float)), [output_row_stride] "r" (output_row_stride * sizeof(float)), [output_col_stride1] "r" (output_col_stride * sizeof(float)), [n_channels] "r" ((long) n_channels), [input_col_stride1] "r" (input_col_stride * sizeof(float))
+ : "cc", "v0", "v1", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v2", "v20", "v21", "v22", "v23", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "memory"
+ );
+}
+
+#endif // __aarch64__
+
+template class DepthwiseConvolution<2, 2, 3, 3, 2, 2, float, float, float>;
-template class DepthwiseConvolution<2, 2, 3, 3, 2, 2, float, float>;
} // namespace depthwise