From 8f43d745b170aefca269a087fc045d8af3813c33 Mon Sep 17 00:00:00 2001
From: Pablo Tello
Date: Wed, 27 Mar 2019 09:28:32 +0000
Subject: COMPMID-2063: New Winograd implementation

Refactoring of the Winograd code, reducing the size of the binaries by
about 8x.

Change-Id: If8845bda324573e1a5cf436f354ac8603e88a92e
Signed-off-by: Pablo Tello
Reviewed-on: https://review.mlplatform.org/c/959
Comments-Addressed: Arm Jenkins
Tested-by: Anthony Barbier
Reviewed-by: Georgios Pinitas
---
 .../output_4x4_3x3_fp32_fp32_integers.cpp | 1855 ++++++++++++++++++++
 1 file changed, 1855 insertions(+)
 create mode 100644 src/core/NEON/kernels/convolution/winograd/winograd_transforms/output_4x4_3x3_fp32_fp32_integers.cpp

diff --git a/src/core/NEON/kernels/convolution/winograd/winograd_transforms/output_4x4_3x3_fp32_fp32_integers.cpp b/src/core/NEON/kernels/convolution/winograd/winograd_transforms/output_4x4_3x3_fp32_fp32_integers.cpp
new file mode 100644
index 0000000000..fd16a4df1c
--- /dev/null
+++ b/src/core/NEON/kernels/convolution/winograd/winograd_transforms/output_4x4_3x3_fp32_fp32_integers.cpp
@@ -0,0 +1,1855 @@
+/*
+ * Copyright (c) 2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */ + +#include "arm.hpp" +#include "output.hpp" + +namespace winograd +{ + +#ifdef __aarch64__ + +template <> +void OutputTransform<3, 3, 6, 6, float, float, winograd::WinogradRoots::Integers>::transform_tile( + int n_channels, + const float* inptr, + const int matrix_stride, + const float* bptr, + float* output, + const int output_row_stride, + const int output_col_stride +) +{ + const float coeffs[2] = {2.0f, 4.0f}; + if (bptr != nullptr) + { + __asm__ __volatile__ ( + "ldr d0, [%[pcoeffs]]\n" + "add x21, %[in_col_stride1], %[in_col_stride1]\n" + "add x22, x21, %[in_col_stride1]\n" + "add x25, %[inptr0], %[in_row_stride]\n" + "add x15, %[output_col_stride1], %[output_col_stride1]\n" + "add x23, x22, %[in_col_stride1]\n" + "add x13, x25, %[in_row_stride]\n" + "add x16, x15, %[output_col_stride1]\n" + "add x24, x23, %[in_col_stride1]\n" + "add x26, x13, %[in_row_stride]\n" + "add x17, %[outptr0], %[output_row_stride]\n" + "add x14, x26, %[in_row_stride]\n" + "add x28, x17, %[output_row_stride]\n" + "lsr x19, %[n_channels], #2\n" + "add x27, x14, %[in_row_stride]\n" + "add x18, x28, %[output_row_stride]\n" + "and x20, %[n_channels], #3\n" + "cbz x19, 4f\n" + "1:\n" + "ldr q19, [%[inptr0]]\n" + "subs x19, x19, #1\n" + "ldr q20, [%[inptr0], %[in_col_stride1]]\n" + "ldr q4, [%[inptr0], x21]\n" + "fadd v1.4s, v20.4s, v4.4s\n" + "ldr q17, [%[inptr0], x22]\n" + "fsub v7.4s, v20.4s, v4.4s\n" + "ldr q22, [%[inptr0], x23]\n" + "fadd v5.4s, v17.4s, v22.4s\n" + "ldr q18, [%[inptr0], x24]\n" + "fsub v10.4s, v17.4s, v22.4s\n" + "ldr q25, [x25]\n" + "fadd v8.4s, v19.4s, v1.4s\n" + "ldr q12, [x25, %[in_col_stride1]]\n" + "mov v4.16b, v1.16b\n" + "ldr q23, [x25, x21]\n" + "mov v1.16b, v7.16b\n" + "ldr q9, [x25, x22]\n" + "fmul v10.4s, v10.4s, v0.s[0]\n" + "ldr q11, [x25, x23]\n" + "fadd v8.4s, v8.4s, v5.4s\n" + "ldr q6, [x25, x24]\n" + "fmla v4.4s, v5.4s, v0.s[1]\n" + "fadd v7.4s, v7.4s, v10.4s\n" + "fmla v1.4s, v10.4s, v0.s[1]\n" + "fadd v1.4s, v1.4s, v18.4s\n" + "beq 3f\n" + "2:\n" + "fadd v3.4s, v12.4s, v23.4s\n" + "ldr q2, [x13]\n" + "fadd v27.4s, v9.4s, v11.4s\n" + "ldr q21, [x13, %[in_col_stride1]]\n" + "fsub v16.4s, v12.4s, v23.4s\n" + "ldr q26, [x13, x21]\n" + "fsub v9.4s, v9.4s, v11.4s\n" + "ldr q17, [x13, x22]\n" + "fadd v14.4s, v25.4s, v3.4s\n" + "ldr q19, [x13, x23]\n" + "mov v11.16b, v3.16b\n" + "ldr q10, [x13, x24]\n" + "mov v3.16b, v16.16b\n" + "ldr q15, [x26]\n" + "fmul v9.4s, v9.4s, v0.s[0]\n" + "ldr q12, [x26, %[in_col_stride1]]\n" + "fadd v14.4s, v14.4s, v27.4s\n" + "ldr q20, [x26, x21]\n" + "fmla v11.4s, v27.4s, v0.s[1]\n" + "ldr q24, [x26, x22]\n" + "fadd v23.4s, v21.4s, v26.4s\n" + "ldr q29, [x26, x23]\n" + "fadd v13.4s, v16.4s, v9.4s\n" + "ldr q5, [x26, x24]\n" + "fmla v3.4s, v9.4s, v0.s[1]\n" + "ldr q18, [x14]\n" + "fadd v30.4s, v17.4s, v19.4s\n" + "add %[inptr0], %[inptr0], #16\n" + "fadd v16.4s, v2.4s, v23.4s\n" + "add x25, x25, #16\n" + "fsub v21.4s, v21.4s, v26.4s\n" + "ldr q22, [x14, %[in_col_stride1]]\n" + "fadd v3.4s, v3.4s, v6.4s\n" + "ldr q28, [x14, x21]\n" + "fsub v19.4s, v17.4s, v19.4s\n" + "add x13, x13, #16\n" + "fadd v16.4s, v16.4s, v30.4s\n" + "add x26, x26, #16\n" + "mov v17.16b, v23.16b\n" + "subs x19, x19, #1\n" + "fadd v26.4s, v12.4s, v20.4s\n" + "fsub v9.4s, v12.4s, v20.4s\n" + "fmul v19.4s, v19.4s, v0.s[0]\n" + "ldr q20, [x14, x22]\n" + "fmla v17.4s, v30.4s, v0.s[1]\n" + "fadd v25.4s, v24.4s, v29.4s\n" + "fsub v12.4s, v24.4s, v29.4s\n" + "fadd v24.4s, v22.4s, v28.4s\n" + "fadd v23.4s, v15.4s, v26.4s\n" + "mov v15.16b, v26.16b\n" + "fsub v22.4s, v22.4s, 
v28.4s\n" + "fadd v29.4s, v14.4s, v16.4s\n" + "fsub v16.4s, v14.4s, v16.4s\n" + "ldr q28, [x14, x23]\n" + "fmul v12.4s, v12.4s, v0.s[0]\n" + "fmla v15.4s, v25.4s, v0.s[1]\n" + "fadd v23.4s, v23.4s, v25.4s\n" + "mov v6.16b, v21.16b\n" + "fadd v30.4s, v21.4s, v19.4s\n" + "fadd v26.4s, v18.4s, v24.4s\n" + "mov v25.16b, v24.16b\n" + "fadd v18.4s, v8.4s, v29.4s\n" + "fmla v6.4s, v19.4s, v0.s[1]\n" + "fadd v27.4s, v20.4s, v28.4s\n" + "fsub v21.4s, v20.4s, v28.4s\n" + "mov v19.16b, v29.16b\n" + "fadd v29.4s, v13.4s, v30.4s\n" + "fsub v8.4s, v13.4s, v30.4s\n" + "fadd v14.4s, v9.4s, v12.4s\n" + "fadd v6.4s, v6.4s, v10.4s\n" + "ldr q20, [x14, x24]\n" + "fadd v26.4s, v26.4s, v27.4s\n" + "add x14, x14, #16\n" + "fmla v9.4s, v12.4s, v0.s[1]\n" + "ldr q24, [x27]\n" + "fmul v21.4s, v21.4s, v0.s[0]\n" + "fmla v25.4s, v27.4s, v0.s[1]\n" + "fadd v10.4s, v7.4s, v29.4s\n" + "ldr q2, [%[bptr]]\n" + "mov v7.16b, v29.16b\n" + "add %[bptr], %[bptr], #16\n" + "fadd v9.4s, v9.4s, v5.4s\n" + "fadd v13.4s, v23.4s, v26.4s\n" + "fsub v23.4s, v23.4s, v26.4s\n" + "fadd v27.4s, v11.4s, v17.4s\n" + "fsub v11.4s, v11.4s, v17.4s\n" + "fadd v30.4s, v15.4s, v25.4s\n" + "fsub v15.4s, v15.4s, v25.4s\n" + "ldr q28, [x27, %[in_col_stride1]]\n" + "fadd v18.4s, v18.4s, v13.4s\n" + "fmla v19.4s, v13.4s, v0.s[1]\n" + "fadd v26.4s, v22.4s, v21.4s\n" + "mov v12.16b, v22.16b\n" + "fmul v23.4s, v23.4s, v0.s[0]\n" + "fadd v17.4s, v4.4s, v27.4s\n" + "fmul v15.4s, v15.4s, v0.s[0]\n" + "mov v4.16b, v27.16b\n" + "fmla v12.4s, v21.4s, v0.s[1]\n" + "ldr q22, [x27, x21]\n" + "fadd v18.4s, v18.4s, v2.4s\n" + "fadd v19.4s, v19.4s, v2.4s\n" + "fadd v17.4s, v17.4s, v30.4s\n" + "fmla v4.4s, v30.4s, v0.s[1]\n" + "fadd v25.4s, v28.4s, v22.4s\n" + "fsub v27.4s, v28.4s, v22.4s\n" + "fadd v12.4s, v12.4s, v20.4s\n" + "ldr q29, [x27, x22]\n" + "str q18, [%[outptr0]]\n" + "fadd v22.4s, v16.4s, v23.4s\n" + "str q19, [x28]\n" + "fadd v28.4s, v24.4s, v25.4s\n" + "ldr q30, [x27, x23]\n" + "fadd v20.4s, v29.4s, v30.4s\n" + "fsub v18.4s, v29.4s, v30.4s\n" + "mov v21.16b, v25.16b\n" + "ldr q25, [x27, x24]\n" + "fmla v16.4s, v23.4s, v0.s[1]\n" + "ldr q19, [%[inptr0]]\n" + "fadd v17.4s, v17.4s, v2.4s\n" + "add x27, x27, #16\n" + "fadd v28.4s, v28.4s, v20.4s\n" + "fmul v18.4s, v18.4s, v0.s[0]\n" + "fmla v21.4s, v20.4s, v0.s[1]\n" + "ldr q20, [%[inptr0], %[in_col_stride1]]\n" + "fadd v22.4s, v22.4s, v2.4s\n" + "fadd v4.4s, v4.4s, v2.4s\n" + "str q17, [%[outptr0], x15]\n" + "mov v24.16b, v27.16b\n" + "fadd v23.4s, v27.4s, v18.4s\n" + "fadd v16.4s, v16.4s, v28.4s\n" + "fadd v13.4s, v14.4s, v26.4s\n" + "fsub v30.4s, v14.4s, v26.4s\n" + "str q22, [x17]\n" + "fmla v24.4s, v18.4s, v0.s[1]\n" + "str q4, [x28, x15]\n" + "mov v14.16b, v8.16b\n" + "fadd v29.4s, v11.4s, v15.4s\n" + "ldr q4, [%[inptr0], x21]\n" + "fadd v10.4s, v10.4s, v13.4s\n" + "ldr q17, [%[inptr0], x22]\n" + "fadd v24.4s, v24.4s, v25.4s\n" + "ldr q22, [%[inptr0], x23]\n" + "fmul v30.4s, v30.4s, v0.s[0]\n" + "fmla v7.4s, v13.4s, v0.s[1]\n" + "mov v26.16b, v11.16b\n" + "fadd v13.4s, v3.4s, v6.4s\n" + "fsub v3.4s, v3.4s, v6.4s\n" + "ldr q18, [%[inptr0], x24]\n" + "fadd v10.4s, v10.4s, v2.4s\n" + "fadd v29.4s, v29.4s, v2.4s\n" + "fadd v8.4s, v8.4s, v30.4s\n" + "fmla v14.4s, v30.4s, v0.s[1]\n" + "fmla v26.4s, v15.4s, v0.s[1]\n" + "ldr q25, [x25]\n" + "fadd v27.4s, v9.4s, v12.4s\n" + "fadd v1.4s, v1.4s, v13.4s\n" + "str q10, [%[outptr0], %[output_col_stride1]]\n" + "fsub v6.4s, v9.4s, v12.4s\n" + "str q29, [x17, x15]\n" + "fadd v14.4s, v14.4s, v23.4s\n" + "fadd v26.4s, v26.4s, v21.4s\n" + "ldr q12, [x25, 
%[in_col_stride1]]\n" + "fadd v1.4s, v1.4s, v27.4s\n" + "ldr q23, [x25, x21]\n" + "fmul v6.4s, v6.4s, v0.s[0]\n" + "ldr q9, [x25, x22]\n" + "mov v5.16b, v13.16b\n" + "ldr q11, [x25, x23]\n" + "mov v13.16b, v3.16b\n" + "fadd v8.4s, v8.4s, v2.4s\n" + "fadd v1.4s, v1.4s, v2.4s\n" + "fadd v7.4s, v7.4s, v2.4s\n" + "fadd v10.4s, v3.4s, v6.4s\n" + "fmla v5.4s, v27.4s, v0.s[1]\n" + "fmla v13.4s, v6.4s, v0.s[1]\n" + "ldr q6, [x25, x24]\n" + "str q8, [x17, %[output_col_stride1]]\n" + "fadd v16.4s, v16.4s, v2.4s\n" + "str q1, [%[outptr0], x16]\n" + "fadd v14.4s, v14.4s, v2.4s\n" + "str q7, [x28, %[output_col_stride1]]\n" + "fadd v10.4s, v10.4s, v2.4s\n" + "fadd v13.4s, v13.4s, v24.4s\n" + "add %[outptr0], %[outptr0], #16\n" + "str q16, [x18]\n" + "fadd v5.4s, v5.4s, v2.4s\n" + "str q14, [x18, %[output_col_stride1]]\n" + "fadd v26.4s, v26.4s, v2.4s\n" + "str q10, [x17, x16]\n" + "fadd v1.4s, v20.4s, v4.4s\n" + "fadd v13.4s, v13.4s, v2.4s\n" + "add x17, x17, #16\n" + "str q5, [x28, x16]\n" + "fadd v5.4s, v17.4s, v22.4s\n" + "str q26, [x18, x15]\n" + "fsub v7.4s, v20.4s, v4.4s\n" + "fadd v8.4s, v19.4s, v1.4s\n" + "add x28, x28, #16\n" + "str q13, [x18, x16]\n" + "mov v4.16b, v1.16b\n" + "fsub v10.4s, v17.4s, v22.4s\n" + "add x18, x18, #16\n" + "mov v1.16b, v7.16b\n" + "fadd v8.4s, v8.4s, v5.4s\n" + "fmla v4.4s, v5.4s, v0.s[1]\n" + "fmul v10.4s, v10.4s, v0.s[0]\n" + "fadd v7.4s, v7.4s, v10.4s\n" + "fmla v1.4s, v10.4s, v0.s[1]\n" + "fadd v1.4s, v1.4s, v18.4s\n" + "bne 2b\n" + "3:\n" + "fadd v3.4s, v12.4s, v23.4s\n" + "ldr q2, [x13]\n" + "fadd v27.4s, v9.4s, v11.4s\n" + "ldr q21, [x13, %[in_col_stride1]]\n" + "fsub v16.4s, v12.4s, v23.4s\n" + "ldr q26, [x13, x21]\n" + "fsub v9.4s, v9.4s, v11.4s\n" + "ldr q17, [x13, x22]\n" + "fadd v14.4s, v25.4s, v3.4s\n" + "ldr q19, [x13, x23]\n" + "mov v11.16b, v3.16b\n" + "ldr q10, [x13, x24]\n" + "mov v3.16b, v16.16b\n" + "ldr q15, [x26]\n" + "fmul v9.4s, v9.4s, v0.s[0]\n" + "ldr q12, [x26, %[in_col_stride1]]\n" + "fadd v14.4s, v14.4s, v27.4s\n" + "ldr q20, [x26, x21]\n" + "fmla v11.4s, v27.4s, v0.s[1]\n" + "ldr q24, [x26, x22]\n" + "fadd v23.4s, v21.4s, v26.4s\n" + "ldr q29, [x26, x23]\n" + "fadd v13.4s, v16.4s, v9.4s\n" + "ldr q5, [x26, x24]\n" + "fmla v3.4s, v9.4s, v0.s[1]\n" + "ldr q18, [x14]\n" + "fadd v30.4s, v17.4s, v19.4s\n" + "add %[inptr0], %[inptr0], #16\n" + "fadd v16.4s, v2.4s, v23.4s\n" + "add x25, x25, #16\n" + "fsub v21.4s, v21.4s, v26.4s\n" + "ldr q22, [x14, %[in_col_stride1]]\n" + "fadd v3.4s, v3.4s, v6.4s\n" + "ldr q28, [x14, x21]\n" + "fsub v19.4s, v17.4s, v19.4s\n" + "add x13, x13, #16\n" + "fadd v16.4s, v16.4s, v30.4s\n" + "add x26, x26, #16\n" + "mov v17.16b, v23.16b\n" + "fadd v26.4s, v12.4s, v20.4s\n" + "fsub v9.4s, v12.4s, v20.4s\n" + "ldr q2, [%[bptr]]\n" + "fmul v19.4s, v19.4s, v0.s[0]\n" + "add %[bptr], %[bptr], #16\n" + "fmla v17.4s, v30.4s, v0.s[1]\n" + "fadd v25.4s, v24.4s, v29.4s\n" + "fadd v23.4s, v15.4s, v26.4s\n" + "fsub v12.4s, v24.4s, v29.4s\n" + "mov v15.16b, v26.16b\n" + "fadd v24.4s, v22.4s, v28.4s\n" + "fsub v22.4s, v22.4s, v28.4s\n" + "fadd v29.4s, v14.4s, v16.4s\n" + "fsub v16.4s, v14.4s, v16.4s\n" + "ldr q20, [x14, x22]\n" + "fadd v23.4s, v23.4s, v25.4s\n" + "fmul v12.4s, v12.4s, v0.s[0]\n" + "fmla v15.4s, v25.4s, v0.s[1]\n" + "mov v6.16b, v21.16b\n" + "fadd v30.4s, v21.4s, v19.4s\n" + "fadd v26.4s, v18.4s, v24.4s\n" + "mov v25.16b, v24.16b\n" + "fadd v18.4s, v8.4s, v29.4s\n" + "fmla v6.4s, v19.4s, v0.s[1]\n" + "mov v19.16b, v29.16b\n" + "fadd v27.4s, v11.4s, v17.4s\n" + "fsub v11.4s, v11.4s, v17.4s\n" + "fadd v29.4s, 
v13.4s, v30.4s\n" + "fsub v8.4s, v13.4s, v30.4s\n" + "fadd v14.4s, v9.4s, v12.4s\n" + "fadd v6.4s, v6.4s, v10.4s\n" + "ldr q28, [x14, x23]\n" + "fadd v17.4s, v4.4s, v27.4s\n" + "mov v4.16b, v27.16b\n" + "fmla v9.4s, v12.4s, v0.s[1]\n" + "fadd v27.4s, v20.4s, v28.4s\n" + "fsub v21.4s, v20.4s, v28.4s\n" + "fadd v10.4s, v7.4s, v29.4s\n" + "mov v7.16b, v29.16b\n" + "fadd v13.4s, v3.4s, v6.4s\n" + "fsub v3.4s, v3.4s, v6.4s\n" + "ldr q20, [x14, x24]\n" + "fadd v9.4s, v9.4s, v5.4s\n" + "fadd v26.4s, v26.4s, v27.4s\n" + "fmul v21.4s, v21.4s, v0.s[0]\n" + "add x14, x14, #16\n" + "fmla v25.4s, v27.4s, v0.s[1]\n" + "mov v12.16b, v22.16b\n" + "fadd v1.4s, v1.4s, v13.4s\n" + "mov v5.16b, v13.16b\n" + "fadd v13.4s, v23.4s, v26.4s\n" + "fsub v23.4s, v23.4s, v26.4s\n" + "fadd v26.4s, v22.4s, v21.4s\n" + "ldr q24, [x27]\n" + "fmla v12.4s, v21.4s, v0.s[1]\n" + "fadd v30.4s, v15.4s, v25.4s\n" + "fsub v15.4s, v15.4s, v25.4s\n" + "ldr q28, [x27, %[in_col_stride1]]\n" + "fadd v18.4s, v18.4s, v13.4s\n" + "fmul v23.4s, v23.4s, v0.s[0]\n" + "fmla v19.4s, v13.4s, v0.s[1]\n" + "ldr q22, [x27, x21]\n" + "fadd v12.4s, v12.4s, v20.4s\n" + "ldr q29, [x27, x22]\n" + "fadd v17.4s, v17.4s, v30.4s\n" + "fmul v15.4s, v15.4s, v0.s[0]\n" + "fmla v4.4s, v30.4s, v0.s[1]\n" + "fadd v25.4s, v28.4s, v22.4s\n" + "fsub v27.4s, v28.4s, v22.4s\n" + "fadd v22.4s, v16.4s, v23.4s\n" + "fadd v18.4s, v18.4s, v2.4s\n" + "fadd v17.4s, v17.4s, v2.4s\n" + "fadd v19.4s, v19.4s, v2.4s\n" + "fadd v28.4s, v24.4s, v25.4s\n" + "mov v21.16b, v25.16b\n" + "fmla v16.4s, v23.4s, v0.s[1]\n" + "ldr q30, [x27, x23]\n" + "str q18, [%[outptr0]]\n" + "fadd v20.4s, v29.4s, v30.4s\n" + "str q17, [%[outptr0], x15]\n" + "fsub v18.4s, v29.4s, v30.4s\n" + "str q19, [x28]\n" + "mov v24.16b, v27.16b\n" + "fadd v13.4s, v14.4s, v26.4s\n" + "ldr q25, [x27, x24]\n" + "fadd v28.4s, v28.4s, v20.4s\n" + "add x27, x27, #16\n" + "fmul v18.4s, v18.4s, v0.s[0]\n" + "fmla v21.4s, v20.4s, v0.s[1]\n" + "fsub v30.4s, v14.4s, v26.4s\n" + "mov v14.16b, v8.16b\n" + "fadd v10.4s, v10.4s, v13.4s\n" + "fmla v7.4s, v13.4s, v0.s[1]\n" + "fadd v16.4s, v16.4s, v28.4s\n" + "fadd v29.4s, v11.4s, v15.4s\n" + "fadd v23.4s, v27.4s, v18.4s\n" + "fmla v24.4s, v18.4s, v0.s[1]\n" + "fmul v30.4s, v30.4s, v0.s[0]\n" + "mov v26.16b, v11.16b\n" + "fadd v27.4s, v9.4s, v12.4s\n" + "fsub v6.4s, v9.4s, v12.4s\n" + "mov v13.16b, v3.16b\n" + "fadd v10.4s, v10.4s, v2.4s\n" + "fadd v24.4s, v24.4s, v25.4s\n" + "fmla v26.4s, v15.4s, v0.s[1]\n" + "fadd v8.4s, v8.4s, v30.4s\n" + "fmla v14.4s, v30.4s, v0.s[1]\n" + "fadd v1.4s, v1.4s, v27.4s\n" + "fmul v6.4s, v6.4s, v0.s[0]\n" + "str q10, [%[outptr0], %[output_col_stride1]]\n" + "fmla v5.4s, v27.4s, v0.s[1]\n" + "fadd v26.4s, v26.4s, v21.4s\n" + "fadd v22.4s, v22.4s, v2.4s\n" + "fadd v14.4s, v14.4s, v23.4s\n" + "fadd v8.4s, v8.4s, v2.4s\n" + "fadd v10.4s, v3.4s, v6.4s\n" + "fmla v13.4s, v6.4s, v0.s[1]\n" + "fadd v1.4s, v1.4s, v2.4s\n" + "fadd v29.4s, v29.4s, v2.4s\n" + "str q22, [x17]\n" + "fadd v7.4s, v7.4s, v2.4s\n" + "str q8, [x17, %[output_col_stride1]]\n" + "fadd v4.4s, v4.4s, v2.4s\n" + "fadd v13.4s, v13.4s, v24.4s\n" + "fadd v10.4s, v10.4s, v2.4s\n" + "str q1, [%[outptr0], x16]\n" + "fadd v5.4s, v5.4s, v2.4s\n" + "str q29, [x17, x15]\n" + "fadd v16.4s, v16.4s, v2.4s\n" + "str q7, [x28, %[output_col_stride1]]\n" + "fadd v14.4s, v14.4s, v2.4s\n" + "str q10, [x17, x16]\n" + "fadd v26.4s, v26.4s, v2.4s\n" + "str q4, [x28, x15]\n" + "fadd v13.4s, v13.4s, v2.4s\n" + "str q5, [x28, x16]\n" + "add %[outptr0], %[outptr0], #16\n" + "str q16, [x18]\n" + "add x17, x17, 
#16\n" + "str q14, [x18, %[output_col_stride1]]\n" + "add x28, x28, #16\n" + "str q26, [x18, x15]\n" + "str q13, [x18, x16]\n" + "add x18, x18, #16\n" + "4:\n" + "cmp x20, #2\n" + "blt 5f\n" + "ldr d19, [%[inptr0]]\n" + "ldr d20, [%[inptr0], %[in_col_stride1]]\n" + "sub x20, x20, #2\n" + "ldr d4, [%[inptr0], x21]\n" + "ldr d17, [%[inptr0], x22]\n" + "fadd v1.4s, v20.4s, v4.4s\n" + "ldr d22, [%[inptr0], x23]\n" + "fadd v5.4s, v17.4s, v22.4s\n" + "ldr d18, [%[inptr0], x24]\n" + "fsub v7.4s, v20.4s, v4.4s\n" + "ldr d25, [x25]\n" + "fsub v10.4s, v17.4s, v22.4s\n" + "ldr d12, [x25, %[in_col_stride1]]\n" + "fadd v8.4s, v19.4s, v1.4s\n" + "ldr d23, [x25, x21]\n" + "mov v4.16b, v1.16b\n" + "ldr d9, [x25, x22]\n" + "mov v1.16b, v7.16b\n" + "ldr d11, [x25, x23]\n" + "fmul v10.4s, v10.4s, v0.s[0]\n" + "ldr d6, [x25, x24]\n" + "fadd v8.4s, v8.4s, v5.4s\n" + "ldr d2, [x13]\n" + "fmla v4.4s, v5.4s, v0.s[1]\n" + "ldr d21, [x13, %[in_col_stride1]]\n" + "fadd v3.4s, v12.4s, v23.4s\n" + "ldr d26, [x13, x21]\n" + "fadd v7.4s, v7.4s, v10.4s\n" + "ldr d17, [x13, x22]\n" + "fmla v1.4s, v10.4s, v0.s[1]\n" + "ldr d19, [x13, x23]\n" + "fadd v27.4s, v9.4s, v11.4s\n" + "ldr d10, [x13, x24]\n" + "fadd v14.4s, v25.4s, v3.4s\n" + "ldr d15, [x26]\n" + "fsub v16.4s, v12.4s, v23.4s\n" + "ldr d12, [x26, %[in_col_stride1]]\n" + "fadd v1.4s, v1.4s, v18.4s\n" + "ldr d20, [x26, x21]\n" + "fsub v9.4s, v9.4s, v11.4s\n" + "ldr d24, [x26, x22]\n" + "fadd v14.4s, v14.4s, v27.4s\n" + "ldr d29, [x26, x23]\n" + "mov v11.16b, v3.16b\n" + "ldr d5, [x26, x24]\n" + "mov v3.16b, v16.16b\n" + "ldr d18, [x14]\n" + "fmul v9.4s, v9.4s, v0.s[0]\n" + "add %[inptr0], %[inptr0], #8\n" + "fmla v11.4s, v27.4s, v0.s[1]\n" + "add x25, x25, #8\n" + "fadd v23.4s, v21.4s, v26.4s\n" + "add x13, x13, #8\n" + "fsub v21.4s, v21.4s, v26.4s\n" + "ldr d22, [x14, %[in_col_stride1]]\n" + "fadd v13.4s, v16.4s, v9.4s\n" + "add x26, x26, #8\n" + "fmla v3.4s, v9.4s, v0.s[1]\n" + "fadd v30.4s, v17.4s, v19.4s\n" + "fadd v16.4s, v2.4s, v23.4s\n" + "fsub v19.4s, v17.4s, v19.4s\n" + "mov v17.16b, v23.16b\n" + "fadd v26.4s, v12.4s, v20.4s\n" + "fsub v9.4s, v12.4s, v20.4s\n" + "ldr d28, [x14, x21]\n" + "fadd v3.4s, v3.4s, v6.4s\n" + "ldr d20, [x14, x22]\n" + "fadd v16.4s, v16.4s, v30.4s\n" + "fmul v19.4s, v19.4s, v0.s[0]\n" + "fmla v17.4s, v30.4s, v0.s[1]\n" + "fadd v25.4s, v24.4s, v29.4s\n" + "fadd v23.4s, v15.4s, v26.4s\n" + "fsub v12.4s, v24.4s, v29.4s\n" + "mov v15.16b, v26.16b\n" + "fadd v24.4s, v22.4s, v28.4s\n" + "fsub v22.4s, v22.4s, v28.4s\n" + "fadd v29.4s, v14.4s, v16.4s\n" + "fsub v16.4s, v14.4s, v16.4s\n" + "ldr d28, [x14, x23]\n" + "fadd v23.4s, v23.4s, v25.4s\n" + "fmul v12.4s, v12.4s, v0.s[0]\n" + "fmla v15.4s, v25.4s, v0.s[1]\n" + "mov v6.16b, v21.16b\n" + "fadd v30.4s, v21.4s, v19.4s\n" + "fadd v26.4s, v18.4s, v24.4s\n" + "mov v25.16b, v24.16b\n" + "fadd v18.4s, v8.4s, v29.4s\n" + "fmla v6.4s, v19.4s, v0.s[1]\n" + "fadd v27.4s, v20.4s, v28.4s\n" + "fsub v21.4s, v20.4s, v28.4s\n" + "mov v19.16b, v29.16b\n" + "fadd v29.4s, v13.4s, v30.4s\n" + "fsub v8.4s, v13.4s, v30.4s\n" + "fadd v14.4s, v9.4s, v12.4s\n" + "fadd v6.4s, v6.4s, v10.4s\n" + "ldr d20, [x14, x24]\n" + "fadd v26.4s, v26.4s, v27.4s\n" + "add x14, x14, #8\n" + "fmla v9.4s, v12.4s, v0.s[1]\n" + "ldr d24, [x27]\n" + "fmul v21.4s, v21.4s, v0.s[0]\n" + "fmla v25.4s, v27.4s, v0.s[1]\n" + "fadd v10.4s, v7.4s, v29.4s\n" + "ldr d2, [%[bptr]]\n" + "mov v7.16b, v29.16b\n" + "add %[bptr], %[bptr], #8\n" + "fadd v9.4s, v9.4s, v5.4s\n" + "fadd v13.4s, v23.4s, v26.4s\n" + "fsub v23.4s, v23.4s, v26.4s\n" + "fadd 
v27.4s, v11.4s, v17.4s\n" + "fsub v11.4s, v11.4s, v17.4s\n" + "fadd v30.4s, v15.4s, v25.4s\n" + "fsub v15.4s, v15.4s, v25.4s\n" + "ldr d28, [x27, %[in_col_stride1]]\n" + "fadd v18.4s, v18.4s, v13.4s\n" + "fmla v19.4s, v13.4s, v0.s[1]\n" + "fadd v26.4s, v22.4s, v21.4s\n" + "mov v12.16b, v22.16b\n" + "fmul v23.4s, v23.4s, v0.s[0]\n" + "fadd v17.4s, v4.4s, v27.4s\n" + "fmul v15.4s, v15.4s, v0.s[0]\n" + "mov v4.16b, v27.16b\n" + "fmla v12.4s, v21.4s, v0.s[1]\n" + "ldr d22, [x27, x21]\n" + "fadd v18.4s, v18.4s, v2.4s\n" + "fadd v19.4s, v19.4s, v2.4s\n" + "fadd v17.4s, v17.4s, v30.4s\n" + "fmla v4.4s, v30.4s, v0.s[1]\n" + "fadd v25.4s, v28.4s, v22.4s\n" + "fsub v27.4s, v28.4s, v22.4s\n" + "fadd v12.4s, v12.4s, v20.4s\n" + "ldr d29, [x27, x22]\n" + "str d18, [%[outptr0]]\n" + "fadd v22.4s, v16.4s, v23.4s\n" + "str d19, [x28]\n" + "fadd v28.4s, v24.4s, v25.4s\n" + "ldr d30, [x27, x23]\n" + "fadd v20.4s, v29.4s, v30.4s\n" + "fsub v18.4s, v29.4s, v30.4s\n" + "mov v21.16b, v25.16b\n" + "ldr d25, [x27, x24]\n" + "fmla v16.4s, v23.4s, v0.s[1]\n" + "add x27, x27, #8\n" + "mov v24.16b, v27.16b\n" + "fadd v17.4s, v17.4s, v2.4s\n" + "fadd v28.4s, v28.4s, v20.4s\n" + "fmul v18.4s, v18.4s, v0.s[0]\n" + "fmla v21.4s, v20.4s, v0.s[1]\n" + "fadd v13.4s, v14.4s, v26.4s\n" + "fsub v30.4s, v14.4s, v26.4s\n" + "mov v14.16b, v8.16b\n" + "str d17, [%[outptr0], x15]\n" + "fadd v29.4s, v11.4s, v15.4s\n" + "fadd v23.4s, v27.4s, v18.4s\n" + "fmla v24.4s, v18.4s, v0.s[1]\n" + "fadd v16.4s, v16.4s, v28.4s\n" + "fadd v10.4s, v10.4s, v13.4s\n" + "fmul v30.4s, v30.4s, v0.s[0]\n" + "fmla v7.4s, v13.4s, v0.s[1]\n" + "mov v26.16b, v11.16b\n" + "fadd v13.4s, v3.4s, v6.4s\n" + "fadd v24.4s, v24.4s, v25.4s\n" + "fadd v27.4s, v9.4s, v12.4s\n" + "fsub v3.4s, v3.4s, v6.4s\n" + "fsub v6.4s, v9.4s, v12.4s\n" + "fadd v8.4s, v8.4s, v30.4s\n" + "fmla v14.4s, v30.4s, v0.s[1]\n" + "fmla v26.4s, v15.4s, v0.s[1]\n" + "fadd v1.4s, v1.4s, v13.4s\n" + "mov v5.16b, v13.16b\n" + "fadd v10.4s, v10.4s, v2.4s\n" + "fmul v6.4s, v6.4s, v0.s[0]\n" + "mov v13.16b, v3.16b\n" + "fadd v14.4s, v14.4s, v23.4s\n" + "fadd v22.4s, v22.4s, v2.4s\n" + "fadd v26.4s, v26.4s, v21.4s\n" + "fadd v1.4s, v1.4s, v27.4s\n" + "str d10, [%[outptr0], %[output_col_stride1]]\n" + "fmla v5.4s, v27.4s, v0.s[1]\n" + "fadd v10.4s, v3.4s, v6.4s\n" + "fmla v13.4s, v6.4s, v0.s[1]\n" + "str d22, [x17]\n" + "fadd v8.4s, v8.4s, v2.4s\n" + "fadd v1.4s, v1.4s, v2.4s\n" + "fadd v29.4s, v29.4s, v2.4s\n" + "fadd v7.4s, v7.4s, v2.4s\n" + "fadd v4.4s, v4.4s, v2.4s\n" + "fadd v13.4s, v13.4s, v24.4s\n" + "fadd v10.4s, v10.4s, v2.4s\n" + "str d8, [x17, %[output_col_stride1]]\n" + "fadd v5.4s, v5.4s, v2.4s\n" + "str d1, [%[outptr0], x16]\n" + "fadd v16.4s, v16.4s, v2.4s\n" + "str d29, [x17, x15]\n" + "fadd v14.4s, v14.4s, v2.4s\n" + "str d10, [x17, x16]\n" + "fadd v26.4s, v26.4s, v2.4s\n" + "str d7, [x28, %[output_col_stride1]]\n" + "fadd v13.4s, v13.4s, v2.4s\n" + "str d4, [x28, x15]\n" + "add %[outptr0], %[outptr0], #8\n" + "str d5, [x28, x16]\n" + "add x17, x17, #8\n" + "str d16, [x18]\n" + "add x28, x28, #8\n" + "str d14, [x18, %[output_col_stride1]]\n" + "str d26, [x18, x15]\n" + "str d13, [x18, x16]\n" + "add x18, x18, #8\n" + "5:\n" + "cbz x20, 6f\n" + "ldr s19, [%[inptr0]]\n" + "ldr s20, [%[inptr0], %[in_col_stride1]]\n" + "ldr s4, [%[inptr0], x21]\n" + "fadd v1.4s, v20.4s, v4.4s\n" + "ldr s17, [%[inptr0], x22]\n" + "fsub v7.4s, v20.4s, v4.4s\n" + "ldr s22, [%[inptr0], x23]\n" + "fadd v5.4s, v17.4s, v22.4s\n" + "ldr s18, [%[inptr0], x24]\n" + "fsub v10.4s, v17.4s, v22.4s\n" + "ldr s25, 
[x25]\n" + "fadd v8.4s, v19.4s, v1.4s\n" + "ldr s12, [x25, %[in_col_stride1]]\n" + "mov v4.16b, v1.16b\n" + "ldr s23, [x25, x21]\n" + "mov v1.16b, v7.16b\n" + "ldr s9, [x25, x22]\n" + "fmul v10.4s, v10.4s, v0.s[0]\n" + "ldr s11, [x25, x23]\n" + "fadd v8.4s, v8.4s, v5.4s\n" + "ldr s6, [x25, x24]\n" + "fmla v4.4s, v5.4s, v0.s[1]\n" + "ldr s2, [x13]\n" + "fadd v3.4s, v12.4s, v23.4s\n" + "ldr s21, [x13, %[in_col_stride1]]\n" + "fadd v7.4s, v7.4s, v10.4s\n" + "ldr s26, [x13, x21]\n" + "fmla v1.4s, v10.4s, v0.s[1]\n" + "ldr s17, [x13, x22]\n" + "fadd v27.4s, v9.4s, v11.4s\n" + "ldr s19, [x13, x23]\n" + "fadd v14.4s, v25.4s, v3.4s\n" + "ldr s10, [x13, x24]\n" + "fsub v16.4s, v12.4s, v23.4s\n" + "ldr s15, [x26]\n" + "fadd v1.4s, v1.4s, v18.4s\n" + "ldr s12, [x26, %[in_col_stride1]]\n" + "fsub v9.4s, v9.4s, v11.4s\n" + "ldr s20, [x26, x21]\n" + "fadd v14.4s, v14.4s, v27.4s\n" + "ldr s24, [x26, x22]\n" + "mov v11.16b, v3.16b\n" + "ldr s29, [x26, x23]\n" + "mov v3.16b, v16.16b\n" + "ldr s5, [x26, x24]\n" + "fmul v9.4s, v9.4s, v0.s[0]\n" + "ldr s18, [x14]\n" + "fmla v11.4s, v27.4s, v0.s[1]\n" + "fadd v23.4s, v21.4s, v26.4s\n" + "fsub v21.4s, v21.4s, v26.4s\n" + "fadd v30.4s, v17.4s, v19.4s\n" + "fsub v19.4s, v17.4s, v19.4s\n" + "ldr s22, [x14, %[in_col_stride1]]\n" + "fadd v13.4s, v16.4s, v9.4s\n" + "fmla v3.4s, v9.4s, v0.s[1]\n" + "fadd v16.4s, v2.4s, v23.4s\n" + "mov v17.16b, v23.16b\n" + "fadd v26.4s, v12.4s, v20.4s\n" + "fsub v9.4s, v12.4s, v20.4s\n" + "fmul v19.4s, v19.4s, v0.s[0]\n" + "ldr s28, [x14, x21]\n" + "fadd v3.4s, v3.4s, v6.4s\n" + "ldr s20, [x14, x22]\n" + "fadd v16.4s, v16.4s, v30.4s\n" + "fmla v17.4s, v30.4s, v0.s[1]\n" + "fadd v25.4s, v24.4s, v29.4s\n" + "fadd v23.4s, v15.4s, v26.4s\n" + "fsub v12.4s, v24.4s, v29.4s\n" + "mov v15.16b, v26.16b\n" + "fadd v24.4s, v22.4s, v28.4s\n" + "fsub v22.4s, v22.4s, v28.4s\n" + "fadd v30.4s, v21.4s, v19.4s\n" + "mov v6.16b, v21.16b\n" + "fadd v23.4s, v23.4s, v25.4s\n" + "fmla v15.4s, v25.4s, v0.s[1]\n" + "fmul v12.4s, v12.4s, v0.s[0]\n" + "ldr s28, [x14, x23]\n" + "fmla v6.4s, v19.4s, v0.s[1]\n" + "fadd v27.4s, v20.4s, v28.4s\n" + "fadd v26.4s, v18.4s, v24.4s\n" + "fsub v21.4s, v20.4s, v28.4s\n" + "mov v25.16b, v24.16b\n" + "fadd v29.4s, v14.4s, v16.4s\n" + "fsub v16.4s, v14.4s, v16.4s\n" + "ldr s20, [x14, x24]\n" + "fadd v6.4s, v6.4s, v10.4s\n" + "ldr s24, [x27]\n" + "fadd v26.4s, v26.4s, v27.4s\n" + "fmul v21.4s, v21.4s, v0.s[0]\n" + "fmla v25.4s, v27.4s, v0.s[1]\n" + "fadd v18.4s, v8.4s, v29.4s\n" + "mov v19.16b, v29.16b\n" + "fadd v29.4s, v13.4s, v30.4s\n" + "fsub v8.4s, v13.4s, v30.4s\n" + "fadd v27.4s, v11.4s, v17.4s\n" + "fsub v11.4s, v11.4s, v17.4s\n" + "fadd v13.4s, v23.4s, v26.4s\n" + "fsub v23.4s, v23.4s, v26.4s\n" + "ldr s28, [x27, %[in_col_stride1]]\n" + "fadd v10.4s, v7.4s, v29.4s\n" + "mov v7.16b, v29.16b\n" + "fadd v17.4s, v4.4s, v27.4s\n" + "mov v4.16b, v27.16b\n" + "fadd v18.4s, v18.4s, v13.4s\n" + "fmla v19.4s, v13.4s, v0.s[1]\n" + "fmul v23.4s, v23.4s, v0.s[0]\n" + "fadd v30.4s, v15.4s, v25.4s\n" + "fsub v15.4s, v15.4s, v25.4s\n" + "fadd v13.4s, v3.4s, v6.4s\n" + "fsub v3.4s, v3.4s, v6.4s\n" + "ldr s2, [%[bptr]]\n" + "fadd v18.4s, v18.4s, v2.4s\n" + "fadd v19.4s, v19.4s, v2.4s\n" + "fadd v17.4s, v17.4s, v30.4s\n" + "fmla v4.4s, v30.4s, v0.s[1]\n" + "fadd v14.4s, v9.4s, v12.4s\n" + "fmul v15.4s, v15.4s, v0.s[0]\n" + "fadd v1.4s, v1.4s, v13.4s\n" + "str s18, [%[outptr0]]\n" + "fadd v26.4s, v22.4s, v21.4s\n" + "str s19, [x28]\n" + "fmla v9.4s, v12.4s, v0.s[1]\n" + "mov v12.16b, v22.16b\n" + "ldr s22, [x27, x21]\n" + "fadd v25.4s, 
v28.4s, v22.4s\n" + "fsub v27.4s, v28.4s, v22.4s\n" + "fadd v22.4s, v16.4s, v23.4s\n" + "fadd v9.4s, v9.4s, v5.4s\n" + "ldr s29, [x27, x22]\n" + "fmla v12.4s, v21.4s, v0.s[1]\n" + "ldr s30, [x27, x23]\n" + "fadd v28.4s, v24.4s, v25.4s\n" + "mov v21.16b, v25.16b\n" + "fmla v16.4s, v23.4s, v0.s[1]\n" + "ldr s25, [x27, x24]\n" + "mov v5.16b, v13.16b\n" + "fadd v17.4s, v17.4s, v2.4s\n" + "fadd v12.4s, v12.4s, v20.4s\n" + "fadd v20.4s, v29.4s, v30.4s\n" + "fsub v18.4s, v29.4s, v30.4s\n" + "mov v24.16b, v27.16b\n" + "fadd v22.4s, v22.4s, v2.4s\n" + "fadd v4.4s, v4.4s, v2.4s\n" + "str s17, [%[outptr0], x15]\n" + "fadd v13.4s, v14.4s, v26.4s\n" + "fadd v28.4s, v28.4s, v20.4s\n" + "fmla v21.4s, v20.4s, v0.s[1]\n" + "fmul v18.4s, v18.4s, v0.s[0]\n" + "fsub v30.4s, v14.4s, v26.4s\n" + "str s22, [x17]\n" + "mov v14.16b, v8.16b\n" + "str s4, [x28, x15]\n" + "fadd v10.4s, v10.4s, v13.4s\n" + "fadd v16.4s, v16.4s, v28.4s\n" + "fmla v7.4s, v13.4s, v0.s[1]\n" + "fadd v23.4s, v27.4s, v18.4s\n" + "fmla v24.4s, v18.4s, v0.s[1]\n" + "fmul v30.4s, v30.4s, v0.s[0]\n" + "fadd v29.4s, v11.4s, v15.4s\n" + "mov v26.16b, v11.16b\n" + "fadd v27.4s, v9.4s, v12.4s\n" + "fsub v6.4s, v9.4s, v12.4s\n" + "mov v13.16b, v3.16b\n" + "fadd v24.4s, v24.4s, v25.4s\n" + "fadd v10.4s, v10.4s, v2.4s\n" + "fadd v8.4s, v8.4s, v30.4s\n" + "fmla v14.4s, v30.4s, v0.s[1]\n" + "fmla v26.4s, v15.4s, v0.s[1]\n" + "fadd v1.4s, v1.4s, v27.4s\n" + "fmul v6.4s, v6.4s, v0.s[0]\n" + "fmla v5.4s, v27.4s, v0.s[1]\n" + "str s10, [%[outptr0], %[output_col_stride1]]\n" + "fadd v29.4s, v29.4s, v2.4s\n" + "fadd v14.4s, v14.4s, v23.4s\n" + "fadd v8.4s, v8.4s, v2.4s\n" + "fadd v26.4s, v26.4s, v21.4s\n" + "fadd v1.4s, v1.4s, v2.4s\n" + "fadd v10.4s, v3.4s, v6.4s\n" + "fmla v13.4s, v6.4s, v0.s[1]\n" + "str s29, [x17, x15]\n" + "fadd v7.4s, v7.4s, v2.4s\n" + "str s8, [x17, %[output_col_stride1]]\n" + "fadd v5.4s, v5.4s, v2.4s\n" + "str s1, [%[outptr0], x16]\n" + "fadd v16.4s, v16.4s, v2.4s\n" + "fadd v13.4s, v13.4s, v24.4s\n" + "fadd v10.4s, v10.4s, v2.4s\n" + "str s7, [x28, %[output_col_stride1]]\n" + "fadd v14.4s, v14.4s, v2.4s\n" + "str s5, [x28, x16]\n" + "fadd v26.4s, v26.4s, v2.4s\n" + "str s16, [x18]\n" + "fadd v13.4s, v13.4s, v2.4s\n" + "str s10, [x17, x16]\n" + "str s14, [x18, %[output_col_stride1]]\n" + "str s26, [x18, x15]\n" + "str s13, [x18, x16]\n" + "6:\n" + : [bptr] "+r" (bptr), [outptr0] "+r" (output), [inptr0] "+r" (inptr) + : [output_row_stride] "r" (output_row_stride * sizeof(float)), [output_col_stride1] "r" (output_col_stride * sizeof(float)), [pcoeffs] "r" (coeffs), [n_channels] "r" ((long) n_channels), [in_row_stride] "r" (6 * matrix_stride * sizeof(float)), [in_col_stride1] "r" (matrix_stride * sizeof(float)) + : "cc", "v0", "v1", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v2", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v3", "v30", "v4", "v5", "v6", "v7", "v8", "v9", "x13", "x14", "x15", "x16", "x17", "x18", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "memory" + ); + } + else + { + __asm__ __volatile__ ( + "ldr d0, [%[pcoeffs]]\n" + "add x21, %[in_col_stride1], %[in_col_stride1]\n" // Compute input column stride 2 + "add x22, x21, %[in_col_stride1]\n" // Compute input column stride 3 + "add x25, %[inptr0], %[in_row_stride]\n" // Compute input row pointers + "add x15, %[output_col_stride1], %[output_col_stride1]\n" // Compute output column stride 2 + "add x23, x22, %[in_col_stride1]\n" // Compute input column stride 4 + "add x13, x25, %[in_row_stride]\n" 
// Compute input row pointers + "add x16, x15, %[output_col_stride1]\n" // Compute output column stride 3 + "add x24, x23, %[in_col_stride1]\n" // Compute input column stride 5 + "add x26, x13, %[in_row_stride]\n" // Compute input row pointers + "add x17, %[outptr0], %[output_row_stride]\n" // Compute output row pointer 1 + "add x14, x26, %[in_row_stride]\n" // Compute input row pointers + "add x28, x17, %[output_row_stride]\n" // Compute output row pointer 2 + "lsr x19, %[n_channels], #2\n" + "add x27, x14, %[in_row_stride]\n" // Compute input row pointers + "add x18, x28, %[output_row_stride]\n" // Compute output row pointer 3 + "and x20, %[n_channels], #3\n" + "cbz x19, 4f\n" + "1:\n" // Quad head + "ldr q17, [%[inptr0]]\n" + "subs x19, x19, #1\n" + "ldr q23, [%[inptr0], %[in_col_stride1]]\n" + "ldr q27, [%[inptr0], x21]\n" + "fadd v4.4s, v23.4s, v27.4s\n" + "ldr q24, [%[inptr0], x22]\n" + "fsub v13.4s, v23.4s, v27.4s\n" + "ldr q11, [%[inptr0], x23]\n" + "fadd v10.4s, v24.4s, v11.4s\n" + "ldr q12, [%[inptr0], x24]\n" + "fsub v11.4s, v24.4s, v11.4s\n" + "ldr q20, [x25]\n" + "fadd v7.4s, v17.4s, v4.4s\n" + "ldr q19, [x25, %[in_col_stride1]]\n" + "mov v4.16b, v4.16b\n" + "ldr q22, [x25, x21]\n" + "mov v1.16b, v13.16b\n" + "ldr q14, [x25, x22]\n" + "fmul v11.4s, v11.4s, v0.s[0]\n" + "ldr q18, [x25, x23]\n" + "fadd v7.4s, v7.4s, v10.4s\n" + "ldr q3, [x25, x24]\n" + "fmla v4.4s, v10.4s, v0.s[1]\n" + "fadd v8.4s, v13.4s, v11.4s\n" + "fmla v1.4s, v11.4s, v0.s[1]\n" + "fadd v1.4s, v1.4s, v12.4s\n" + "beq 3f\n" + "2:\n" // Quad loop + "fadd v2.4s, v19.4s, v22.4s\n" + "ldr q16, [x13]\n" + "fadd v23.4s, v14.4s, v18.4s\n" + "ldr q21, [x13, %[in_col_stride1]]\n" + "fsub v15.4s, v19.4s, v22.4s\n" + "ldr q24, [x13, x21]\n" + "fsub v31.4s, v14.4s, v18.4s\n" + "ldr q25, [x13, x22]\n" + "fadd v11.4s, v20.4s, v2.4s\n" + "ldr q17, [x13, x23]\n" + "mov v13.16b, v2.16b\n" + "ldr q9, [x13, x24]\n" + "mov v2.16b, v15.16b\n" + "ldr q6, [x26]\n" + "fmul v31.4s, v31.4s, v0.s[0]\n" + "ldr q19, [x26, %[in_col_stride1]]\n" + "fadd v11.4s, v11.4s, v23.4s\n" + "ldr q22, [x26, x21]\n" + "fmla v13.4s, v23.4s, v0.s[1]\n" + "ldr q12, [x26, x22]\n" + "fadd v29.4s, v21.4s, v24.4s\n" + "ldr q26, [x26, x23]\n" + "fadd v15.4s, v15.4s, v31.4s\n" + "ldr q5, [x26, x24]\n" + "fmla v2.4s, v31.4s, v0.s[1]\n" + "ldr q10, [x14]\n" + "fadd v18.4s, v25.4s, v17.4s\n" + "add %[inptr0], %[inptr0], #16\n" + "fadd v27.4s, v16.4s, v29.4s\n" + "add x25, x25, #16\n" + "fsub v14.4s, v21.4s, v24.4s\n" + "ldr q30, [x14, %[in_col_stride1]]\n" + "fadd v2.4s, v2.4s, v3.4s\n" + "ldr q31, [x14, x21]\n" + "fsub v28.4s, v25.4s, v17.4s\n" + "add x13, x13, #16\n" + "fadd v27.4s, v27.4s, v18.4s\n" + "add x26, x26, #16\n" + "mov v21.16b, v29.16b\n" + "subs x19, x19, #1\n" + "fadd v20.4s, v19.4s, v22.4s\n" + "fsub v17.4s, v19.4s, v22.4s\n" + "fmul v28.4s, v28.4s, v0.s[0]\n" + "ldr q23, [x14, x22]\n" + "fmla v21.4s, v18.4s, v0.s[1]\n" + "fadd v29.4s, v12.4s, v26.4s\n" + "fsub v16.4s, v12.4s, v26.4s\n" + "fadd v25.4s, v30.4s, v31.4s\n" + "fadd v24.4s, v6.4s, v20.4s\n" + "mov v6.16b, v20.16b\n" + "fsub v22.4s, v30.4s, v31.4s\n" + "fadd v31.4s, v11.4s, v27.4s\n" + "fsub v12.4s, v11.4s, v27.4s\n" + "ldr q26, [x14, x23]\n" + "fmul v16.4s, v16.4s, v0.s[0]\n" + "fmla v6.4s, v29.4s, v0.s[1]\n" + "fadd v24.4s, v24.4s, v29.4s\n" + "mov v3.16b, v14.16b\n" + "fadd v20.4s, v14.4s, v28.4s\n" + "fadd v29.4s, v10.4s, v25.4s\n" + "mov v10.16b, v25.16b\n" + "fadd v25.4s, v7.4s, v31.4s\n" + "fmla v3.4s, v28.4s, v0.s[1]\n" + "fadd v14.4s, v23.4s, v26.4s\n" + "fsub v23.4s, v23.4s, 
v26.4s\n" + "mov v26.16b, v31.16b\n" + "fadd v31.4s, v15.4s, v20.4s\n" + "fsub v11.4s, v15.4s, v20.4s\n" + "fadd v20.4s, v17.4s, v16.4s\n" + "mov v7.16b, v17.16b\n" + "fadd v3.4s, v3.4s, v9.4s\n" + "ldr q18, [x14, x24]\n" + "fadd v29.4s, v29.4s, v14.4s\n" + "add x14, x14, #16\n" + "fmla v7.4s, v16.4s, v0.s[1]\n" + "ldr q19, [x27]\n" + "fmul v23.4s, v23.4s, v0.s[0]\n" + "fmla v10.4s, v14.4s, v0.s[1]\n" + "fadd v15.4s, v8.4s, v31.4s\n" + "mov v14.16b, v31.16b\n" + "fadd v28.4s, v24.4s, v29.4s\n" + "fsub v24.4s, v24.4s, v29.4s\n" + "fadd v7.4s, v7.4s, v5.4s\n" + "ldr q27, [x27, %[in_col_stride1]]\n" + "fadd v30.4s, v13.4s, v21.4s\n" + "fsub v9.4s, v13.4s, v21.4s\n" + "fadd v17.4s, v22.4s, v23.4s\n" + "mov v8.16b, v22.16b\n" + "fadd v25.4s, v25.4s, v28.4s\n" + "fmul v24.4s, v24.4s, v0.s[0]\n" + "fmla v26.4s, v28.4s, v0.s[1]\n" + "ldr q29, [x27, x21]\n" + "fmla v8.4s, v23.4s, v0.s[1]\n" + "ldr q28, [x27, x22]\n" + "fadd v13.4s, v4.4s, v30.4s\n" + "mov v4.16b, v30.16b\n" + "str q25, [%[outptr0]]\n" // Store output (0, 0) + "fadd v16.4s, v27.4s, v29.4s\n" + "str q26, [x28]\n" // Store output (2, 0) + "fsub v29.4s, v27.4s, v29.4s\n" + "fadd v8.4s, v8.4s, v18.4s\n" + "ldr q23, [x27, x23]\n" + "fadd v30.4s, v28.4s, v23.4s\n" + "ldr q25, [x27, x24]\n" + "fadd v19.4s, v19.4s, v16.4s\n" + "add x27, x27, #16\n" + "fsub v27.4s, v28.4s, v23.4s\n" + "mov v16.16b, v16.16b\n" + "fadd v22.4s, v20.4s, v17.4s\n" + "fsub v20.4s, v20.4s, v17.4s\n" + "fadd v21.4s, v12.4s, v24.4s\n" + "mov v26.16b, v12.16b\n" + "fadd v19.4s, v19.4s, v30.4s\n" + "fmla v16.4s, v30.4s, v0.s[1]\n" + "fmul v27.4s, v27.4s, v0.s[0]\n" + "ldr q17, [%[inptr0]]\n" + "fmla v26.4s, v24.4s, v0.s[1]\n" + "ldr q23, [%[inptr0], %[in_col_stride1]]\n" + "str q21, [x17]\n" // Store output (1, 0) + "mov v5.16b, v29.16b\n" + "fadd v15.4s, v15.4s, v22.4s\n" + "fmul v20.4s, v20.4s, v0.s[0]\n" + "fadd v18.4s, v29.4s, v27.4s\n" + "fmla v14.4s, v22.4s, v0.s[1]\n" + "fmla v5.4s, v27.4s, v0.s[1]\n" + "ldr q27, [%[inptr0], x21]\n" + "fadd v26.4s, v26.4s, v19.4s\n" + "ldr q24, [%[inptr0], x22]\n" + "str q15, [%[outptr0], %[output_col_stride1]]\n" // Store output (0, 1) + "fadd v12.4s, v11.4s, v20.4s\n" + "str q14, [x28, %[output_col_stride1]]\n" // Store output (2, 1) + "mov v28.16b, v11.16b\n" + "fadd v5.4s, v5.4s, v25.4s\n" + "ldr q11, [%[inptr0], x23]\n" + "str q26, [x18]\n" // Store output (3, 0) + "fadd v21.4s, v6.4s, v10.4s\n" + "str q12, [x17, %[output_col_stride1]]\n" // Store output (1, 1) + "fmla v28.4s, v20.4s, v0.s[1]\n" + "fsub v10.4s, v6.4s, v10.4s\n" + "ldr q12, [%[inptr0], x24]\n" + "mov v15.16b, v9.16b\n" + "ldr q20, [x25]\n" + "fadd v13.4s, v13.4s, v21.4s\n" + "ldr q19, [x25, %[in_col_stride1]]\n" + "fadd v28.4s, v28.4s, v18.4s\n" + "ldr q22, [x25, x21]\n" + "fmul v10.4s, v10.4s, v0.s[0]\n" + "ldr q14, [x25, x22]\n" + "fmla v4.4s, v21.4s, v0.s[1]\n" + "ldr q18, [x25, x23]\n" + "str q13, [%[outptr0], x15]\n" // Store output (0, 2) + "fadd v6.4s, v2.4s, v3.4s\n" + "str q28, [x18, %[output_col_stride1]]\n" // Store output (3, 1) + "fadd v30.4s, v7.4s, v8.4s\n" + "fadd v13.4s, v9.4s, v10.4s\n" + "fmla v15.4s, v10.4s, v0.s[1]\n" + "str q4, [x28, x15]\n" // Store output (2, 2) + "fsub v2.4s, v2.4s, v3.4s\n" + "fadd v1.4s, v1.4s, v6.4s\n" + "ldr q3, [x25, x24]\n" + "fsub v8.4s, v7.4s, v8.4s\n" + "mov v6.16b, v6.16b\n" + "str q13, [x17, x15]\n" // Store output (1, 2) + "fadd v15.4s, v15.4s, v16.4s\n" + "mov v9.16b, v2.16b\n" + "fadd v4.4s, v23.4s, v27.4s\n" + "fadd v1.4s, v1.4s, v30.4s\n" + "fmla v6.4s, v30.4s, v0.s[1]\n" + "fmul v8.4s, v8.4s, 
v0.s[0]\n" + "fadd v10.4s, v24.4s, v11.4s\n" + "str q15, [x18, x15]\n" // Store output (3, 2) + "fsub v13.4s, v23.4s, v27.4s\n" + "fadd v7.4s, v17.4s, v4.4s\n" + "fsub v11.4s, v24.4s, v11.4s\n" + "str q1, [%[outptr0], x16]\n" // Store output (0, 3) + "mov v4.16b, v4.16b\n" + "str q6, [x28, x16]\n" // Store output (2, 3) + "fadd v2.4s, v2.4s, v8.4s\n" + "fmla v9.4s, v8.4s, v0.s[1]\n" + "add %[outptr0], %[outptr0], #16\n" + "fadd v7.4s, v7.4s, v10.4s\n" + "add x28, x28, #16\n" + "fmul v11.4s, v11.4s, v0.s[0]\n" + "fmla v4.4s, v10.4s, v0.s[1]\n" + "str q2, [x17, x16]\n" // Store output (1, 3) + "mov v1.16b, v13.16b\n" + "fadd v9.4s, v9.4s, v5.4s\n" + "add x17, x17, #16\n" + "fadd v8.4s, v13.4s, v11.4s\n" + "fmla v1.4s, v11.4s, v0.s[1]\n" + "str q9, [x18, x16]\n" // Store output (3, 3) + "add x18, x18, #16\n" + "fadd v1.4s, v1.4s, v12.4s\n" + "bne 2b\n" + "3:\n" // Quad tail + "fadd v2.4s, v19.4s, v22.4s\n" + "ldr q16, [x13]\n" + "fadd v23.4s, v14.4s, v18.4s\n" + "ldr q21, [x13, %[in_col_stride1]]\n" + "fsub v15.4s, v19.4s, v22.4s\n" + "ldr q24, [x13, x21]\n" + "fsub v31.4s, v14.4s, v18.4s\n" + "ldr q25, [x13, x22]\n" + "fadd v11.4s, v20.4s, v2.4s\n" + "ldr q17, [x13, x23]\n" + "mov v13.16b, v2.16b\n" + "ldr q9, [x13, x24]\n" + "mov v2.16b, v15.16b\n" + "ldr q6, [x26]\n" + "fmul v31.4s, v31.4s, v0.s[0]\n" + "ldr q19, [x26, %[in_col_stride1]]\n" + "fadd v11.4s, v11.4s, v23.4s\n" + "ldr q22, [x26, x21]\n" + "fmla v13.4s, v23.4s, v0.s[1]\n" + "ldr q12, [x26, x22]\n" + "fadd v29.4s, v21.4s, v24.4s\n" + "ldr q26, [x26, x23]\n" + "fadd v15.4s, v15.4s, v31.4s\n" + "ldr q5, [x26, x24]\n" + "fmla v2.4s, v31.4s, v0.s[1]\n" + "ldr q10, [x14]\n" + "fadd v18.4s, v25.4s, v17.4s\n" + "add %[inptr0], %[inptr0], #16\n" + "fadd v27.4s, v16.4s, v29.4s\n" + "add x25, x25, #16\n" + "fsub v14.4s, v21.4s, v24.4s\n" + "ldr q30, [x14, %[in_col_stride1]]\n" + "fadd v2.4s, v2.4s, v3.4s\n" + "ldr q31, [x14, x21]\n" + "fsub v28.4s, v25.4s, v17.4s\n" + "add x13, x13, #16\n" + "fadd v27.4s, v27.4s, v18.4s\n" + "add x26, x26, #16\n" + "mov v21.16b, v29.16b\n" + "fadd v20.4s, v19.4s, v22.4s\n" + "fsub v17.4s, v19.4s, v22.4s\n" + "fadd v29.4s, v12.4s, v26.4s\n" + "fmul v28.4s, v28.4s, v0.s[0]\n" + "fsub v16.4s, v12.4s, v26.4s\n" + "fmla v21.4s, v18.4s, v0.s[1]\n" + "ldr q23, [x14, x22]\n" + "fadd v24.4s, v6.4s, v20.4s\n" + "mov v6.16b, v20.16b\n" + "fadd v25.4s, v30.4s, v31.4s\n" + "fsub v22.4s, v30.4s, v31.4s\n" + "fadd v20.4s, v14.4s, v28.4s\n" + "mov v3.16b, v14.16b\n" + "fmul v16.4s, v16.4s, v0.s[0]\n" + "fmla v6.4s, v29.4s, v0.s[1]\n" + "fadd v24.4s, v24.4s, v29.4s\n" + "ldr q26, [x14, x23]\n" + "fmla v3.4s, v28.4s, v0.s[1]\n" + "fadd v14.4s, v23.4s, v26.4s\n" + "fadd v29.4s, v10.4s, v25.4s\n" + "fsub v23.4s, v23.4s, v26.4s\n" + "mov v10.16b, v25.16b\n" + "fadd v31.4s, v11.4s, v27.4s\n" + "fsub v12.4s, v11.4s, v27.4s\n" + "ldr q18, [x14, x24]\n" + "fadd v3.4s, v3.4s, v9.4s\n" + "ldr q19, [x27]\n" + "fadd v29.4s, v29.4s, v14.4s\n" + "add x14, x14, #16\n" + "fmul v23.4s, v23.4s, v0.s[0]\n" + "fmla v10.4s, v14.4s, v0.s[1]\n" + "fadd v25.4s, v7.4s, v31.4s\n" + "mov v26.16b, v31.16b\n" + "fadd v31.4s, v15.4s, v20.4s\n" + "fsub v11.4s, v15.4s, v20.4s\n" + "fadd v28.4s, v24.4s, v29.4s\n" + "fsub v24.4s, v24.4s, v29.4s\n" + "fadd v30.4s, v13.4s, v21.4s\n" + "fsub v9.4s, v13.4s, v21.4s\n" + "fadd v20.4s, v17.4s, v16.4s\n" + "mov v7.16b, v17.16b\n" + "fadd v15.4s, v8.4s, v31.4s\n" + "mov v14.16b, v31.16b\n" + "fadd v25.4s, v25.4s, v28.4s\n" + "fmul v24.4s, v24.4s, v0.s[0]\n" + "fmla v7.4s, v16.4s, v0.s[1]\n" + "ldr q27, [x27, 
%[in_col_stride1]]\n" + "fmla v26.4s, v28.4s, v0.s[1]\n" + "ldr q29, [x27, x21]\n" + "fadd v13.4s, v4.4s, v30.4s\n" + "mov v4.16b, v30.16b\n" + "str q25, [%[outptr0]]\n" // Store output (0, 0) + "fadd v17.4s, v22.4s, v23.4s\n" + "fadd v7.4s, v7.4s, v5.4s\n" + "ldr q28, [x27, x22]\n" + "str q26, [x28]\n" // Store output (2, 0) + "mov v8.16b, v22.16b\n" + "fadd v16.4s, v27.4s, v29.4s\n" + "fsub v29.4s, v27.4s, v29.4s\n" + "fadd v21.4s, v12.4s, v24.4s\n" + "mov v26.16b, v12.16b\n" + "fmla v8.4s, v23.4s, v0.s[1]\n" + "fadd v22.4s, v20.4s, v17.4s\n" + "fsub v20.4s, v20.4s, v17.4s\n" + "ldr q23, [x27, x23]\n" + "fadd v19.4s, v19.4s, v16.4s\n" + "mov v16.16b, v16.16b\n" + "str q21, [x17]\n" // Store output (1, 0) + "fadd v30.4s, v28.4s, v23.4s\n" + "fadd v8.4s, v8.4s, v18.4s\n" + "ldr q25, [x27, x24]\n" + "fsub v27.4s, v28.4s, v23.4s\n" + "add x27, x27, #16\n" + "mov v5.16b, v29.16b\n" + "fmla v26.4s, v24.4s, v0.s[1]\n" + "fadd v19.4s, v19.4s, v30.4s\n" + "fmla v16.4s, v30.4s, v0.s[1]\n" + "fadd v15.4s, v15.4s, v22.4s\n" + "fmul v20.4s, v20.4s, v0.s[0]\n" + "fmul v27.4s, v27.4s, v0.s[0]\n" + "fmla v14.4s, v22.4s, v0.s[1]\n" + "mov v28.16b, v11.16b\n" + "fadd v21.4s, v6.4s, v10.4s\n" + "fadd v26.4s, v26.4s, v19.4s\n" + "fsub v10.4s, v6.4s, v10.4s\n" + "str q15, [%[outptr0], %[output_col_stride1]]\n" // Store output (0, 1) + "fadd v12.4s, v11.4s, v20.4s\n" + "str q14, [x28, %[output_col_stride1]]\n" // Store output (2, 1) + "fadd v18.4s, v29.4s, v27.4s\n" + "fmla v5.4s, v27.4s, v0.s[1]\n" + "fmla v28.4s, v20.4s, v0.s[1]\n" + "str q26, [x18]\n" // Store output (3, 0) + "fadd v13.4s, v13.4s, v21.4s\n" + "str q12, [x17, %[output_col_stride1]]\n" // Store output (1, 1) + "fmul v10.4s, v10.4s, v0.s[0]\n" + "fmla v4.4s, v21.4s, v0.s[1]\n" + "mov v15.16b, v9.16b\n" + "fadd v5.4s, v5.4s, v25.4s\n" + "fadd v28.4s, v28.4s, v18.4s\n" + "str q13, [%[outptr0], x15]\n" // Store output (0, 2) + "fadd v6.4s, v2.4s, v3.4s\n" + "fadd v13.4s, v9.4s, v10.4s\n" + "fmla v15.4s, v10.4s, v0.s[1]\n" + "str q4, [x28, x15]\n" // Store output (2, 2) + "fadd v30.4s, v7.4s, v8.4s\n" + "str q28, [x18, %[output_col_stride1]]\n" // Store output (3, 1) + "fsub v2.4s, v2.4s, v3.4s\n" + "fadd v1.4s, v1.4s, v6.4s\n" + "fsub v8.4s, v7.4s, v8.4s\n" + "str q13, [x17, x15]\n" // Store output (1, 2) + "fadd v15.4s, v15.4s, v16.4s\n" + "mov v6.16b, v6.16b\n" + "mov v9.16b, v2.16b\n" + "fadd v1.4s, v1.4s, v30.4s\n" + "fmul v8.4s, v8.4s, v0.s[0]\n" + "str q15, [x18, x15]\n" // Store output (3, 2) + "fmla v6.4s, v30.4s, v0.s[1]\n" + "str q1, [%[outptr0], x16]\n" // Store output (0, 3) + "fadd v2.4s, v2.4s, v8.4s\n" + "str q6, [x28, x16]\n" // Store output (2, 3) + "fmla v9.4s, v8.4s, v0.s[1]\n" + "add %[outptr0], %[outptr0], #16\n" + "add x28, x28, #16\n" + "str q2, [x17, x16]\n" // Store output (1, 3) + "fadd v9.4s, v9.4s, v5.4s\n" + "add x17, x17, #16\n" + "str q9, [x18, x16]\n" // Store output (3, 3) + "add x18, x18, #16\n" + "4:\n" // Double + "cmp x20, #2\n" + "blt 5f\n" + "ldr d17, [%[inptr0]]\n" + "ldr d23, [%[inptr0], %[in_col_stride1]]\n" + "sub x20, x20, #2\n" + "ldr d27, [%[inptr0], x21]\n" + "ldr d24, [%[inptr0], x22]\n" + "fadd v4.4s, v23.4s, v27.4s\n" + "ldr d11, [%[inptr0], x23]\n" + "fadd v10.4s, v24.4s, v11.4s\n" + "ldr d12, [%[inptr0], x24]\n" + "fsub v13.4s, v23.4s, v27.4s\n" + "ldr d20, [x25]\n" + "fsub v11.4s, v24.4s, v11.4s\n" + "ldr d19, [x25, %[in_col_stride1]]\n" + "fadd v7.4s, v17.4s, v4.4s\n" + "ldr d22, [x25, x21]\n" + "mov v4.16b, v4.16b\n" + "ldr d14, [x25, x22]\n" + "mov v1.16b, v13.16b\n" + "ldr d18, [x25, 
x23]\n" + "fmul v11.4s, v11.4s, v0.s[0]\n" + "ldr d3, [x25, x24]\n" + "fadd v7.4s, v7.4s, v10.4s\n" + "ldr d16, [x13]\n" + "fmla v4.4s, v10.4s, v0.s[1]\n" + "ldr d21, [x13, %[in_col_stride1]]\n" + "fadd v2.4s, v19.4s, v22.4s\n" + "ldr d24, [x13, x21]\n" + "fadd v8.4s, v13.4s, v11.4s\n" + "ldr d25, [x13, x22]\n" + "fmla v1.4s, v11.4s, v0.s[1]\n" + "ldr d17, [x13, x23]\n" + "fadd v23.4s, v14.4s, v18.4s\n" + "ldr d9, [x13, x24]\n" + "fadd v11.4s, v20.4s, v2.4s\n" + "ldr d6, [x26]\n" + "fsub v15.4s, v19.4s, v22.4s\n" + "ldr d19, [x26, %[in_col_stride1]]\n" + "fadd v1.4s, v1.4s, v12.4s\n" + "ldr d22, [x26, x21]\n" + "fsub v31.4s, v14.4s, v18.4s\n" + "ldr d12, [x26, x22]\n" + "fadd v11.4s, v11.4s, v23.4s\n" + "ldr d26, [x26, x23]\n" + "mov v13.16b, v2.16b\n" + "ldr d5, [x26, x24]\n" + "mov v2.16b, v15.16b\n" + "ldr d10, [x14]\n" + "fmul v31.4s, v31.4s, v0.s[0]\n" + "add %[inptr0], %[inptr0], #8\n" + "fmla v13.4s, v23.4s, v0.s[1]\n" + "add x25, x25, #8\n" + "fadd v29.4s, v21.4s, v24.4s\n" + "add x13, x13, #8\n" + "fsub v14.4s, v21.4s, v24.4s\n" + "ldr d30, [x14, %[in_col_stride1]]\n" + "fadd v15.4s, v15.4s, v31.4s\n" + "add x26, x26, #8\n" + "fmla v2.4s, v31.4s, v0.s[1]\n" + "fadd v18.4s, v25.4s, v17.4s\n" + "fadd v27.4s, v16.4s, v29.4s\n" + "fsub v28.4s, v25.4s, v17.4s\n" + "mov v21.16b, v29.16b\n" + "fadd v20.4s, v19.4s, v22.4s\n" + "fsub v17.4s, v19.4s, v22.4s\n" + "ldr d31, [x14, x21]\n" + "fadd v2.4s, v2.4s, v3.4s\n" + "ldr d23, [x14, x22]\n" + "fadd v27.4s, v27.4s, v18.4s\n" + "fmul v28.4s, v28.4s, v0.s[0]\n" + "fmla v21.4s, v18.4s, v0.s[1]\n" + "fadd v29.4s, v12.4s, v26.4s\n" + "fadd v24.4s, v6.4s, v20.4s\n" + "fsub v16.4s, v12.4s, v26.4s\n" + "mov v6.16b, v20.16b\n" + "fadd v25.4s, v30.4s, v31.4s\n" + "fsub v22.4s, v30.4s, v31.4s\n" + "fadd v31.4s, v11.4s, v27.4s\n" + "fsub v12.4s, v11.4s, v27.4s\n" + "ldr d26, [x14, x23]\n" + "fadd v24.4s, v24.4s, v29.4s\n" + "fmul v16.4s, v16.4s, v0.s[0]\n" + "fmla v6.4s, v29.4s, v0.s[1]\n" + "mov v3.16b, v14.16b\n" + "fadd v20.4s, v14.4s, v28.4s\n" + "fadd v29.4s, v10.4s, v25.4s\n" + "mov v10.16b, v25.16b\n" + "fadd v25.4s, v7.4s, v31.4s\n" + "fmla v3.4s, v28.4s, v0.s[1]\n" + "fadd v14.4s, v23.4s, v26.4s\n" + "fsub v23.4s, v23.4s, v26.4s\n" + "mov v26.16b, v31.16b\n" + "fadd v31.4s, v15.4s, v20.4s\n" + "fsub v11.4s, v15.4s, v20.4s\n" + "fadd v20.4s, v17.4s, v16.4s\n" + "mov v7.16b, v17.16b\n" + "fadd v3.4s, v3.4s, v9.4s\n" + "ldr d18, [x14, x24]\n" + "fadd v29.4s, v29.4s, v14.4s\n" + "add x14, x14, #8\n" + "fmla v7.4s, v16.4s, v0.s[1]\n" + "ldr d19, [x27]\n" + "fmul v23.4s, v23.4s, v0.s[0]\n" + "fmla v10.4s, v14.4s, v0.s[1]\n" + "fadd v15.4s, v8.4s, v31.4s\n" + "mov v14.16b, v31.16b\n" + "fadd v28.4s, v24.4s, v29.4s\n" + "fsub v24.4s, v24.4s, v29.4s\n" + "fadd v7.4s, v7.4s, v5.4s\n" + "ldr d27, [x27, %[in_col_stride1]]\n" + "fadd v30.4s, v13.4s, v21.4s\n" + "fsub v9.4s, v13.4s, v21.4s\n" + "fadd v17.4s, v22.4s, v23.4s\n" + "mov v8.16b, v22.16b\n" + "fadd v25.4s, v25.4s, v28.4s\n" + "fmul v24.4s, v24.4s, v0.s[0]\n" + "fmla v26.4s, v28.4s, v0.s[1]\n" + "ldr d29, [x27, x21]\n" + "fmla v8.4s, v23.4s, v0.s[1]\n" + "ldr d28, [x27, x22]\n" + "fadd v13.4s, v4.4s, v30.4s\n" + "mov v4.16b, v30.16b\n" + "str d25, [%[outptr0]]\n" // Store output (0, 0) + "fadd v16.4s, v27.4s, v29.4s\n" + "str d26, [x28]\n" // Store output (2, 0) + "fsub v29.4s, v27.4s, v29.4s\n" + "fadd v8.4s, v8.4s, v18.4s\n" + "ldr d23, [x27, x23]\n" + "fadd v30.4s, v28.4s, v23.4s\n" + "ldr d25, [x27, x24]\n" + "fadd v19.4s, v19.4s, v16.4s\n" + "add x27, x27, #8\n" + "fsub v27.4s, v28.4s, 
v23.4s\n" + "mov v16.16b, v16.16b\n" + "fadd v22.4s, v20.4s, v17.4s\n" + "fsub v20.4s, v20.4s, v17.4s\n" + "fadd v21.4s, v12.4s, v24.4s\n" + "mov v26.16b, v12.16b\n" + "fadd v19.4s, v19.4s, v30.4s\n" + "fmla v16.4s, v30.4s, v0.s[1]\n" + "fmul v27.4s, v27.4s, v0.s[0]\n" + "mov v5.16b, v29.16b\n" + "fmla v26.4s, v24.4s, v0.s[1]\n" + "fadd v15.4s, v15.4s, v22.4s\n" + "str d21, [x17]\n" // Store output (1, 0) + "fmul v20.4s, v20.4s, v0.s[0]\n" + "fmla v14.4s, v22.4s, v0.s[1]\n" + "mov v28.16b, v11.16b\n" + "fadd v18.4s, v29.4s, v27.4s\n" + "fmla v5.4s, v27.4s, v0.s[1]\n" + "str d15, [%[outptr0], %[output_col_stride1]]\n" // Store output (0, 1) + "fadd v26.4s, v26.4s, v19.4s\n" + "fadd v12.4s, v11.4s, v20.4s\n" + "fmla v28.4s, v20.4s, v0.s[1]\n" + "str d14, [x28, %[output_col_stride1]]\n" // Store output (2, 1) + "fadd v21.4s, v6.4s, v10.4s\n" + "fadd v5.4s, v5.4s, v25.4s\n" + "fsub v10.4s, v6.4s, v10.4s\n" + "str d26, [x18]\n" // Store output (3, 0) + "mov v15.16b, v9.16b\n" + "str d12, [x17, %[output_col_stride1]]\n" // Store output (1, 1) + "fadd v28.4s, v28.4s, v18.4s\n" + "fadd v13.4s, v13.4s, v21.4s\n" + "fmla v4.4s, v21.4s, v0.s[1]\n" + "fmul v10.4s, v10.4s, v0.s[0]\n" + "fadd v6.4s, v2.4s, v3.4s\n" + "fadd v30.4s, v7.4s, v8.4s\n" + "fsub v2.4s, v2.4s, v3.4s\n" + "str d28, [x18, %[output_col_stride1]]\n" // Store output (3, 1) + "fsub v8.4s, v7.4s, v8.4s\n" + "str d13, [%[outptr0], x15]\n" // Store output (0, 2) + "str d4, [x28, x15]\n" // Store output (2, 2) + "fadd v13.4s, v9.4s, v10.4s\n" + "fmla v15.4s, v10.4s, v0.s[1]\n" + "fadd v1.4s, v1.4s, v6.4s\n" + "mov v6.16b, v6.16b\n" + "fmul v8.4s, v8.4s, v0.s[0]\n" + "mov v9.16b, v2.16b\n" + "str d13, [x17, x15]\n" // Store output (1, 2) + "fadd v15.4s, v15.4s, v16.4s\n" + "fadd v1.4s, v1.4s, v30.4s\n" + "fmla v6.4s, v30.4s, v0.s[1]\n" + "fadd v2.4s, v2.4s, v8.4s\n" + "fmla v9.4s, v8.4s, v0.s[1]\n" + "str d15, [x18, x15]\n" // Store output (3, 2) + "str d1, [%[outptr0], x16]\n" // Store output (0, 3) + "str d2, [x17, x16]\n" // Store output (1, 3) + "fadd v9.4s, v9.4s, v5.4s\n" + "str d6, [x28, x16]\n" // Store output (2, 3) + "add %[outptr0], %[outptr0], #8\n" + "add x17, x17, #8\n" + "add x28, x28, #8\n" + "str d9, [x18, x16]\n" // Store output (3, 3) + "add x18, x18, #8\n" + "5:\n" // Scalar + "cbz x20, 6f\n" + "ldr s17, [%[inptr0]]\n" + "ldr s23, [%[inptr0], %[in_col_stride1]]\n" + "ldr s27, [%[inptr0], x21]\n" + "fadd v4.4s, v23.4s, v27.4s\n" + "ldr s24, [%[inptr0], x22]\n" + "fsub v13.4s, v23.4s, v27.4s\n" + "ldr s11, [%[inptr0], x23]\n" + "fadd v10.4s, v24.4s, v11.4s\n" + "ldr s12, [%[inptr0], x24]\n" + "fsub v11.4s, v24.4s, v11.4s\n" + "ldr s20, [x25]\n" + "fadd v7.4s, v17.4s, v4.4s\n" + "ldr s19, [x25, %[in_col_stride1]]\n" + "mov v4.16b, v4.16b\n" + "ldr s22, [x25, x21]\n" + "mov v1.16b, v13.16b\n" + "ldr s14, [x25, x22]\n" + "fmul v11.4s, v11.4s, v0.s[0]\n" + "ldr s18, [x25, x23]\n" + "fadd v7.4s, v7.4s, v10.4s\n" + "ldr s3, [x25, x24]\n" + "fmla v4.4s, v10.4s, v0.s[1]\n" + "ldr s16, [x13]\n" + "fadd v2.4s, v19.4s, v22.4s\n" + "ldr s21, [x13, %[in_col_stride1]]\n" + "fadd v8.4s, v13.4s, v11.4s\n" + "ldr s24, [x13, x21]\n" + "fmla v1.4s, v11.4s, v0.s[1]\n" + "ldr s25, [x13, x22]\n" + "fadd v23.4s, v14.4s, v18.4s\n" + "ldr s17, [x13, x23]\n" + "fadd v11.4s, v20.4s, v2.4s\n" + "ldr s9, [x13, x24]\n" + "fsub v15.4s, v19.4s, v22.4s\n" + "ldr s6, [x26]\n" + "fadd v1.4s, v1.4s, v12.4s\n" + "ldr s19, [x26, %[in_col_stride1]]\n" + "fsub v31.4s, v14.4s, v18.4s\n" + "ldr s22, [x26, x21]\n" + "fadd v11.4s, v11.4s, v23.4s\n" + "ldr s12, 
[x26, x22]\n" + "mov v13.16b, v2.16b\n" + "ldr s26, [x26, x23]\n" + "mov v2.16b, v15.16b\n" + "ldr s5, [x26, x24]\n" + "fmul v31.4s, v31.4s, v0.s[0]\n" + "ldr s10, [x14]\n" + "fmla v13.4s, v23.4s, v0.s[1]\n" + "fadd v29.4s, v21.4s, v24.4s\n" + "fsub v14.4s, v21.4s, v24.4s\n" + "fadd v18.4s, v25.4s, v17.4s\n" + "fsub v28.4s, v25.4s, v17.4s\n" + "ldr s30, [x14, %[in_col_stride1]]\n" + "fadd v15.4s, v15.4s, v31.4s\n" + "fmla v2.4s, v31.4s, v0.s[1]\n" + "fadd v27.4s, v16.4s, v29.4s\n" + "mov v21.16b, v29.16b\n" + "fadd v20.4s, v19.4s, v22.4s\n" + "fsub v17.4s, v19.4s, v22.4s\n" + "fmul v28.4s, v28.4s, v0.s[0]\n" + "ldr s31, [x14, x21]\n" + "fadd v2.4s, v2.4s, v3.4s\n" + "ldr s23, [x14, x22]\n" + "fadd v27.4s, v27.4s, v18.4s\n" + "fmla v21.4s, v18.4s, v0.s[1]\n" + "fadd v29.4s, v12.4s, v26.4s\n" + "fadd v24.4s, v6.4s, v20.4s\n" + "fsub v16.4s, v12.4s, v26.4s\n" + "mov v6.16b, v20.16b\n" + "fadd v25.4s, v30.4s, v31.4s\n" + "fsub v22.4s, v30.4s, v31.4s\n" + "fadd v20.4s, v14.4s, v28.4s\n" + "mov v3.16b, v14.16b\n" + "fadd v24.4s, v24.4s, v29.4s\n" + "fmla v6.4s, v29.4s, v0.s[1]\n" + "fmul v16.4s, v16.4s, v0.s[0]\n" + "ldr s26, [x14, x23]\n" + "fmla v3.4s, v28.4s, v0.s[1]\n" + "fadd v14.4s, v23.4s, v26.4s\n" + "fadd v29.4s, v10.4s, v25.4s\n" + "fsub v23.4s, v23.4s, v26.4s\n" + "mov v10.16b, v25.16b\n" + "fadd v31.4s, v11.4s, v27.4s\n" + "fsub v12.4s, v11.4s, v27.4s\n" + "ldr s18, [x14, x24]\n" + "fadd v3.4s, v3.4s, v9.4s\n" + "ldr s19, [x27]\n" + "fadd v29.4s, v29.4s, v14.4s\n" + "fmul v23.4s, v23.4s, v0.s[0]\n" + "fmla v10.4s, v14.4s, v0.s[1]\n" + "fadd v25.4s, v7.4s, v31.4s\n" + "mov v26.16b, v31.16b\n" + "fadd v31.4s, v15.4s, v20.4s\n" + "fsub v11.4s, v15.4s, v20.4s\n" + "fadd v30.4s, v13.4s, v21.4s\n" + "fsub v9.4s, v13.4s, v21.4s\n" + "fadd v28.4s, v24.4s, v29.4s\n" + "fsub v24.4s, v24.4s, v29.4s\n" + "ldr s27, [x27, %[in_col_stride1]]\n" + "fadd v15.4s, v8.4s, v31.4s\n" + "mov v14.16b, v31.16b\n" + "fadd v13.4s, v4.4s, v30.4s\n" + "mov v4.16b, v30.16b\n" + "fadd v25.4s, v25.4s, v28.4s\n" + "fmla v26.4s, v28.4s, v0.s[1]\n" + "fmul v24.4s, v24.4s, v0.s[0]\n" + "fadd v21.4s, v6.4s, v10.4s\n" + "fsub v10.4s, v6.4s, v10.4s\n" + "fadd v6.4s, v2.4s, v3.4s\n" + "fsub v2.4s, v2.4s, v3.4s\n" + "ldr s29, [x27, x21]\n" + "str s25, [%[outptr0]]\n" // Store output (0, 0) + "fadd v20.4s, v17.4s, v16.4s\n" + "str s26, [x28]\n" // Store output (2, 0) + "mov v7.16b, v17.16b\n" + "fadd v17.4s, v22.4s, v23.4s\n" + "mov v8.16b, v22.16b\n" + "fadd v13.4s, v13.4s, v21.4s\n" + "fmul v10.4s, v10.4s, v0.s[0]\n" + "fmla v7.4s, v16.4s, v0.s[1]\n" + "ldr s28, [x27, x22]\n" + "fmla v8.4s, v23.4s, v0.s[1]\n" + "ldr s23, [x27, x23]\n" + "fmla v4.4s, v21.4s, v0.s[1]\n" + "ldr s25, [x27, x24]\n" + "str s13, [%[outptr0], x15]\n" // Store output (0, 2) + "fadd v16.4s, v27.4s, v29.4s\n" + "fadd v7.4s, v7.4s, v5.4s\n" + "fadd v30.4s, v28.4s, v23.4s\n" + "fadd v8.4s, v8.4s, v18.4s\n" + "fsub v29.4s, v27.4s, v29.4s\n" + "str s4, [x28, x15]\n" // Store output (2, 2) + "fsub v27.4s, v28.4s, v23.4s\n" + "fadd v19.4s, v19.4s, v16.4s\n" + "mov v16.16b, v16.16b\n" + "fadd v21.4s, v12.4s, v24.4s\n" + "mov v26.16b, v12.16b\n" + "mov v5.16b, v29.16b\n" + "fadd v22.4s, v20.4s, v17.4s\n" + "fmul v27.4s, v27.4s, v0.s[0]\n" + "fmla v16.4s, v30.4s, v0.s[1]\n" + "fadd v19.4s, v19.4s, v30.4s\n" + "fmla v26.4s, v24.4s, v0.s[1]\n" + "str s21, [x17]\n" // Store output (1, 0) + "fsub v20.4s, v20.4s, v17.4s\n" + "fadd v15.4s, v15.4s, v22.4s\n" + "fmla v14.4s, v22.4s, v0.s[1]\n" + "fadd v18.4s, v29.4s, v27.4s\n" + "fmla v5.4s, v27.4s, v0.s[1]\n" + 
"fadd v26.4s, v26.4s, v19.4s\n" + "mov v28.16b, v11.16b\n" + "fmul v20.4s, v20.4s, v0.s[0]\n" + "fadd v13.4s, v9.4s, v10.4s\n" + "str s15, [%[outptr0], %[output_col_stride1]]\n" // Store output (0, 1) + "mov v15.16b, v9.16b\n" + "str s14, [x28, %[output_col_stride1]]\n" // Store output (2, 1) + "fadd v5.4s, v5.4s, v25.4s\n" + "str s26, [x18]\n" // Store output (3, 0) + "fadd v30.4s, v7.4s, v8.4s\n" + "str s13, [x17, x15]\n" // Store output (1, 2) + "fadd v12.4s, v11.4s, v20.4s\n" + "fmla v28.4s, v20.4s, v0.s[1]\n" + "fmla v15.4s, v10.4s, v0.s[1]\n" + "fadd v1.4s, v1.4s, v6.4s\n" + "fsub v8.4s, v7.4s, v8.4s\n" + "mov v6.16b, v6.16b\n" + "mov v9.16b, v2.16b\n" + "str s12, [x17, %[output_col_stride1]]\n" // Store output (1, 1) + "fadd v28.4s, v28.4s, v18.4s\n" + "fadd v15.4s, v15.4s, v16.4s\n" + "fadd v1.4s, v1.4s, v30.4s\n" + "fmul v8.4s, v8.4s, v0.s[0]\n" + "fmla v6.4s, v30.4s, v0.s[1]\n" + "str s28, [x18, %[output_col_stride1]]\n" // Store output (3, 1) + "str s1, [%[outptr0], x16]\n" // Store output (0, 3) + "str s6, [x28, x16]\n" // Store output (2, 3) + "fadd v2.4s, v2.4s, v8.4s\n" + "str s15, [x18, x15]\n" // Store output (3, 2) + "fmla v9.4s, v8.4s, v0.s[1]\n" + "str s2, [x17, x16]\n" // Store output (1, 3) + "fadd v9.4s, v9.4s, v5.4s\n" + "str s9, [x18, x16]\n" // Store output (3, 3) + "6:\n" // End + : [outptr0] "+r" (output), [inptr0] "+r" (inptr) + : [output_col_stride1] "r" (output_col_stride * sizeof(float)), [pcoeffs] "r" (coeffs), [n_channels] "r" ((long) n_channels), [in_row_stride] "r" (6 * matrix_stride * sizeof(float)), [in_col_stride1] "r" (matrix_stride * sizeof(float)), [output_row_stride] "r" (output_row_stride * sizeof(float)) + : "cc", "v0", "v1", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v2", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v3", "v30", "v31", "v4", "v5", "v6", "v7", "v8", "v9", "x13", "x14", "x15", "x16", "x17", "x18", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "memory" + ); + } +} + +#else + +template <> +void winograd::OutputTransform<3, 3, 6, 6, float, float, winograd::WinogradRoots::Integers>::transform_tile( + const int n_channels, + const float* inptr, + const int matrix_stride, + const float* bptr, + float* const output, + const int output_row_stride, + const int output_col_stride +) +{ + // Construct a map to the output cells + float *outptrs[output_tile_rows][output_tile_cols]; + for (int i = 0; i < output_tile_rows; i++) + { + for (int j = 0; j < output_tile_cols; j++) + { + outptrs[i][j] = output + i*output_row_stride + j*output_col_stride; + } + } + + // For each channel of the output + int channels_remaining = n_channels; +#ifdef __arm__ + for (; channels_remaining >= 2; channels_remaining -= 2) + { + // Matrices used and computed during this transform + float32x2_t F[6][6], FZ[6][4], f[4][4], b; + + // Read a 6x6 tile in the Winograd domain + for (int i = 0, m = 0; i < 6; i++) + { + for (int j = 0; j < 6; j++, m++) + { + F[i][j] = vld1_f32(inptr + m*matrix_stride); + } + } + inptr += 2; + + // Compute the matrix F Z + for (int i = 0; i < 6; i++) + { + // FZ[i][0] = 1*F[i][0] + 1*F[i][1] + 1*F[i][2] + 1*F[i][3] + 1*F[i][4]; + FZ[i][0] = vadd_f32(vadd_f32(vadd_f32(F[i][0], F[i][1]), vadd_f32(F[i][2], F[i][3])), F[i][4]); + + // FZ[i][1] = 1*F[i][1] + -1*F[i][2] + 2*F[i][3] + -2*F[i][4]; + FZ[i][1] = vmla_n_f32(vsub_f32(F[i][1], F[i][2]), vsub_f32(F[i][3], F[i][4]), 2.0f); + + // FZ[i][2] = 1*F[i][1] + 1*F[i][2] + 4*F[i][3] + 4*F[i][4]; + FZ[i][2] = 
vmla_n_f32(vadd_f32(F[i][1], F[i][2]), vadd_f32(F[i][3], F[i][4]), 4.0f); + + // FZ[i][3] = 1*F[i][1] + -1*F[i][2] + 8*F[i][3] + -8*F[i][4] + 1*F[i][5]; + FZ[i][3] = vadd_f32(vmla_n_f32(vsub_f32(F[i][1], F[i][2]), vsub_f32(F[i][3], F[i][4]), 8.0f), F[i][5]); + } + + // Compute the output tile f = ZT F Z + for (int j = 0; j < 4; j++) + { + // f[0][j] = 1*FZ[0][j] + 1*FZ[1][j] + 1*FZ[2][j] + 1*FZ[3][j] + 1*FZ[4][j]; + f[0][j] = vadd_f32(vadd_f32(vadd_f32(FZ[0][j], FZ[1][j]), vadd_f32(FZ[2][j], FZ[3][j])), FZ[4][j]); + + // f[1][j] = 1*FZ[1][j] + -1*FZ[2][j] + 2*FZ[3][j] + -2*FZ[4][j]; + f[1][j] = vmla_n_f32(vsub_f32(FZ[1][j], FZ[2][j]), vsub_f32(FZ[3][j], FZ[4][j]), 2.0f); + + // f[2][j] = 1*FZ[1][j] + 1*FZ[2][j] + 4*FZ[3][j] + 4*FZ[4][j]; + f[2][j] = vmla_n_f32(vadd_f32(FZ[1][j], FZ[2][j]), vadd_f32(FZ[3][j], FZ[4][j]), 4.0f); + + // f[3][j] = 1*FZ[1][j] + -1*FZ[2][j] + 8*FZ[3][j] + -8*FZ[4][j] + 1*FZ[5][j]; + f[3][j] = vadd_f32(vmla_n_f32(vsub_f32(FZ[1][j], FZ[2][j]), vsub_f32(FZ[3][j], FZ[4][j]), 8.0f), FZ[5][j]); + } + + // Write out the output tile + if (bptr != nullptr) + { + b = vld1_f32(bptr); + bptr += 2; + } + else + { + b = vdup_n_f32(0.0f); + } + for (int i = 0; i < output_tile_rows; i++) + { + for (int j = 0; j < output_tile_cols; j++) + { + vst1_f32(outptrs[i][j], vadd_f32(f[i][j], b)); + outptrs[i][j] += 2; + } + } + } +#endif // __arm__ + for (; channels_remaining; channels_remaining--) + { + // Matrices used and computed during this transform + float F[6][6], FZ[6][4], f[4][4], b; + + // Read a 6x6 tile in the Winograd domain + for (int i = 0, m = 0; i < 6; i++) + { + for (int j = 0; j < 6; j++, m++) + { + F[i][j] = *(inptr + m*matrix_stride); + } + } + inptr++; + + // Compute the matrix F Z + for (int i = 0; i < 6; i++) + { + FZ[i][0] = 1*F[i][0] + 1*F[i][1] + 1*F[i][2] + 1*F[i][3] + 1*F[i][4]; + FZ[i][1] = 1*F[i][1] + -1*F[i][2] + 2*F[i][3] + -2*F[i][4]; + FZ[i][2] = 1*F[i][1] + 1*F[i][2] + 4*F[i][3] + 4*F[i][4]; + FZ[i][3] = 1*F[i][1] + -1*F[i][2] + 8*F[i][3] + -8*F[i][4] + 1*F[i][5]; + } + + // Compute the output tile f = ZT F Z + for (int j = 0; j < 4; j++) + { + f[0][j] = 1*FZ[0][j] + 1*FZ[1][j] + 1*FZ[2][j] + 1*FZ[3][j] + 1*FZ[4][j]; + f[1][j] = 1*FZ[1][j] + -1*FZ[2][j] + 2*FZ[3][j] + -2*FZ[4][j]; + f[2][j] = 1*FZ[1][j] + 1*FZ[2][j] + 4*FZ[3][j] + 4*FZ[4][j]; + f[3][j] = 1*FZ[1][j] + -1*FZ[2][j] + 8*FZ[3][j] + -8*FZ[4][j] + 1*FZ[5][j]; + } + + // Write out the output tile + if (bptr != nullptr) + { + b = *(bptr++); + } + else + { + b = 0.0f; + } + for (int i = 0; i < output_tile_rows; i++) + { + for (int j = 0; j < output_tile_cols; j++) + { + *(outptrs[i][j]++) = f[i][j] + b; + } + } + } +} + +#endif + +template class OutputTransform<3, 3, 6, 6, float, float, winograd::WinogradRoots::Integers>; + +} // namespace winograd -- cgit v1.2.1
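
Reviewer note, appended below the patch rather than part of it: the two AArch64 assembly blocks above are hand-unrolled versions of the scalar fallback at the bottom of the file. The only floating-point constants they load are coeffs[2] = {2.0f, 4.0f}, placed in the two lanes of d0; the 2x multipliers of the transform are applied with fmul by v0.s[0], the 4x multipliers with fmla by v0.s[1], and the 8x multipliers by chaining the two (fmul by 2, then fmla by 4). A minimal scalar sketch of what each kernel computes per channel (the output tile f = Z^T F Z plus an optional bias, using the same coefficients the fallback spells out; the function and parameter names here are illustrative, not library API):

// Sketch only: one channel of the 4x4-output, 3x3-kernel Winograd output
// transform. F is the 6x6 tile in the Winograd domain; f is the 4x4 tile.
void winograd_output_tile_sketch(const float F[6][6], float bias, float f[4][4])
{
  // FZ = F * Z, where Z^T has rows {1,1,1,1,1,0}, {0,1,-1,2,-2,0},
  // {0,1,1,4,4,0} and {0,1,-1,8,-8,1}.
  float FZ[6][4];
  for (int i = 0; i < 6; i++)
  {
    FZ[i][0] = F[i][0] + F[i][1] + F[i][2] + F[i][3] + F[i][4];
    FZ[i][1] = (F[i][1] - F[i][2]) + 2.0f * (F[i][3] - F[i][4]);
    FZ[i][2] = (F[i][1] + F[i][2]) + 4.0f * (F[i][3] + F[i][4]);
    FZ[i][3] = (F[i][1] - F[i][2]) + 8.0f * (F[i][3] - F[i][4]) + F[i][5];
  }
  // f = Z^T * (F Z); the bias (zero when bptr is null) is added to every cell.
  for (int j = 0; j < 4; j++)
  {
    f[0][j] = FZ[0][j] + FZ[1][j] + FZ[2][j] + FZ[3][j] + FZ[4][j] + bias;
    f[1][j] = (FZ[1][j] - FZ[2][j]) + 2.0f * (FZ[3][j] - FZ[4][j]) + bias;
    f[2][j] = (FZ[1][j] + FZ[2][j]) + 4.0f * (FZ[3][j] + FZ[4][j]) + bias;
    f[3][j] = (FZ[1][j] - FZ[2][j]) + 8.0f * (FZ[3][j] - FZ[4][j]) + FZ[5][j] + bias;
  }
}

Keeping both multipliers in one two-lane register lets the generated code reach every transform constant as a lane of v0 instead of materialising separate 2, 4 and 8 immediates, which is also why no 8.0 constant ever appears in the assembly.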