path: root/src/core/NEON/kernels/convolution/winograd/input_transforms
Diffstat (limited to 'src/core/NEON/kernels/convolution/winograd/input_transforms')
-rw-r--r--   src/core/NEON/kernels/convolution/winograd/input_transforms/a64_fp16_6x6.cpp       280
-rw-r--r--   src/core/NEON/kernels/convolution/winograd/input_transforms/a64_fp32_6x6.cpp      1140
-rw-r--r--   src/core/NEON/kernels/convolution/winograd/input_transforms/arm_fp32_1x8.cpp       155
-rw-r--r--   src/core/NEON/kernels/convolution/winograd/input_transforms/arm_fp32_4x4.cpp       251
-rw-r--r--   src/core/NEON/kernels/convolution/winograd/input_transforms/arm_fp32_6x6.cpp       202
-rw-r--r--   src/core/NEON/kernels/convolution/winograd/input_transforms/sme_fp32_mla_6x6.cpp   363
-rw-r--r--   src/core/NEON/kernels/convolution/winograd/input_transforms/sve_fp32_6x6.cpp       361
7 files changed, 2752 insertions, 0 deletions
diff --git a/src/core/NEON/kernels/convolution/winograd/input_transforms/a64_fp16_6x6.cpp b/src/core/NEON/kernels/convolution/winograd/input_transforms/a64_fp16_6x6.cpp
new file mode 100644
index 0000000000..ad759b225e
--- /dev/null
+++ b/src/core/NEON/kernels/convolution/winograd/input_transforms/a64_fp16_6x6.cpp
@@ -0,0 +1,280 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#if defined(__aarch64__) && defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
+
+#include <arm_neon.h>
+#include <cstddef>
+
+namespace arm_conv {
+namespace winograd {
+namespace input_transform {
+
+void a64_fp16_6x6(
+ const unsigned int n_channels,
+ const __fp16* const input_base,
+ const size_t input_row_stride,
+ const size_t input_col_stride,
+ __fp16* outptr,
+ const size_t matrix_stride
+)
+{
+ constexpr int inner_tile_rows = 6;
+ constexpr int inner_tile_cols = 6;
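+
+ // The per-row formulas below implement the Winograd F(4x4, 3x3) input
+ // transform U = B^T . x . B; reading the coefficients off the commented
+ // expressions, the transform matrix is
+ //
+ //         [ 4   0  -5   0   1   0 ]
+ //         [ 0  -4  -4   1   1   0 ]
+ //   B^T = [ 0   4  -4  -1   1   0 ]
+ //         [ 0  -2  -1   2   1   0 ]
+ //         [ 0   2  -1  -2   1   0 ]
+ //         [ 0   4   0  -5   0   1 ]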
+
+ // Get pointers into the input tile
+ const __fp16 *x_ptrs[inner_tile_rows][inner_tile_cols];
+ for (int i = 0, xi = 0; i < inner_tile_rows; i++, xi++)
+ {
+ // Get a pointer into the row
+ const __fp16* const row_ptr = input_base + xi*input_row_stride;
+
+ for (int j = 0, xj = 0; j < inner_tile_cols; j++, xj++)
+ {
+ x_ptrs[i][j] = row_ptr + xj*input_col_stride;
+ }
+ }
+
+ // Matrices used/computed in this kernel.
+ __fp16 x[inner_tile_rows][inner_tile_cols];
+ __fp16 XTx[inner_tile_rows][inner_tile_cols];
+ __fp16 U[inner_tile_rows][inner_tile_cols];
+ for (int i = 0; i < inner_tile_rows; i++)
+ {
+ for (int j = 0; j < inner_tile_cols; j++)
+ {
+ x[i][j] = XTx[i][j] = 0.0f;
+ }
+ }
+
+ // Perform the Winograd input transformation for each channel in the input
+ // tensor.
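+ // The channel loop is tiled: eight channels at a time (float16x8_t), then
+ // four (float16x4_t), then a scalar tail in plain __fp16 arithmetic.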
+ int channels_remaining = n_channels;
+ for (; channels_remaining >= 8; channels_remaining -= 8)
+ {
+ // Matrices used/computed in this kernel
+ float16x8_t x[inner_tile_rows][inner_tile_cols];
+ float16x8_t XTx[inner_tile_rows][inner_tile_cols];
+ float16x8_t U[inner_tile_rows][inner_tile_cols];
+ for (int i = 0; i < inner_tile_rows; i++)
+ {
+ for (int j = 0; j < inner_tile_cols; j++)
+ {
+ x[i][j] = vdupq_n_f16(0.0f);
+ XTx[i][j] = vdupq_n_f16(0.0f);
+ }
+ }
+
+ // Read a 6x6 tile in the Winograd domain
+ for (int i = 0; i < inner_tile_rows; i++)
+ {
+ for (int j = 0; j < inner_tile_cols; j++)
+ {
+ x[i][j] = vld1q_f16(x_ptrs[i][j]);
+ x_ptrs[i][j] += 8;
+ }
+ }
+
+ // Compute XT . x
+ for (int j = 0; j < inner_tile_cols; j++)
+ {
+ // XTx[0][j] = 4*x[0][j] + -5*x[2][j] + 1*x[4][j];
+ XTx[0][j] = vsubq_f16(vaddq_f16(x[4][j], vmulq_f16(x[0][j], vdupq_n_f16(4.0f))), vmulq_f16(x[2][j], vdupq_n_f16(5.0f)));
+
+ // XTx[1][j] = -4*x[1][j] + -4*x[2][j] + 1*x[3][j] + 1*x[4][j];
+ XTx[1][j] = vsubq_f16(vaddq_f16(x[3][j], x[4][j]), vmulq_f16(vaddq_f16(x[1][j], x[2][j]), vdupq_n_f16(4.0f)));
+
+ // XTx[2][j] = 4*x[1][j] + -4*x[2][j] + -1*x[3][j] + 1*x[4][j];
+ XTx[2][j] = vaddq_f16(vsubq_f16(x[4][j], x[3][j]), vmulq_f16(vsubq_f16(x[1][j], x[2][j]), vdupq_n_f16(4.0f)));
+
+ // XTx[3][j] = -2*x[1][j] + -1*x[2][j] + 2*x[3][j] + 1*x[4][j];
+ XTx[3][j] = vaddq_f16(vsubq_f16(x[4][j], x[2][j]), vmulq_f16(vsubq_f16(x[3][j], x[1][j]), vdupq_n_f16(2.0f)));
+
+ // XTx[4][j] = 2*x[1][j] + -1*x[2][j] + -2*x[3][j] + 1*x[4][j];
+ XTx[4][j] = vaddq_f16(vsubq_f16(x[4][j], x[2][j]), vmulq_f16(vsubq_f16(x[1][j], x[3][j]), vdupq_n_f16(2.0f)));
+
+ // XTx[5][j] = 4*x[1][j] + -5*x[3][j] + 1*x[5][j];
+ XTx[5][j] = vsubq_f16(vaddq_f16(x[5][j], vmulq_f16(x[1][j], vdupq_n_f16(4.0f))), vmulq_f16(x[3][j], vdupq_n_f16(5.0f)));
+ }
+
+ // Compute U = XT . x . X
+ for (int i = 0; i < inner_tile_rows; i++)
+ {
+ // U[i][0] = 4*XTx[i][0] + -5*XTx[i][2] + 1*XTx[i][4];
+ U[i][0] = vsubq_f16(vaddq_f16(XTx[i][4], vmulq_f16(XTx[i][0], vdupq_n_f16(4.0f))), vmulq_f16(XTx[i][2], vdupq_n_f16(5.0f)));
+
+ // U[i][1] = -4*XTx[i][1] + -4*XTx[i][2] + 1*XTx[i][3] + 1*XTx[i][4];
+ U[i][1] = vsubq_f16(vaddq_f16(XTx[i][3], XTx[i][4]), vmulq_f16(vaddq_f16(XTx[i][1], XTx[i][2]), vdupq_n_f16(4.0f)));
+
+ // U[i][2] = 4*XTx[i][1] + -4*XTx[i][2] + -1*XTx[i][3] + 1*XTx[i][4];
+ U[i][2] = vaddq_f16(vsubq_f16(XTx[i][4], XTx[i][3]), vmulq_f16(vsubq_f16(XTx[i][1], XTx[i][2]), vdupq_n_f16(4.0f)));
+
+ // U[i][3] = -2*XTx[i][1] + -1*XTx[i][2] + 2*XTx[i][3] + 1*XTx[i][4];
+ U[i][3] = vaddq_f16(vsubq_f16(XTx[i][4], XTx[i][2]), vmulq_f16(vsubq_f16(XTx[i][3], XTx[i][1]), vdupq_n_f16(2.0f)));
+
+ // U[i][4] = 2*XTx[i][1] + -1*XTx[i][2] + -2*XTx[i][3] + 1*XTx[i][4];
+ U[i][4] = vaddq_f16(vsubq_f16(XTx[i][4], XTx[i][2]), vmulq_f16(vsubq_f16(XTx[i][1], XTx[i][3]), vdupq_n_f16(2.0f)));
+
+ // U[i][5] = 4*XTx[i][1] + -5*XTx[i][3] + 1*XTx[i][5];
+ U[i][5] = vsubq_f16(vaddq_f16(XTx[i][5], vmulq_f16(XTx[i][1], vdupq_n_f16(4.0f))), vmulq_f16(XTx[i][3], vdupq_n_f16(5.0f)));
+ }
+
+ // Store the transformed matrix
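+ // Element (i, j) of the tile goes to Winograd matrix m = 6*i + j, at an
+ // offset of m * matrix_stride from the output pointer.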
+ for (int i = 0, m = 0; i < inner_tile_rows; i++)
+ {
+ for (int j = 0; j < inner_tile_cols; j++, m++)
+ {
+ vst1q_f16(outptr + m*matrix_stride, U[i][j]);
+ }
+ }
+ outptr += 8;
+ }
+ for (; channels_remaining >= 4; channels_remaining -= 4)
+ {
+ // Matrices used/computed in this kernel
+ float16x4_t x[inner_tile_rows][inner_tile_cols];
+ float16x4_t XTx[inner_tile_rows][inner_tile_cols];
+ float16x4_t U[inner_tile_rows][inner_tile_cols];
+ for (int i = 0; i < inner_tile_rows; i++)
+ {
+ for (int j = 0; j < inner_tile_cols; j++)
+ {
+ x[i][j] = vdup_n_f16(0.0f);
+ XTx[i][j] = vdup_n_f16(0.0f);
+ }
+ }
+
+ // Read a 6x6 tile in the Winograd domain
+ for (int i = 0; i < inner_tile_rows; i++)
+ {
+ for (int j = 0; j < inner_tile_cols; j++)
+ {
+ x[i][j] = vld1_f16(x_ptrs[i][j]);
+ x_ptrs[i][j] += 4;
+ }
+ }
+
+ // Compute XT . x
+ for (int j = 0; j < inner_tile_cols; j++)
+ {
+ // XTx[0][j] = 4*x[0][j] + -5*x[2][j] + 1*x[4][j];
+ XTx[0][j] = vsub_f16(vadd_f16(x[4][j], vmul_f16(x[0][j], vdup_n_f16(4.0f))), vmul_f16(x[2][j], vdup_n_f16(5.0f)));
+
+ // XTx[1][j] = -4*x[1][j] + -4*x[2][j] + 1*x[3][j] + 1*x[4][j];
+ XTx[1][j] = vsub_f16(vadd_f16(x[3][j], x[4][j]), vmul_f16(vadd_f16(x[1][j], x[2][j]), vdup_n_f16(4.0f)));
+
+ // XTx[2][j] = 4*x[1][j] + -4*x[2][j] + -1*x[3][j] + 1*x[4][j];
+ XTx[2][j] = vadd_f16(vsub_f16(x[4][j], x[3][j]), vmul_f16(vsub_f16(x[1][j], x[2][j]), vdup_n_f16(4.0f)));
+
+ // XTx[3][j] = -2*x[1][j] + -1*x[2][j] + 2*x[3][j] + 1*x[4][j];
+ XTx[3][j] = vadd_f16(vsub_f16(x[4][j], x[2][j]), vmul_f16(vsub_f16(x[3][j], x[1][j]), vdup_n_f16(2.0f)));
+
+ // XTx[4][j] = 2*x[1][j] + -1*x[2][j] + -2*x[3][j] + 1*x[4][j];
+ XTx[4][j] = vadd_f16(vsub_f16(x[4][j], x[2][j]), vmul_f16(vsub_f16(x[1][j], x[3][j]), vdup_n_f16(2.0f)));
+
+ // XTx[5][j] = 4*x[1][j] + -5*x[3][j] + 1*x[5][j];
+ XTx[5][j] = vsub_f16(vadd_f16(x[5][j], vmul_f16(x[1][j], vdup_n_f16(4.0f))), vmul_f16(x[3][j], vdup_n_f16(5.0f)));
+ }
+
+ // Compute U = XT . x . X
+ for (int i = 0; i < inner_tile_rows; i++)
+ {
+ // U[i][0] = 4*XTx[i][0] + -5*XTx[i][2] + 1*XTx[i][4];
+ U[i][0] = vsub_f16(vadd_f16(XTx[i][4], vmul_f16(XTx[i][0], vdup_n_f16(4.0f))), vmul_f16(XTx[i][2], vdup_n_f16(5.0f)));
+
+ // U[i][1] = -4*XTx[i][1] + -4*XTx[i][2] + 1*XTx[i][3] + 1*XTx[i][4];
+ U[i][1] = vsub_f16(vadd_f16(XTx[i][3], XTx[i][4]), vmul_f16(vadd_f16(XTx[i][1], XTx[i][2]), vdup_n_f16(4.0f)));
+
+ // U[i][2] = 4*XTx[i][1] + -4*XTx[i][2] + -1*XTx[i][3] + 1*XTx[i][4];
+ U[i][2] = vadd_f16(vsub_f16(XTx[i][4], XTx[i][3]), vmul_f16(vsub_f16(XTx[i][1], XTx[i][2]), vdup_n_f16(4.0f)));
+
+ // U[i][3] = -2*XTx[i][1] + -1*XTx[i][2] + 2*XTx[i][3] + 1*XTx[i][4];
+ U[i][3] = vadd_f16(vsub_f16(XTx[i][4], XTx[i][2]), vmul_f16(vsub_f16(XTx[i][3], XTx[i][1]), vdup_n_f16(2.0f)));
+
+ // U[i][4] = 2*XTx[i][1] + -1*XTx[i][2] + -2*XTx[i][3] + 1*XTx[i][4];
+ U[i][4] = vadd_f16(vsub_f16(XTx[i][4], XTx[i][2]), vmul_f16(vsub_f16(XTx[i][1], XTx[i][3]), vdup_n_f16(2.0f)));
+
+ // U[i][5] = 4*XTx[i][1] + -5*XTx[i][3] + 1*XTx[i][5];
+ U[i][5] = vsub_f16(vadd_f16(XTx[i][5], vmul_f16(XTx[i][1], vdup_n_f16(4.0f))), vmul_f16(XTx[i][3], vdup_n_f16(5.0f)));
+ }
+
+ // Store the transformed matrix
+ for (int i = 0, m = 0; i < inner_tile_rows; i++)
+ {
+ for (int j = 0; j < inner_tile_cols; j++, m++)
+ {
+ vst1_f16(outptr + m*matrix_stride, U[i][j]);
+ }
+ }
+ outptr += 4;
+ }
+ for (; channels_remaining; channels_remaining--)
+ {
+ // Load x
+ for (int i = 0; i < inner_tile_rows; i++)
+ {
+ for (int j = 0; j < inner_tile_cols; j++)
+ {
+ x[i][j] = *(x_ptrs[i][j]++);
+ }
+ }
+
+ // Compute XT . x
+ for (int j = 0; j < inner_tile_cols; j++)
+ {
+ XTx[0][j] = 4*x[0][j] + -5*x[2][j] + 1*x[4][j];
+ XTx[1][j] = -4*x[1][j] + -4*x[2][j] + 1*x[3][j] + 1*x[4][j];
+ XTx[2][j] = 4*x[1][j] + -4*x[2][j] + -1*x[3][j] + 1*x[4][j];
+ XTx[3][j] = -2*x[1][j] + -1*x[2][j] + 2*x[3][j] + 1*x[4][j];
+ XTx[4][j] = 2*x[1][j] + -1*x[2][j] + -2*x[3][j] + 1*x[4][j];
+ XTx[5][j] = 4*x[1][j] + -5*x[3][j] + 1*x[5][j];
+ }
+
+ // Compute U = XT . x . X
+ for (int i = 0; i < inner_tile_rows; i++)
+ {
+ U[i][0] = 4*XTx[i][0] + -5*XTx[i][2] + 1*XTx[i][4];
+ U[i][1] = -4*XTx[i][1] + -4*XTx[i][2] + 1*XTx[i][3] + 1*XTx[i][4];
+ U[i][2] = 4*XTx[i][1] + -4*XTx[i][2] + -1*XTx[i][3] + 1*XTx[i][4];
+ U[i][3] = -2*XTx[i][1] + -1*XTx[i][2] + 2*XTx[i][3] + 1*XTx[i][4];
+ U[i][4] = 2*XTx[i][1] + -1*XTx[i][2] + -2*XTx[i][3] + 1*XTx[i][4];
+ U[i][5] = 4*XTx[i][1] + -5*XTx[i][3] + 1*XTx[i][5];
+ }
+
+ // Store the transformed matrix
+ for (int i = 0, m = 0; i < inner_tile_rows; i++)
+ {
+ for (int j = 0; j < inner_tile_cols; j++, m++)
+ {
+ *(outptr + m*matrix_stride) = U[i][j];
+ }
+ }
+ outptr++;
+ }
+}
+
+} // namespace input_transform
+} // namespace winograd
+} // namespace arm_conv
+
+#endif // defined(__aarch64__) && defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
diff --git a/src/core/NEON/kernels/convolution/winograd/input_transforms/a64_fp32_6x6.cpp b/src/core/NEON/kernels/convolution/winograd/input_transforms/a64_fp32_6x6.cpp
new file mode 100644
index 0000000000..a2c04e0d8d
--- /dev/null
+++ b/src/core/NEON/kernels/convolution/winograd/input_transforms/a64_fp32_6x6.cpp
@@ -0,0 +1,1140 @@
+/*
+ * Copyright (c) 2022-2023 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifdef __aarch64__
+
+#include <cstddef>
+
+namespace arm_conv {
+namespace winograd {
+namespace input_transform {
+
+void a64_fp32_6x6(
+ unsigned int n_channels,
+ const float *input_base,
+ const size_t input_row_stride,
+ const size_t input_col_stride,
+ float *matrix_base,
+ const size_t matrix_stride
+)
+{
+ const float pcoeffs[4] = {1.0f, 2.0f, 4.0f, 5.0f};
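+
+ // v0 is loaded with pcoeffs below, so v0.s[1] = 2.0f, v0.s[2] = 4.0f and
+ // v0.s[3] = 5.0f; the fmla/fmls instructions against these lanes apply the
+ // same +/-1, +/-2, +/-4, -5 input-transform coefficients used by the
+ // intrinsic 6x6 kernels in this directory.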
+ __asm__ __volatile__(
+ "ldr q0, [%[pcoeffs]]\n"
+ "add x25, %[inptr0], %[input_row_stride]\n"
+ "add x10, %[input_col_stride1], %[input_col_stride1]\n"
+ "add x16, x25, %[input_row_stride]\n"
+ "add x8, x10, %[input_col_stride1]\n"
+ "add x26, x16, %[input_row_stride]\n"
+ "add x20, x8, %[input_col_stride1]\n"
+ "add x17, x26, %[input_row_stride]\n"
+ "add x21, x20, %[input_col_stride1]\n"
+ "add x27, x17, %[input_row_stride]\n"
+ "add x28, %[outptr0], %[output_row_stride]\n"
+ "add x11, %[output_col_stride1], %[output_col_stride1]\n"
+ "add x22, x28, %[output_row_stride]\n"
+ "add x13, x11, %[output_col_stride1]\n"
+ "add x12, x22, %[output_row_stride]\n"
+ "add x23, x13, %[output_col_stride1]\n"
+ "add x14, x12, %[output_row_stride]\n"
+ "add x15, x23, %[output_col_stride1]\n"
+ "add x24, x14, %[output_row_stride]\n"
+ "cmp %w[n_channels], #4\n"
+ "blt 2f\n"
+ "1:\n"
+ "ldr q8, [%[inptr0], x20]\n"
+ "ldr q2, [%[inptr0], x10]\n"
+ "mov v14.16b, v8.16b\n"
+ "ldr q9, [%[inptr0]]\n"
+ "mov v10.16b, v8.16b\n"
+ "ldr q1, [%[inptr0], x21]\n"
+ "fmla v14.4s, v9.4s, v0.s[2]\n"
+ "ldr q4, [%[inptr0], x8]\n"
+ "mov v9.16b, v8.16b\n"
+ "ldr q12, [%[inptr0], %[input_col_stride1]]\n"
+ "fmls v10.4s, v12.4s, v0.s[2]\n"
+ "ldr q5, [x16, x20]\n"
+ "fmls v14.4s, v2.4s, v0.s[3]\n"
+ "ldr q20, [x16, x10]\n"
+ "fmla v9.4s, v12.4s, v0.s[2]\n"
+ "ldr q3, [x16]\n"
+ "fmls v10.4s, v2.4s, v0.s[2]\n"
+ "ldr q6, [x16, x21]\n"
+ "mov v7.16b, v8.16b\n"
+ "ldr q16, [x16, x8]\n"
+ "fmls v9.4s, v2.4s, v0.s[2]\n"
+ "ldr q22, [x16, %[input_col_stride1]]\n"
+ "fadd v10.4s, v10.4s, v4.4s\n"
+ "ldr q17, [x17, x20]\n"
+ "fmls v7.4s, v12.4s, v0.s[1]\n"
+ "ldr q15, [x17, x10]\n"
+ "fsub v9.4s, v9.4s, v4.4s\n"
+ "ldr q19, [x17]\n"
+ "mov v8.16b, v8.16b\n"
+ "ldr q18, [x17, x21]\n"
+ "fsub v7.4s, v7.4s, v2.4s\n"
+ "ldr q13, [x17, x8]\n"
+ "fmla v7.4s, v4.4s, v0.s[1]\n"
+ "ldr q21, [x17, %[input_col_stride1]]\n"
+ "fmla v8.4s, v12.4s, v0.s[1]\n"
+ "add %[inptr0], %[inptr0], #16\n"
+ "mov v11.16b, v1.16b\n"
+ "add x16, x16, #16\n"
+ "mov v1.16b, v5.16b\n"
+ "add x17, x17, #16\n"
+ "fsub v8.4s, v8.4s, v2.4s\n"
+ "fmla v11.4s, v12.4s, v0.s[2]\n"
+ "fmls v8.4s, v4.4s, v0.s[1]\n"
+ "fmla v1.4s, v3.4s, v0.s[2]\n"
+ "mov v2.16b, v5.16b\n"
+ "mov v3.16b, v5.16b\n"
+ "fmls v11.4s, v4.4s, v0.s[3]\n"
+ "mov v4.16b, v5.16b\n"
+ "fmls v1.4s, v20.4s, v0.s[3]\n"
+ "fmls v2.4s, v22.4s, v0.s[2]\n"
+ "fmla v3.4s, v22.4s, v0.s[2]\n"
+ "fmls v4.4s, v22.4s, v0.s[1]\n"
+ "mov v5.16b, v5.16b\n"
+ "mov v6.16b, v6.16b\n"
+ "fmls v2.4s, v20.4s, v0.s[2]\n"
+ "mov v12.16b, v17.16b\n"
+ "fmls v3.4s, v20.4s, v0.s[2]\n"
+ "fsub v4.4s, v4.4s, v20.4s\n"
+ "fmla v4.4s, v16.4s, v0.s[1]\n"
+ "fmla v5.4s, v22.4s, v0.s[1]\n"
+ "fadd v2.4s, v2.4s, v16.4s\n"
+ "fmla v6.4s, v22.4s, v0.s[2]\n"
+ "fsub v3.4s, v3.4s, v16.4s\n"
+ "fmla v12.4s, v19.4s, v0.s[2]\n"
+ "fsub v5.4s, v5.4s, v20.4s\n"
+ "mov v19.16b, v17.16b\n"
+ "fmls v5.4s, v16.4s, v0.s[1]\n"
+ "fmls v6.4s, v16.4s, v0.s[3]\n"
+ "fmls v12.4s, v15.4s, v0.s[3]\n"
+ "fmls v19.4s, v21.4s, v0.s[2]\n"
+ "mov v20.16b, v17.16b\n"
+ "mov v16.16b, v17.16b\n"
+ "mov v17.16b, v17.16b\n"
+ "mov v18.16b, v18.16b\n"
+ "fmls v19.4s, v15.4s, v0.s[2]\n"
+ "fmla v20.4s, v21.4s, v0.s[2]\n"
+ "fmls v16.4s, v21.4s, v0.s[1]\n"
+ "fmla v17.4s, v21.4s, v0.s[1]\n"
+ "fmla v18.4s, v21.4s, v0.s[2]\n"
+ "mov v23.16b, v12.16b\n"
+ "fadd v19.4s, v19.4s, v13.4s\n"
+ "fmls v20.4s, v15.4s, v0.s[2]\n"
+ "fsub v16.4s, v16.4s, v15.4s\n"
+ "fsub v17.4s, v17.4s, v15.4s\n"
+ "fmla v16.4s, v13.4s, v0.s[1]\n"
+ "fmls v17.4s, v13.4s, v0.s[1]\n"
+ "fsub v20.4s, v20.4s, v13.4s\n"
+ "fmls v18.4s, v13.4s, v0.s[3]\n"
+ "fmla v23.4s, v14.4s, v0.s[2]\n"
+ "mov v15.16b, v19.16b\n"
+ "mov v14.16b, v20.16b\n"
+ "mov v24.16b, v16.16b\n"
+ "fmla v15.4s, v10.4s, v0.s[2]\n"
+ "mov v10.16b, v17.16b\n"
+ "fmls v23.4s, v1.4s, v0.s[3]\n"
+ "fmla v14.4s, v9.4s, v0.s[2]\n"
+ "fmla v24.4s, v7.4s, v0.s[2]\n"
+ "fmla v10.4s, v8.4s, v0.s[2]\n"
+ "fmls v15.4s, v2.4s, v0.s[3]\n"
+ "mov v7.16b, v18.16b\n"
+ "str q23, [%[outptr0]]\n"
+ "fmls v14.4s, v3.4s, v0.s[3]\n"
+ "fmls v24.4s, v4.4s, v0.s[3]\n"
+ "fmls v10.4s, v5.4s, v0.s[3]\n"
+ "str q15, [%[outptr0], %[output_col_stride1]]\n"
+ "fmla v7.4s, v11.4s, v0.s[2]\n"
+ "str q14, [%[outptr0], x11]\n"
+ "str q24, [%[outptr0], x13]\n"
+ "str q10, [%[outptr0], x23]\n"
+ "fmls v7.4s, v6.4s, v0.s[3]\n"
+ "str q7, [%[outptr0], x15]\n"
+ "add %[outptr0], %[outptr0], #16\n"
+ "mov v26.16b, v12.16b\n"
+ "mov v25.16b, v19.16b\n"
+ "ldr q11, [x25, x20]\n"
+ "mov v10.16b, v11.16b\n"
+ "ldr q23, [x25, x10]\n"
+ "mov v9.16b, v11.16b\n"
+ "ldr q7, [x25]\n"
+ "fmla v10.4s, v7.4s, v0.s[2]\n"
+ "ldr q13, [x25, x21]\n"
+ "mov v7.16b, v11.16b\n"
+ "ldr q31, [x25, x8]\n"
+ "mov v8.16b, v11.16b\n"
+ "ldr q21, [x25, %[input_col_stride1]]\n"
+ "fmls v10.4s, v23.4s, v0.s[3]\n"
+ "ldr q30, [x26, x20]\n"
+ "fmls v9.4s, v21.4s, v0.s[2]\n"
+ "ldr q29, [x26, x10]\n"
+ "fmla v7.4s, v21.4s, v0.s[2]\n"
+ "ldr q22, [x26]\n"
+ "fmls v8.4s, v21.4s, v0.s[1]\n"
+ "ldr q24, [x26, x21]\n"
+ "fmls v9.4s, v23.4s, v0.s[2]\n"
+ "ldr q27, [x26, x8]\n"
+ "fmls v7.4s, v23.4s, v0.s[2]\n"
+ "ldr q28, [x26, %[input_col_stride1]]\n"
+ "fsub v8.4s, v8.4s, v23.4s\n"
+ "add x25, x25, #16\n"
+ "fadd v9.4s, v9.4s, v31.4s\n"
+ "add x26, x26, #16\n"
+ "fsub v7.4s, v7.4s, v31.4s\n"
+ "fmla v8.4s, v31.4s, v0.s[1]\n"
+ "mov v11.16b, v11.16b\n"
+ "mov v15.16b, v13.16b\n"
+ "mov v14.16b, v30.16b\n"
+ "mov v13.16b, v30.16b\n"
+ "fmla v11.4s, v21.4s, v0.s[1]\n"
+ "fmla v15.4s, v21.4s, v0.s[2]\n"
+ "fmla v14.4s, v22.4s, v0.s[2]\n"
+ "fmls v13.4s, v28.4s, v0.s[2]\n"
+ "mov v21.16b, v30.16b\n"
+ "mov v22.16b, v30.16b\n"
+ "fsub v11.4s, v11.4s, v23.4s\n"
+ "fmls v15.4s, v31.4s, v0.s[3]\n"
+ "fmls v11.4s, v31.4s, v0.s[1]\n"
+ "fmls v14.4s, v29.4s, v0.s[3]\n"
+ "fmls v13.4s, v29.4s, v0.s[2]\n"
+ "fmla v21.4s, v28.4s, v0.s[2]\n"
+ "fmls v22.4s, v28.4s, v0.s[1]\n"
+ "mov v23.16b, v30.16b\n"
+ "mov v24.16b, v24.16b\n"
+ "fmls v26.4s, v10.4s, v0.s[2]\n"
+ "fadd v13.4s, v13.4s, v27.4s\n"
+ "fmls v21.4s, v29.4s, v0.s[2]\n"
+ "fsub v22.4s, v22.4s, v29.4s\n"
+ "fmla v23.4s, v28.4s, v0.s[1]\n"
+ "fmla v22.4s, v27.4s, v0.s[1]\n"
+ "fmla v24.4s, v28.4s, v0.s[2]\n"
+ "fsub v21.4s, v21.4s, v27.4s\n"
+ "fmls v26.4s, v1.4s, v0.s[2]\n"
+ "fsub v23.4s, v23.4s, v29.4s\n"
+ "fmls v25.4s, v9.4s, v0.s[2]\n"
+ "fmls v23.4s, v27.4s, v0.s[1]\n"
+ "fmls v24.4s, v27.4s, v0.s[3]\n"
+ "fadd v26.4s, v26.4s, v14.4s\n"
+ "mov v27.16b, v20.16b\n"
+ "str q26, [x28]\n"
+ "fmls v25.4s, v2.4s, v0.s[2]\n"
+ "fmls v27.4s, v7.4s, v0.s[2]\n"
+ "mov v31.16b, v16.16b\n"
+ "mov v30.16b, v17.16b\n"
+ "mov v29.16b, v18.16b\n"
+ "fadd v25.4s, v25.4s, v13.4s\n"
+ "fmls v31.4s, v8.4s, v0.s[2]\n"
+ "str q25, [x28, %[output_col_stride1]]\n"
+ "fmls v27.4s, v3.4s, v0.s[2]\n"
+ "fmls v30.4s, v11.4s, v0.s[2]\n"
+ "fmls v29.4s, v15.4s, v0.s[2]\n"
+ "fmls v31.4s, v4.4s, v0.s[2]\n"
+ "mov v26.16b, v12.16b\n"
+ "fadd v27.4s, v27.4s, v21.4s\n"
+ "mov v25.16b, v19.16b\n"
+ "str q27, [x28, x11]\n"
+ "fmls v30.4s, v5.4s, v0.s[2]\n"
+ "fadd v31.4s, v31.4s, v22.4s\n"
+ "fmls v29.4s, v6.4s, v0.s[2]\n"
+ "str q31, [x28, x13]\n"
+ "fmla v26.4s, v10.4s, v0.s[2]\n"
+ "fadd v30.4s, v30.4s, v23.4s\n"
+ "fmla v25.4s, v9.4s, v0.s[2]\n"
+ "str q30, [x28, x23]\n"
+ "fadd v29.4s, v29.4s, v24.4s\n"
+ "str q29, [x28, x15]\n"
+ "fmls v26.4s, v1.4s, v0.s[2]\n"
+ "fmls v25.4s, v2.4s, v0.s[2]\n"
+ "add x28, x28, #16\n"
+ "mov v30.16b, v20.16b\n"
+ "mov v29.16b, v16.16b\n"
+ "fsub v26.4s, v26.4s, v14.4s\n"
+ "mov v28.16b, v17.16b\n"
+ "str q26, [x22]\n"
+ "fsub v25.4s, v25.4s, v13.4s\n"
+ "str q25, [x22, %[output_col_stride1]]\n"
+ "fmla v30.4s, v7.4s, v0.s[2]\n"
+ "fmla v29.4s, v8.4s, v0.s[2]\n"
+ "fmla v28.4s, v11.4s, v0.s[2]\n"
+ "mov v26.16b, v18.16b\n"
+ "mov v25.16b, v12.16b\n"
+ "fmls v30.4s, v3.4s, v0.s[2]\n"
+ "mov v31.16b, v19.16b\n"
+ "fmls v29.4s, v4.4s, v0.s[2]\n"
+ "fmls v28.4s, v5.4s, v0.s[2]\n"
+ "fmla v26.4s, v15.4s, v0.s[2]\n"
+ "fmls v25.4s, v10.4s, v0.s[1]\n"
+ "fsub v30.4s, v30.4s, v21.4s\n"
+ "fmls v31.4s, v9.4s, v0.s[1]\n"
+ "str q30, [x22, x11]\n"
+ "fsub v29.4s, v29.4s, v22.4s\n"
+ "str q29, [x22, x13]\n"
+ "fsub v28.4s, v28.4s, v23.4s\n"
+ "str q28, [x22, x23]\n"
+ "fmls v26.4s, v6.4s, v0.s[2]\n"
+ "fsub v25.4s, v25.4s, v1.4s\n"
+ "fsub v31.4s, v31.4s, v2.4s\n"
+ "fmla v25.4s, v14.4s, v0.s[1]\n"
+ "fmla v31.4s, v13.4s, v0.s[1]\n"
+ "fsub v26.4s, v26.4s, v24.4s\n"
+ "mov v27.16b, v20.16b\n"
+ "str q26, [x22, x15]\n"
+ "mov v26.16b, v16.16b\n"
+ "str q25, [x12]\n"
+ "fmls v27.4s, v7.4s, v0.s[1]\n"
+ "str q31, [x12, %[output_col_stride1]]\n"
+ "fmls v26.4s, v8.4s, v0.s[1]\n"
+ "mov v25.16b, v17.16b\n"
+ "add x22, x22, #16\n"
+ "fsub v27.4s, v27.4s, v3.4s\n"
+ "mov v28.16b, v18.16b\n"
+ "fmla v27.4s, v21.4s, v0.s[1]\n"
+ "fsub v26.4s, v26.4s, v4.4s\n"
+ "fmla v26.4s, v22.4s, v0.s[1]\n"
+ "fmls v25.4s, v11.4s, v0.s[1]\n"
+ "fmls v28.4s, v15.4s, v0.s[1]\n"
+ "mov v12.16b, v12.16b\n"
+ "str q27, [x12, x11]\n"
+ "mov v19.16b, v19.16b\n"
+ "str q26, [x12, x13]\n"
+ "fsub v25.4s, v25.4s, v5.4s\n"
+ "fmla v25.4s, v23.4s, v0.s[1]\n"
+ "fsub v28.4s, v28.4s, v6.4s\n"
+ "fmla v28.4s, v24.4s, v0.s[1]\n"
+ "fmla v12.4s, v10.4s, v0.s[1]\n"
+ "fmla v19.4s, v9.4s, v0.s[1]\n"
+ "mov v20.16b, v20.16b\n"
+ "str q25, [x12, x23]\n"
+ "mov v16.16b, v16.16b\n"
+ "str q28, [x12, x15]\n"
+ "fsub v12.4s, v12.4s, v1.4s\n"
+ "fmls v12.4s, v14.4s, v0.s[1]\n"
+ "add x12, x12, #16\n"
+ "fsub v19.4s, v19.4s, v2.4s\n"
+ "fmla v20.4s, v7.4s, v0.s[1]\n"
+ "fmls v19.4s, v13.4s, v0.s[1]\n"
+ "fmla v16.4s, v8.4s, v0.s[1]\n"
+ "str q12, [x14]\n"
+ "mov v1.16b, v17.16b\n"
+ "fsub v20.4s, v20.4s, v3.4s\n"
+ "mov v17.16b, v18.16b\n"
+ "str q19, [x14, %[output_col_stride1]]\n"
+ "fmls v20.4s, v21.4s, v0.s[1]\n"
+ "fsub v16.4s, v16.4s, v4.4s\n"
+ "fmla v1.4s, v11.4s, v0.s[1]\n"
+ "fmls v16.4s, v22.4s, v0.s[1]\n"
+ "fmla v17.4s, v15.4s, v0.s[1]\n"
+ "str q20, [x14, x11]\n"
+ "fsub v1.4s, v1.4s, v5.4s\n"
+ "str q16, [x14, x13]\n"
+ "fmls v1.4s, v23.4s, v0.s[1]\n"
+ "fsub v17.4s, v17.4s, v6.4s\n"
+ "fmls v17.4s, v24.4s, v0.s[1]\n"
+ "str q1, [x14, x23]\n"
+ "str q17, [x14, x15]\n"
+ "add x14, x14, #16\n"
+ "ldr q2, [x27, x20]\n"
+ "mov v4.16b, v2.16b\n"
+ "ldr q17, [x27, x10]\n"
+ "mov v12.16b, v2.16b\n"
+ "ldr q18, [x27]\n"
+ "fmla v4.4s, v18.4s, v0.s[2]\n"
+ "ldr q3, [x27, x21]\n"
+ "mov v6.16b, v2.16b\n"
+ "ldr q5, [x27, x8]\n"
+ "mov v1.16b, v2.16b\n"
+ "ldr q18, [x27, %[input_col_stride1]]\n"
+ "fmls v4.4s, v17.4s, v0.s[3]\n"
+ "add x27, x27, #16\n"
+ "fmls v12.4s, v18.4s, v0.s[2]\n"
+ "sub %w[n_channels], %w[n_channels], #4\n"
+ "fmla v6.4s, v18.4s, v0.s[2]\n"
+ "cmp %w[n_channels], #4\n"
+ "fmls v1.4s, v18.4s, v0.s[1]\n"
+ "mov v2.16b, v2.16b\n"
+ "fmls v12.4s, v17.4s, v0.s[2]\n"
+ "mov v3.16b, v3.16b\n"
+ "fmls v6.4s, v17.4s, v0.s[2]\n"
+ "fmla v2.4s, v18.4s, v0.s[1]\n"
+ "fsub v1.4s, v1.4s, v17.4s\n"
+ "fmla v3.4s, v18.4s, v0.s[2]\n"
+ "fadd v12.4s, v12.4s, v5.4s\n"
+ "fmla v1.4s, v5.4s, v0.s[1]\n"
+ "fsub v6.4s, v6.4s, v5.4s\n"
+ "fsub v2.4s, v2.4s, v17.4s\n"
+ "fmls v2.4s, v5.4s, v0.s[1]\n"
+ "fmls v3.4s, v5.4s, v0.s[3]\n"
+ "mov v4.16b, v4.16b\n"
+ "mov v16.16b, v12.16b\n"
+ "mov v5.16b, v6.16b\n"
+ "mov v6.16b, v1.16b\n"
+ "fmla v4.4s, v10.4s, v0.s[2]\n"
+ "fmla v16.4s, v9.4s, v0.s[2]\n"
+ "fmla v5.4s, v7.4s, v0.s[2]\n"
+ "fmla v6.4s, v8.4s, v0.s[2]\n"
+ "mov v9.16b, v2.16b\n"
+ "mov v10.16b, v3.16b\n"
+ "fmls v4.4s, v14.4s, v0.s[3]\n"
+ "fmls v16.4s, v13.4s, v0.s[3]\n"
+ "fmls v5.4s, v21.4s, v0.s[3]\n"
+ "fmls v6.4s, v22.4s, v0.s[3]\n"
+ "fmla v9.4s, v11.4s, v0.s[2]\n"
+ "fmla v10.4s, v15.4s, v0.s[2]\n"
+ "str q4, [x24]\n"
+ "str q16, [x24, %[output_col_stride1]]\n"
+ "str q5, [x24, x11]\n"
+ "str q6, [x24, x13]\n"
+ "fmls v9.4s, v23.4s, v0.s[3]\n"
+ "fmls v10.4s, v24.4s, v0.s[3]\n"
+ "str q9, [x24, x23]\n"
+ "str q10, [x24, x15]\n"
+ "add x24, x24, #16\n"
+ "bge 1b\n"
+ "2:\n"
+ "cmp %w[n_channels], #2\n"
+ "blt 3f\n"
+ "ldr d8, [%[inptr0], x20]\n"
+ "mov v14.16b, v8.16b\n"
+ "ldr d2, [%[inptr0], x10]\n"
+ "mov v10.16b, v8.16b\n"
+ "ldr d9, [%[inptr0]]\n"
+ "fmla v14.4s, v9.4s, v0.s[2]\n"
+ "ldr d1, [%[inptr0], x21]\n"
+ "mov v9.16b, v8.16b\n"
+ "ldr d4, [%[inptr0], x8]\n"
+ "mov v7.16b, v8.16b\n"
+ "ldr d12, [%[inptr0], %[input_col_stride1]]\n"
+ "fmls v14.4s, v2.4s, v0.s[3]\n"
+ "ldr d5, [x16, x20]\n"
+ "fmls v10.4s, v12.4s, v0.s[2]\n"
+ "ldr d20, [x16, x10]\n"
+ "fmla v9.4s, v12.4s, v0.s[2]\n"
+ "ldr d3, [x16]\n"
+ "fmls v7.4s, v12.4s, v0.s[1]\n"
+ "ldr d6, [x16, x21]\n"
+ "fmls v10.4s, v2.4s, v0.s[2]\n"
+ "ldr d16, [x16, x8]\n"
+ "fmls v9.4s, v2.4s, v0.s[2]\n"
+ "ldr d22, [x16, %[input_col_stride1]]\n"
+ "fsub v7.4s, v7.4s, v2.4s\n"
+ "ldr d17, [x17, x20]\n"
+ "fadd v10.4s, v10.4s, v4.4s\n"
+ "ldr d15, [x17, x10]\n"
+ "fsub v9.4s, v9.4s, v4.4s\n"
+ "ldr d19, [x17]\n"
+ "fmla v7.4s, v4.4s, v0.s[1]\n"
+ "ldr d18, [x17, x21]\n"
+ "mov v8.16b, v8.16b\n"
+ "ldr d13, [x17, x8]\n"
+ "mov v11.16b, v1.16b\n"
+ "ldr d21, [x17, %[input_col_stride1]]\n"
+ "fmla v8.4s, v12.4s, v0.s[1]\n"
+ "add %[inptr0], %[inptr0], #8\n"
+ "fmla v11.4s, v12.4s, v0.s[2]\n"
+ "add x16, x16, #8\n"
+ "mov v1.16b, v5.16b\n"
+ "add x17, x17, #8\n"
+ "fsub v8.4s, v8.4s, v2.4s\n"
+ "mov v2.16b, v5.16b\n"
+ "fmls v8.4s, v4.4s, v0.s[1]\n"
+ "fmls v11.4s, v4.4s, v0.s[3]\n"
+ "fmla v1.4s, v3.4s, v0.s[2]\n"
+ "fmls v2.4s, v22.4s, v0.s[2]\n"
+ "mov v3.16b, v5.16b\n"
+ "mov v4.16b, v5.16b\n"
+ "mov v5.16b, v5.16b\n"
+ "mov v6.16b, v6.16b\n"
+ "fmls v1.4s, v20.4s, v0.s[3]\n"
+ "fmls v2.4s, v20.4s, v0.s[2]\n"
+ "fmla v3.4s, v22.4s, v0.s[2]\n"
+ "fmls v4.4s, v22.4s, v0.s[1]\n"
+ "fmla v5.4s, v22.4s, v0.s[1]\n"
+ "fmla v6.4s, v22.4s, v0.s[2]\n"
+ "fadd v2.4s, v2.4s, v16.4s\n"
+ "mov v12.16b, v17.16b\n"
+ "fmls v3.4s, v20.4s, v0.s[2]\n"
+ "fsub v4.4s, v4.4s, v20.4s\n"
+ "fmla v4.4s, v16.4s, v0.s[1]\n"
+ "fsub v5.4s, v5.4s, v20.4s\n"
+ "fmls v5.4s, v16.4s, v0.s[1]\n"
+ "fmls v6.4s, v16.4s, v0.s[3]\n"
+ "fsub v3.4s, v3.4s, v16.4s\n"
+ "fmla v12.4s, v19.4s, v0.s[2]\n"
+ "mov v19.16b, v17.16b\n"
+ "mov v20.16b, v17.16b\n"
+ "mov v16.16b, v17.16b\n"
+ "mov v17.16b, v17.16b\n"
+ "fmls v12.4s, v15.4s, v0.s[3]\n"
+ "fmls v19.4s, v21.4s, v0.s[2]\n"
+ "fmla v20.4s, v21.4s, v0.s[2]\n"
+ "fmls v16.4s, v21.4s, v0.s[1]\n"
+ "fmla v17.4s, v21.4s, v0.s[1]\n"
+ "mov v18.16b, v18.16b\n"
+ "fmls v19.4s, v15.4s, v0.s[2]\n"
+ "mov v23.16b, v12.16b\n"
+ "fmls v20.4s, v15.4s, v0.s[2]\n"
+ "fsub v16.4s, v16.4s, v15.4s\n"
+ "fmla v16.4s, v13.4s, v0.s[1]\n"
+ "fsub v17.4s, v17.4s, v15.4s\n"
+ "fadd v19.4s, v19.4s, v13.4s\n"
+ "fmls v17.4s, v13.4s, v0.s[1]\n"
+ "fsub v20.4s, v20.4s, v13.4s\n"
+ "fmla v18.4s, v21.4s, v0.s[2]\n"
+ "fmla v23.4s, v14.4s, v0.s[2]\n"
+ "mov v15.16b, v19.16b\n"
+ "mov v14.16b, v20.16b\n"
+ "mov v24.16b, v16.16b\n"
+ "fmls v18.4s, v13.4s, v0.s[3]\n"
+ "fmla v15.4s, v10.4s, v0.s[2]\n"
+ "fmls v23.4s, v1.4s, v0.s[3]\n"
+ "fmla v14.4s, v9.4s, v0.s[2]\n"
+ "fmla v24.4s, v7.4s, v0.s[2]\n"
+ "mov v10.16b, v17.16b\n"
+ "fmls v15.4s, v2.4s, v0.s[3]\n"
+ "mov v7.16b, v18.16b\n"
+ "str d23, [%[outptr0]]\n"
+ "fmls v14.4s, v3.4s, v0.s[3]\n"
+ "fmls v24.4s, v4.4s, v0.s[3]\n"
+ "fmla v10.4s, v8.4s, v0.s[2]\n"
+ "str d15, [%[outptr0], %[output_col_stride1]]\n"
+ "fmla v7.4s, v11.4s, v0.s[2]\n"
+ "str d14, [%[outptr0], x11]\n"
+ "fmls v10.4s, v5.4s, v0.s[3]\n"
+ "str d24, [%[outptr0], x13]\n"
+ "fmls v7.4s, v6.4s, v0.s[3]\n"
+ "str d10, [%[outptr0], x23]\n"
+ "str d7, [%[outptr0], x15]\n"
+ "add %[outptr0], %[outptr0], #8\n"
+ "mov v26.16b, v12.16b\n"
+ "mov v25.16b, v19.16b\n"
+ "ldr d11, [x25, x20]\n"
+ "mov v10.16b, v11.16b\n"
+ "ldr d23, [x25, x10]\n"
+ "mov v9.16b, v11.16b\n"
+ "ldr d7, [x25]\n"
+ "fmla v10.4s, v7.4s, v0.s[2]\n"
+ "ldr d13, [x25, x21]\n"
+ "mov v7.16b, v11.16b\n"
+ "ldr d31, [x25, x8]\n"
+ "mov v8.16b, v11.16b\n"
+ "ldr d21, [x25, %[input_col_stride1]]\n"
+ "fmls v10.4s, v23.4s, v0.s[3]\n"
+ "ldr d30, [x26, x20]\n"
+ "fmls v9.4s, v21.4s, v0.s[2]\n"
+ "ldr d29, [x26, x10]\n"
+ "fmla v7.4s, v21.4s, v0.s[2]\n"
+ "ldr d22, [x26]\n"
+ "fmls v8.4s, v21.4s, v0.s[1]\n"
+ "ldr d24, [x26, x21]\n"
+ "fmls v9.4s, v23.4s, v0.s[2]\n"
+ "ldr d27, [x26, x8]\n"
+ "fmls v7.4s, v23.4s, v0.s[2]\n"
+ "ldr d28, [x26, %[input_col_stride1]]\n"
+ "fsub v8.4s, v8.4s, v23.4s\n"
+ "add x25, x25, #8\n"
+ "fadd v9.4s, v9.4s, v31.4s\n"
+ "add x26, x26, #8\n"
+ "fsub v7.4s, v7.4s, v31.4s\n"
+ "fmla v8.4s, v31.4s, v0.s[1]\n"
+ "mov v11.16b, v11.16b\n"
+ "mov v15.16b, v13.16b\n"
+ "mov v14.16b, v30.16b\n"
+ "mov v13.16b, v30.16b\n"
+ "fmla v11.4s, v21.4s, v0.s[1]\n"
+ "fmla v15.4s, v21.4s, v0.s[2]\n"
+ "fmla v14.4s, v22.4s, v0.s[2]\n"
+ "fmls v13.4s, v28.4s, v0.s[2]\n"
+ "mov v21.16b, v30.16b\n"
+ "mov v22.16b, v30.16b\n"
+ "fsub v11.4s, v11.4s, v23.4s\n"
+ "fmls v15.4s, v31.4s, v0.s[3]\n"
+ "fmls v11.4s, v31.4s, v0.s[1]\n"
+ "fmls v14.4s, v29.4s, v0.s[3]\n"
+ "fmls v13.4s, v29.4s, v0.s[2]\n"
+ "fmla v21.4s, v28.4s, v0.s[2]\n"
+ "fmls v22.4s, v28.4s, v0.s[1]\n"
+ "mov v23.16b, v30.16b\n"
+ "mov v24.16b, v24.16b\n"
+ "fmls v26.4s, v10.4s, v0.s[2]\n"
+ "fadd v13.4s, v13.4s, v27.4s\n"
+ "fmls v21.4s, v29.4s, v0.s[2]\n"
+ "fsub v22.4s, v22.4s, v29.4s\n"
+ "fmla v23.4s, v28.4s, v0.s[1]\n"
+ "fmla v22.4s, v27.4s, v0.s[1]\n"
+ "fmla v24.4s, v28.4s, v0.s[2]\n"
+ "fsub v21.4s, v21.4s, v27.4s\n"
+ "fmls v26.4s, v1.4s, v0.s[2]\n"
+ "fsub v23.4s, v23.4s, v29.4s\n"
+ "fmls v25.4s, v9.4s, v0.s[2]\n"
+ "fmls v23.4s, v27.4s, v0.s[1]\n"
+ "fmls v24.4s, v27.4s, v0.s[3]\n"
+ "fadd v26.4s, v26.4s, v14.4s\n"
+ "mov v27.16b, v20.16b\n"
+ "str d26, [x28]\n"
+ "fmls v25.4s, v2.4s, v0.s[2]\n"
+ "fmls v27.4s, v7.4s, v0.s[2]\n"
+ "mov v31.16b, v16.16b\n"
+ "mov v30.16b, v17.16b\n"
+ "mov v29.16b, v18.16b\n"
+ "fadd v25.4s, v25.4s, v13.4s\n"
+ "fmls v31.4s, v8.4s, v0.s[2]\n"
+ "str d25, [x28, %[output_col_stride1]]\n"
+ "fmls v27.4s, v3.4s, v0.s[2]\n"
+ "fmls v30.4s, v11.4s, v0.s[2]\n"
+ "fmls v29.4s, v15.4s, v0.s[2]\n"
+ "fmls v31.4s, v4.4s, v0.s[2]\n"
+ "mov v26.16b, v12.16b\n"
+ "fadd v27.4s, v27.4s, v21.4s\n"
+ "mov v25.16b, v19.16b\n"
+ "str d27, [x28, x11]\n"
+ "fmls v30.4s, v5.4s, v0.s[2]\n"
+ "fadd v31.4s, v31.4s, v22.4s\n"
+ "fmls v29.4s, v6.4s, v0.s[2]\n"
+ "str d31, [x28, x13]\n"
+ "fmla v26.4s, v10.4s, v0.s[2]\n"
+ "fadd v30.4s, v30.4s, v23.4s\n"
+ "fmla v25.4s, v9.4s, v0.s[2]\n"
+ "str d30, [x28, x23]\n"
+ "fadd v29.4s, v29.4s, v24.4s\n"
+ "str d29, [x28, x15]\n"
+ "fmls v26.4s, v1.4s, v0.s[2]\n"
+ "fmls v25.4s, v2.4s, v0.s[2]\n"
+ "add x28, x28, #8\n"
+ "mov v30.16b, v20.16b\n"
+ "mov v29.16b, v16.16b\n"
+ "fsub v26.4s, v26.4s, v14.4s\n"
+ "mov v28.16b, v17.16b\n"
+ "str d26, [x22]\n"
+ "fsub v25.4s, v25.4s, v13.4s\n"
+ "str d25, [x22, %[output_col_stride1]]\n"
+ "fmla v30.4s, v7.4s, v0.s[2]\n"
+ "fmla v29.4s, v8.4s, v0.s[2]\n"
+ "fmla v28.4s, v11.4s, v0.s[2]\n"
+ "mov v26.16b, v18.16b\n"
+ "mov v25.16b, v12.16b\n"
+ "fmls v30.4s, v3.4s, v0.s[2]\n"
+ "mov v31.16b, v19.16b\n"
+ "fmls v29.4s, v4.4s, v0.s[2]\n"
+ "fmls v28.4s, v5.4s, v0.s[2]\n"
+ "fmla v26.4s, v15.4s, v0.s[2]\n"
+ "fmls v25.4s, v10.4s, v0.s[1]\n"
+ "fsub v30.4s, v30.4s, v21.4s\n"
+ "fmls v31.4s, v9.4s, v0.s[1]\n"
+ "str d30, [x22, x11]\n"
+ "fsub v29.4s, v29.4s, v22.4s\n"
+ "str d29, [x22, x13]\n"
+ "fsub v28.4s, v28.4s, v23.4s\n"
+ "str d28, [x22, x23]\n"
+ "fmls v26.4s, v6.4s, v0.s[2]\n"
+ "fsub v25.4s, v25.4s, v1.4s\n"
+ "fsub v31.4s, v31.4s, v2.4s\n"
+ "fmla v25.4s, v14.4s, v0.s[1]\n"
+ "fmla v31.4s, v13.4s, v0.s[1]\n"
+ "fsub v26.4s, v26.4s, v24.4s\n"
+ "mov v27.16b, v20.16b\n"
+ "str d26, [x22, x15]\n"
+ "mov v26.16b, v16.16b\n"
+ "str d25, [x12]\n"
+ "fmls v27.4s, v7.4s, v0.s[1]\n"
+ "str d31, [x12, %[output_col_stride1]]\n"
+ "fmls v26.4s, v8.4s, v0.s[1]\n"
+ "mov v25.16b, v17.16b\n"
+ "add x22, x22, #8\n"
+ "fsub v27.4s, v27.4s, v3.4s\n"
+ "mov v28.16b, v18.16b\n"
+ "fmla v27.4s, v21.4s, v0.s[1]\n"
+ "fsub v26.4s, v26.4s, v4.4s\n"
+ "fmla v26.4s, v22.4s, v0.s[1]\n"
+ "fmls v25.4s, v11.4s, v0.s[1]\n"
+ "fmls v28.4s, v15.4s, v0.s[1]\n"
+ "mov v12.16b, v12.16b\n"
+ "str d27, [x12, x11]\n"
+ "mov v19.16b, v19.16b\n"
+ "str d26, [x12, x13]\n"
+ "fsub v25.4s, v25.4s, v5.4s\n"
+ "fmla v25.4s, v23.4s, v0.s[1]\n"
+ "fsub v28.4s, v28.4s, v6.4s\n"
+ "fmla v28.4s, v24.4s, v0.s[1]\n"
+ "fmla v12.4s, v10.4s, v0.s[1]\n"
+ "fmla v19.4s, v9.4s, v0.s[1]\n"
+ "mov v20.16b, v20.16b\n"
+ "str d25, [x12, x23]\n"
+ "mov v16.16b, v16.16b\n"
+ "str d28, [x12, x15]\n"
+ "fsub v12.4s, v12.4s, v1.4s\n"
+ "fmls v12.4s, v14.4s, v0.s[1]\n"
+ "add x12, x12, #8\n"
+ "fsub v19.4s, v19.4s, v2.4s\n"
+ "fmla v20.4s, v7.4s, v0.s[1]\n"
+ "fmls v19.4s, v13.4s, v0.s[1]\n"
+ "fmla v16.4s, v8.4s, v0.s[1]\n"
+ "str d12, [x14]\n"
+ "mov v1.16b, v17.16b\n"
+ "fsub v20.4s, v20.4s, v3.4s\n"
+ "mov v17.16b, v18.16b\n"
+ "str d19, [x14, %[output_col_stride1]]\n"
+ "fmls v20.4s, v21.4s, v0.s[1]\n"
+ "fsub v16.4s, v16.4s, v4.4s\n"
+ "fmla v1.4s, v11.4s, v0.s[1]\n"
+ "fmls v16.4s, v22.4s, v0.s[1]\n"
+ "fmla v17.4s, v15.4s, v0.s[1]\n"
+ "str d20, [x14, x11]\n"
+ "fsub v1.4s, v1.4s, v5.4s\n"
+ "str d16, [x14, x13]\n"
+ "fmls v1.4s, v23.4s, v0.s[1]\n"
+ "fsub v17.4s, v17.4s, v6.4s\n"
+ "fmls v17.4s, v24.4s, v0.s[1]\n"
+ "str d1, [x14, x23]\n"
+ "str d17, [x14, x15]\n"
+ "add x14, x14, #8\n"
+ "ldr d2, [x27, x20]\n"
+ "mov v4.16b, v2.16b\n"
+ "ldr d17, [x27, x10]\n"
+ "mov v12.16b, v2.16b\n"
+ "ldr d18, [x27]\n"
+ "fmla v4.4s, v18.4s, v0.s[2]\n"
+ "ldr d3, [x27, x21]\n"
+ "mov v6.16b, v2.16b\n"
+ "ldr d5, [x27, x8]\n"
+ "mov v1.16b, v2.16b\n"
+ "ldr d18, [x27, %[input_col_stride1]]\n"
+ "fmls v4.4s, v17.4s, v0.s[3]\n"
+ "add x27, x27, #8\n"
+ "fmls v12.4s, v18.4s, v0.s[2]\n"
+ "sub %w[n_channels], %w[n_channels], #2\n"
+ "fmla v6.4s, v18.4s, v0.s[2]\n"
+ "fmls v1.4s, v18.4s, v0.s[1]\n"
+ "mov v2.16b, v2.16b\n"
+ "mov v3.16b, v3.16b\n"
+ "fmls v12.4s, v17.4s, v0.s[2]\n"
+ "mov v4.16b, v4.16b\n"
+ "fmls v6.4s, v17.4s, v0.s[2]\n"
+ "fsub v1.4s, v1.4s, v17.4s\n"
+ "fmla v1.4s, v5.4s, v0.s[1]\n"
+ "fmla v2.4s, v18.4s, v0.s[1]\n"
+ "fadd v12.4s, v12.4s, v5.4s\n"
+ "fmla v3.4s, v18.4s, v0.s[2]\n"
+ "fsub v6.4s, v6.4s, v5.4s\n"
+ "fmla v4.4s, v10.4s, v0.s[2]\n"
+ "fsub v2.4s, v2.4s, v17.4s\n"
+ "mov v16.16b, v12.16b\n"
+ "fmls v2.4s, v5.4s, v0.s[1]\n"
+ "fmls v3.4s, v5.4s, v0.s[3]\n"
+ "fmls v4.4s, v14.4s, v0.s[3]\n"
+ "fmla v16.4s, v9.4s, v0.s[2]\n"
+ "mov v5.16b, v6.16b\n"
+ "mov v6.16b, v1.16b\n"
+ "mov v9.16b, v2.16b\n"
+ "mov v10.16b, v3.16b\n"
+ "str d4, [x24]\n"
+ "fmls v16.4s, v13.4s, v0.s[3]\n"
+ "fmla v5.4s, v7.4s, v0.s[2]\n"
+ "fmla v6.4s, v8.4s, v0.s[2]\n"
+ "fmla v9.4s, v11.4s, v0.s[2]\n"
+ "fmla v10.4s, v15.4s, v0.s[2]\n"
+ "str d16, [x24, %[output_col_stride1]]\n"
+ "fmls v5.4s, v21.4s, v0.s[3]\n"
+ "fmls v6.4s, v22.4s, v0.s[3]\n"
+ "fmls v9.4s, v23.4s, v0.s[3]\n"
+ "fmls v10.4s, v24.4s, v0.s[3]\n"
+ "str d5, [x24, x11]\n"
+ "str d6, [x24, x13]\n"
+ "str d9, [x24, x23]\n"
+ "str d10, [x24, x15]\n"
+ "add x24, x24, #8\n"
+ "3:\n"
+ "cbz %w[n_channels], 4f\n"
+ "ldr s8, [%[inptr0], x20]\n"
+ "mov v14.16b, v8.16b\n"
+ "ldr s2, [%[inptr0], x10]\n"
+ "mov v10.16b, v8.16b\n"
+ "ldr s9, [%[inptr0]]\n"
+ "fmla v14.4s, v9.4s, v0.s[2]\n"
+ "ldr s1, [%[inptr0], x21]\n"
+ "mov v9.16b, v8.16b\n"
+ "ldr s4, [%[inptr0], x8]\n"
+ "mov v7.16b, v8.16b\n"
+ "ldr s12, [%[inptr0], %[input_col_stride1]]\n"
+ "fmls v14.4s, v2.4s, v0.s[3]\n"
+ "ldr s5, [x16, x20]\n"
+ "fmls v10.4s, v12.4s, v0.s[2]\n"
+ "ldr s20, [x16, x10]\n"
+ "fmla v9.4s, v12.4s, v0.s[2]\n"
+ "ldr s3, [x16]\n"
+ "fmls v7.4s, v12.4s, v0.s[1]\n"
+ "ldr s6, [x16, x21]\n"
+ "fmls v10.4s, v2.4s, v0.s[2]\n"
+ "ldr s16, [x16, x8]\n"
+ "fmls v9.4s, v2.4s, v0.s[2]\n"
+ "ldr s22, [x16, %[input_col_stride1]]\n"
+ "fsub v7.4s, v7.4s, v2.4s\n"
+ "ldr s17, [x17, x20]\n"
+ "fadd v10.4s, v10.4s, v4.4s\n"
+ "ldr s15, [x17, x10]\n"
+ "fsub v9.4s, v9.4s, v4.4s\n"
+ "ldr s19, [x17]\n"
+ "fmla v7.4s, v4.4s, v0.s[1]\n"
+ "ldr s18, [x17, x21]\n"
+ "mov v8.16b, v8.16b\n"
+ "ldr s13, [x17, x8]\n"
+ "mov v11.16b, v1.16b\n"
+ "ldr s21, [x17, %[input_col_stride1]]\n"
+ "fmla v8.4s, v12.4s, v0.s[1]\n"
+ "add %[inptr0], %[inptr0], #4\n"
+ "fmla v11.4s, v12.4s, v0.s[2]\n"
+ "add x16, x16, #4\n"
+ "mov v1.16b, v5.16b\n"
+ "add x17, x17, #4\n"
+ "fsub v8.4s, v8.4s, v2.4s\n"
+ "mov v2.16b, v5.16b\n"
+ "fmls v8.4s, v4.4s, v0.s[1]\n"
+ "fmls v11.4s, v4.4s, v0.s[3]\n"
+ "fmla v1.4s, v3.4s, v0.s[2]\n"
+ "fmls v2.4s, v22.4s, v0.s[2]\n"
+ "mov v3.16b, v5.16b\n"
+ "mov v4.16b, v5.16b\n"
+ "mov v5.16b, v5.16b\n"
+ "mov v6.16b, v6.16b\n"
+ "fmls v1.4s, v20.4s, v0.s[3]\n"
+ "fmls v2.4s, v20.4s, v0.s[2]\n"
+ "fmla v3.4s, v22.4s, v0.s[2]\n"
+ "fmls v4.4s, v22.4s, v0.s[1]\n"
+ "fmla v5.4s, v22.4s, v0.s[1]\n"
+ "fmla v6.4s, v22.4s, v0.s[2]\n"
+ "fadd v2.4s, v2.4s, v16.4s\n"
+ "mov v12.16b, v17.16b\n"
+ "fmls v3.4s, v20.4s, v0.s[2]\n"
+ "fsub v4.4s, v4.4s, v20.4s\n"
+ "fmla v4.4s, v16.4s, v0.s[1]\n"
+ "fsub v5.4s, v5.4s, v20.4s\n"
+ "fmls v5.4s, v16.4s, v0.s[1]\n"
+ "fmls v6.4s, v16.4s, v0.s[3]\n"
+ "fsub v3.4s, v3.4s, v16.4s\n"
+ "fmla v12.4s, v19.4s, v0.s[2]\n"
+ "mov v19.16b, v17.16b\n"
+ "mov v20.16b, v17.16b\n"
+ "mov v16.16b, v17.16b\n"
+ "mov v17.16b, v17.16b\n"
+ "fmls v12.4s, v15.4s, v0.s[3]\n"
+ "fmls v19.4s, v21.4s, v0.s[2]\n"
+ "fmla v20.4s, v21.4s, v0.s[2]\n"
+ "fmls v16.4s, v21.4s, v0.s[1]\n"
+ "fmla v17.4s, v21.4s, v0.s[1]\n"
+ "mov v18.16b, v18.16b\n"
+ "fmls v19.4s, v15.4s, v0.s[2]\n"
+ "mov v23.16b, v12.16b\n"
+ "fmls v20.4s, v15.4s, v0.s[2]\n"
+ "fsub v16.4s, v16.4s, v15.4s\n"
+ "fmla v16.4s, v13.4s, v0.s[1]\n"
+ "fsub v17.4s, v17.4s, v15.4s\n"
+ "fadd v19.4s, v19.4s, v13.4s\n"
+ "fmls v17.4s, v13.4s, v0.s[1]\n"
+ "fsub v20.4s, v20.4s, v13.4s\n"
+ "fmla v18.4s, v21.4s, v0.s[2]\n"
+ "fmla v23.4s, v14.4s, v0.s[2]\n"
+ "mov v15.16b, v19.16b\n"
+ "mov v14.16b, v20.16b\n"
+ "mov v24.16b, v16.16b\n"
+ "fmls v18.4s, v13.4s, v0.s[3]\n"
+ "fmla v15.4s, v10.4s, v0.s[2]\n"
+ "fmls v23.4s, v1.4s, v0.s[3]\n"
+ "fmla v14.4s, v9.4s, v0.s[2]\n"
+ "fmla v24.4s, v7.4s, v0.s[2]\n"
+ "mov v10.16b, v17.16b\n"
+ "fmls v15.4s, v2.4s, v0.s[3]\n"
+ "mov v7.16b, v18.16b\n"
+ "str s23, [%[outptr0]]\n"
+ "fmls v14.4s, v3.4s, v0.s[3]\n"
+ "fmls v24.4s, v4.4s, v0.s[3]\n"
+ "fmla v10.4s, v8.4s, v0.s[2]\n"
+ "str s15, [%[outptr0], %[output_col_stride1]]\n"
+ "fmla v7.4s, v11.4s, v0.s[2]\n"
+ "str s14, [%[outptr0], x11]\n"
+ "fmls v10.4s, v5.4s, v0.s[3]\n"
+ "str s24, [%[outptr0], x13]\n"
+ "fmls v7.4s, v6.4s, v0.s[3]\n"
+ "str s10, [%[outptr0], x23]\n"
+ "str s7, [%[outptr0], x15]\n"
+ "add %[outptr0], %[outptr0], #4\n"
+ "mov v26.16b, v12.16b\n"
+ "mov v25.16b, v19.16b\n"
+ "ldr s11, [x25, x20]\n"
+ "mov v10.16b, v11.16b\n"
+ "ldr s23, [x25, x10]\n"
+ "mov v9.16b, v11.16b\n"
+ "ldr s7, [x25]\n"
+ "fmla v10.4s, v7.4s, v0.s[2]\n"
+ "ldr s13, [x25, x21]\n"
+ "mov v7.16b, v11.16b\n"
+ "ldr s31, [x25, x8]\n"
+ "mov v8.16b, v11.16b\n"
+ "ldr s21, [x25, %[input_col_stride1]]\n"
+ "fmls v10.4s, v23.4s, v0.s[3]\n"
+ "ldr s30, [x26, x20]\n"
+ "fmls v9.4s, v21.4s, v0.s[2]\n"
+ "ldr s29, [x26, x10]\n"
+ "fmla v7.4s, v21.4s, v0.s[2]\n"
+ "ldr s22, [x26]\n"
+ "fmls v8.4s, v21.4s, v0.s[1]\n"
+ "ldr s24, [x26, x21]\n"
+ "fmls v9.4s, v23.4s, v0.s[2]\n"
+ "ldr s27, [x26, x8]\n"
+ "fmls v7.4s, v23.4s, v0.s[2]\n"
+ "ldr s28, [x26, %[input_col_stride1]]\n"
+ "fsub v8.4s, v8.4s, v23.4s\n"
+ "add x25, x25, #4\n"
+ "fadd v9.4s, v9.4s, v31.4s\n"
+ "add x26, x26, #4\n"
+ "fsub v7.4s, v7.4s, v31.4s\n"
+ "fmla v8.4s, v31.4s, v0.s[1]\n"
+ "mov v11.16b, v11.16b\n"
+ "mov v15.16b, v13.16b\n"
+ "mov v14.16b, v30.16b\n"
+ "mov v13.16b, v30.16b\n"
+ "fmla v11.4s, v21.4s, v0.s[1]\n"
+ "fmla v15.4s, v21.4s, v0.s[2]\n"
+ "fmla v14.4s, v22.4s, v0.s[2]\n"
+ "fmls v13.4s, v28.4s, v0.s[2]\n"
+ "mov v21.16b, v30.16b\n"
+ "mov v22.16b, v30.16b\n"
+ "fsub v11.4s, v11.4s, v23.4s\n"
+ "fmls v15.4s, v31.4s, v0.s[3]\n"
+ "fmls v11.4s, v31.4s, v0.s[1]\n"
+ "fmls v14.4s, v29.4s, v0.s[3]\n"
+ "fmls v13.4s, v29.4s, v0.s[2]\n"
+ "fmla v21.4s, v28.4s, v0.s[2]\n"
+ "fmls v22.4s, v28.4s, v0.s[1]\n"
+ "mov v23.16b, v30.16b\n"
+ "mov v24.16b, v24.16b\n"
+ "fmls v26.4s, v10.4s, v0.s[2]\n"
+ "fadd v13.4s, v13.4s, v27.4s\n"
+ "fmls v21.4s, v29.4s, v0.s[2]\n"
+ "fsub v22.4s, v22.4s, v29.4s\n"
+ "fmla v23.4s, v28.4s, v0.s[1]\n"
+ "fmla v22.4s, v27.4s, v0.s[1]\n"
+ "fmla v24.4s, v28.4s, v0.s[2]\n"
+ "fsub v21.4s, v21.4s, v27.4s\n"
+ "fmls v26.4s, v1.4s, v0.s[2]\n"
+ "fsub v23.4s, v23.4s, v29.4s\n"
+ "fmls v25.4s, v9.4s, v0.s[2]\n"
+ "fmls v23.4s, v27.4s, v0.s[1]\n"
+ "fmls v24.4s, v27.4s, v0.s[3]\n"
+ "fadd v26.4s, v26.4s, v14.4s\n"
+ "mov v27.16b, v20.16b\n"
+ "str s26, [x28]\n"
+ "fmls v25.4s, v2.4s, v0.s[2]\n"
+ "fmls v27.4s, v7.4s, v0.s[2]\n"
+ "mov v31.16b, v16.16b\n"
+ "mov v30.16b, v17.16b\n"
+ "mov v29.16b, v18.16b\n"
+ "fadd v25.4s, v25.4s, v13.4s\n"
+ "fmls v31.4s, v8.4s, v0.s[2]\n"
+ "str s25, [x28, %[output_col_stride1]]\n"
+ "fmls v27.4s, v3.4s, v0.s[2]\n"
+ "fmls v30.4s, v11.4s, v0.s[2]\n"
+ "fmls v29.4s, v15.4s, v0.s[2]\n"
+ "fmls v31.4s, v4.4s, v0.s[2]\n"
+ "mov v26.16b, v12.16b\n"
+ "fadd v27.4s, v27.4s, v21.4s\n"
+ "mov v25.16b, v19.16b\n"
+ "str s27, [x28, x11]\n"
+ "fmls v30.4s, v5.4s, v0.s[2]\n"
+ "fadd v31.4s, v31.4s, v22.4s\n"
+ "fmls v29.4s, v6.4s, v0.s[2]\n"
+ "str s31, [x28, x13]\n"
+ "fmla v26.4s, v10.4s, v0.s[2]\n"
+ "fadd v30.4s, v30.4s, v23.4s\n"
+ "fmla v25.4s, v9.4s, v0.s[2]\n"
+ "str s30, [x28, x23]\n"
+ "fadd v29.4s, v29.4s, v24.4s\n"
+ "str s29, [x28, x15]\n"
+ "fmls v26.4s, v1.4s, v0.s[2]\n"
+ "fmls v25.4s, v2.4s, v0.s[2]\n"
+ "add x28, x28, #4\n"
+ "mov v30.16b, v20.16b\n"
+ "mov v29.16b, v16.16b\n"
+ "fsub v26.4s, v26.4s, v14.4s\n"
+ "mov v28.16b, v17.16b\n"
+ "str s26, [x22]\n"
+ "fsub v25.4s, v25.4s, v13.4s\n"
+ "str s25, [x22, %[output_col_stride1]]\n"
+ "fmla v30.4s, v7.4s, v0.s[2]\n"
+ "fmla v29.4s, v8.4s, v0.s[2]\n"
+ "fmla v28.4s, v11.4s, v0.s[2]\n"
+ "mov v26.16b, v18.16b\n"
+ "mov v25.16b, v12.16b\n"
+ "fmls v30.4s, v3.4s, v0.s[2]\n"
+ "mov v31.16b, v19.16b\n"
+ "fmls v29.4s, v4.4s, v0.s[2]\n"
+ "fmls v28.4s, v5.4s, v0.s[2]\n"
+ "fmla v26.4s, v15.4s, v0.s[2]\n"
+ "fmls v25.4s, v10.4s, v0.s[1]\n"
+ "fsub v30.4s, v30.4s, v21.4s\n"
+ "fmls v31.4s, v9.4s, v0.s[1]\n"
+ "str s30, [x22, x11]\n"
+ "fsub v29.4s, v29.4s, v22.4s\n"
+ "str s29, [x22, x13]\n"
+ "fsub v28.4s, v28.4s, v23.4s\n"
+ "str s28, [x22, x23]\n"
+ "fmls v26.4s, v6.4s, v0.s[2]\n"
+ "fsub v25.4s, v25.4s, v1.4s\n"
+ "fsub v31.4s, v31.4s, v2.4s\n"
+ "fmla v25.4s, v14.4s, v0.s[1]\n"
+ "fmla v31.4s, v13.4s, v0.s[1]\n"
+ "fsub v26.4s, v26.4s, v24.4s\n"
+ "mov v27.16b, v20.16b\n"
+ "str s26, [x22, x15]\n"
+ "mov v26.16b, v16.16b\n"
+ "str s25, [x12]\n"
+ "fmls v27.4s, v7.4s, v0.s[1]\n"
+ "str s31, [x12, %[output_col_stride1]]\n"
+ "fmls v26.4s, v8.4s, v0.s[1]\n"
+ "mov v25.16b, v17.16b\n"
+ "add x22, x22, #4\n"
+ "fsub v27.4s, v27.4s, v3.4s\n"
+ "mov v28.16b, v18.16b\n"
+ "fmla v27.4s, v21.4s, v0.s[1]\n"
+ "fsub v26.4s, v26.4s, v4.4s\n"
+ "fmla v26.4s, v22.4s, v0.s[1]\n"
+ "fmls v25.4s, v11.4s, v0.s[1]\n"
+ "fmls v28.4s, v15.4s, v0.s[1]\n"
+ "mov v12.16b, v12.16b\n"
+ "str s27, [x12, x11]\n"
+ "mov v19.16b, v19.16b\n"
+ "str s26, [x12, x13]\n"
+ "fsub v25.4s, v25.4s, v5.4s\n"
+ "fmla v25.4s, v23.4s, v0.s[1]\n"
+ "fsub v28.4s, v28.4s, v6.4s\n"
+ "fmla v28.4s, v24.4s, v0.s[1]\n"
+ "fmla v12.4s, v10.4s, v0.s[1]\n"
+ "fmla v19.4s, v9.4s, v0.s[1]\n"
+ "mov v20.16b, v20.16b\n"
+ "str s25, [x12, x23]\n"
+ "mov v16.16b, v16.16b\n"
+ "str s28, [x12, x15]\n"
+ "fsub v12.4s, v12.4s, v1.4s\n"
+ "fmls v12.4s, v14.4s, v0.s[1]\n"
+ "add x12, x12, #4\n"
+ "fsub v19.4s, v19.4s, v2.4s\n"
+ "fmla v20.4s, v7.4s, v0.s[1]\n"
+ "fmls v19.4s, v13.4s, v0.s[1]\n"
+ "fmla v16.4s, v8.4s, v0.s[1]\n"
+ "str s12, [x14]\n"
+ "mov v1.16b, v17.16b\n"
+ "fsub v20.4s, v20.4s, v3.4s\n"
+ "mov v17.16b, v18.16b\n"
+ "str s19, [x14, %[output_col_stride1]]\n"
+ "fmls v20.4s, v21.4s, v0.s[1]\n"
+ "fsub v16.4s, v16.4s, v4.4s\n"
+ "fmla v1.4s, v11.4s, v0.s[1]\n"
+ "fmls v16.4s, v22.4s, v0.s[1]\n"
+ "fmla v17.4s, v15.4s, v0.s[1]\n"
+ "str s20, [x14, x11]\n"
+ "fsub v1.4s, v1.4s, v5.4s\n"
+ "str s16, [x14, x13]\n"
+ "fmls v1.4s, v23.4s, v0.s[1]\n"
+ "fsub v17.4s, v17.4s, v6.4s\n"
+ "fmls v17.4s, v24.4s, v0.s[1]\n"
+ "str s1, [x14, x23]\n"
+ "str s17, [x14, x15]\n"
+ "add x14, x14, #4\n"
+ "ldr s2, [x27, x20]\n"
+ "mov v4.16b, v2.16b\n"
+ "ldr s17, [x27, x10]\n"
+ "mov v12.16b, v2.16b\n"
+ "ldr s18, [x27]\n"
+ "fmla v4.4s, v18.4s, v0.s[2]\n"
+ "ldr s3, [x27, x21]\n"
+ "mov v6.16b, v2.16b\n"
+ "ldr s5, [x27, x8]\n"
+ "mov v1.16b, v2.16b\n"
+ "ldr s18, [x27, %[input_col_stride1]]\n"
+ "fmls v4.4s, v17.4s, v0.s[3]\n"
+ "add x27, x27, #4\n"
+ "fmls v12.4s, v18.4s, v0.s[2]\n"
+ "fmla v6.4s, v18.4s, v0.s[2]\n"
+ "fmls v1.4s, v18.4s, v0.s[1]\n"
+ "mov v2.16b, v2.16b\n"
+ "mov v3.16b, v3.16b\n"
+ "mov v4.16b, v4.16b\n"
+ "fmls v12.4s, v17.4s, v0.s[2]\n"
+ "fmls v6.4s, v17.4s, v0.s[2]\n"
+ "fsub v1.4s, v1.4s, v17.4s\n"
+ "fmla v2.4s, v18.4s, v0.s[1]\n"
+ "fmla v1.4s, v5.4s, v0.s[1]\n"
+ "fmla v3.4s, v18.4s, v0.s[2]\n"
+ "fadd v12.4s, v12.4s, v5.4s\n"
+ "fsub v6.4s, v6.4s, v5.4s\n"
+ "fsub v2.4s, v2.4s, v17.4s\n"
+ "fmla v4.4s, v10.4s, v0.s[2]\n"
+ "fmls v2.4s, v5.4s, v0.s[1]\n"
+ "fmls v3.4s, v5.4s, v0.s[3]\n"
+ "mov v16.16b, v12.16b\n"
+ "mov v5.16b, v6.16b\n"
+ "fmls v4.4s, v14.4s, v0.s[3]\n"
+ "mov v6.16b, v1.16b\n"
+ "fmla v16.4s, v9.4s, v0.s[2]\n"
+ "fmla v5.4s, v7.4s, v0.s[2]\n"
+ "fmla v6.4s, v8.4s, v0.s[2]\n"
+ "mov v9.16b, v2.16b\n"
+ "str s4, [x24]\n"
+ "mov v10.16b, v3.16b\n"
+ "fmls v16.4s, v13.4s, v0.s[3]\n"
+ "fmls v5.4s, v21.4s, v0.s[3]\n"
+ "fmls v6.4s, v22.4s, v0.s[3]\n"
+ "fmla v9.4s, v11.4s, v0.s[2]\n"
+ "fmla v10.4s, v15.4s, v0.s[2]\n"
+ "str s16, [x24, %[output_col_stride1]]\n"
+ "str s5, [x24, x11]\n"
+ "fmls v9.4s, v23.4s, v0.s[3]\n"
+ "str s6, [x24, x13]\n"
+ "fmls v10.4s, v24.4s, v0.s[3]\n"
+ "str s9, [x24, x23]\n"
+ "str s10, [x24, x15]\n"
+ "add x24, x24, #4\n"
+ "4:\n"
+ : [outptr0] "+r" (matrix_base),
+ [n_channels] "+r" (n_channels),
+ [inptr0] "+r" (input_base)
+ : [pcoeffs] "r" (pcoeffs),
+ [output_row_stride] "r" (6 * matrix_stride * sizeof(float)),
+ [output_col_stride1] "r" (matrix_stride * sizeof(float)),
+ [input_row_stride] "r" (input_row_stride * sizeof(float)),
+ [input_col_stride1] "r" (input_col_stride * sizeof(float))
+ : "cc", "v0", "v1", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17",
+ "v18", "v19", "v2", "v20", "v21", "v22", "v23", "v24", "v25", "v26",
+ "v27", "v28", "v29", "v3", "v30", "v31", "v4", "v5", "v6", "v7", "v8",
+ "v9", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x10", "x8",
+ "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "memory"
+ );
+}
+
+} // namespace input_transform
+} // namespace winograd
+} // namespace arm_conv
+
+#endif // __aarch64__
diff --git a/src/core/NEON/kernels/convolution/winograd/input_transforms/arm_fp32_1x8.cpp b/src/core/NEON/kernels/convolution/winograd/input_transforms/arm_fp32_1x8.cpp
new file mode 100644
index 0000000000..3e1fc491f1
--- /dev/null
+++ b/src/core/NEON/kernels/convolution/winograd/input_transforms/arm_fp32_1x8.cpp
@@ -0,0 +1,155 @@
+/*
+ * Copyright (c) 2022-2024 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <cstddef>
+#include <arm_neon.h>
+
+namespace arm_conv {
+namespace winograd {
+namespace input_transform {
+
+void arm_fp32_1x8(
+ const unsigned int n_channels,
+ const float * input_base,
+ size_t, // We don't need to stride over rows
+ size_t input_col_stride,
+ float *outptr,
+ size_t matrix_stride
+)
+{
+ constexpr int inner_tile_cols = 8;
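+
+ // This kernel applies the one-dimensional Winograd F(6, 3) input transform
+ // U = B^T . x; reading the coefficients off the scalar tail below, B^T is
+ //
+ //         [ -36    0   49    0  -14    0    1    0 ]
+ //         [   0  -36   36   13  -13   -1    1    0 ]
+ //         [   0   36   36  -13  -13    1    1    0 ]
+ //   B^T = [   0  -18    9   20  -10   -2    1    0 ]
+ //         [   0   18    9  -20  -10    2    1    0 ]
+ //         [   0  -12    4   15   -5   -3    1    0 ]
+ //         [   0   12    4  -15   -5    3    1    0 ]
+ //         [   0  -36    0   49    0  -14    0    1 ]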
+
+ // Get pointers into the input tile
+ const float *x_ptrs[inner_tile_cols];
+ for (int j = 0, xj = 0; j < inner_tile_cols; j++, xj++)
+ {
+ x_ptrs[j] = input_base + xj*input_col_stride;
+ }
+
+ // Vectors used/computed in this kernel.
+ float x[inner_tile_cols];
+ float U[inner_tile_cols];
+
+ for (int j = 0; j < inner_tile_cols; j++)
+ {
+ x[j] = 0.0f;
+ }
+
+ // Perform the Winograd input transformation for each channel in the input
+ // tensor.
+ int channels_remaining = n_channels;
+ for (; channels_remaining >= 4; channels_remaining -= 4)
+ {
+ float32x4_t x[inner_tile_cols], U[inner_tile_cols];
+ for (int j = 0; j < inner_tile_cols; j++)
+ {
+ x[j] = vdupq_n_f32(0.0f);
+ }
+
+ // Load x
+ for (int j = 0; j < inner_tile_cols; j++)
+ {
+ x[j] = vld1q_f32(x_ptrs[j]);
+ x_ptrs[j] += 4;
+ }
+
+ // Compute U = x . X
+ U[0] = vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmulq_n_f32(x[6], 1), x[2], 49), x[4], -14), x[0], -36);
+ U[1] = vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmulq_n_f32(x[6], 1), x[2], 36), x[3], 13), x[4], -13), x[1], -36), x[5], -1);
+ U[2] = vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmulq_n_f32(x[6], 1), x[5], 1), x[2], 36), x[1], 36), x[4], -13), x[3], -13);
+ U[3] = vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmulq_n_f32(x[6], 1), x[3], 20), x[2], 9), x[5], -2), x[4], -10), x[1], -18);
+ U[4] = vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmulq_n_f32(x[6], 1), x[1], 18), x[2], 9), x[5], 2), x[4], -10), x[3], -20);
+ U[5] = vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmulq_n_f32(x[6], 1), x[3], 15), x[2], 4), x[5], -3), x[4], -5), x[1], -12);
+ U[6] = vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmulq_n_f32(x[6], 1), x[1], 12), x[2], 4), x[5], 3), x[4], -5), x[3], -15);
+ U[7] = vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmulq_n_f32(x[7], 1), x[3], 49), x[5], -14), x[1], -36);
+
+ // Store the transformed vector
+ for (int j = 0; j < inner_tile_cols; j++)
+ {
+ vst1q_f32(outptr + j*matrix_stride, U[j]);
+ }
+ outptr += 4;
+ }
+ for (; channels_remaining >= 2; channels_remaining -= 2)
+ {
+ float32x2_t x[inner_tile_cols], U[inner_tile_cols];
+ for (int j = 0; j < inner_tile_cols; j++)
+ {
+ x[j] = vdup_n_f32(0.0f);
+ }
+
+ // Load x
+ for (int j = 0; j < inner_tile_cols; j++)
+ {
+ x[j] = vld1_f32(x_ptrs[j]);
+ x_ptrs[j] += 2;
+ }
+
+ // Compute U = x . X
+ U[0] = vmla_n_f32(vmla_n_f32(vmla_n_f32(vmul_n_f32(x[6], 1), x[2], 49), x[4], -14), x[0], -36);
+ U[1] = vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmul_n_f32(x[6], 1), x[2], 36), x[3], 13), x[4], -13), x[1], -36), x[5], -1);
+ U[2] = vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmul_n_f32(x[6], 1), x[5], 1), x[2], 36), x[1], 36), x[4], -13), x[3], -13);
+ U[3] = vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmul_n_f32(x[6], 1), x[3], 20), x[2], 9), x[5], -2), x[4], -10), x[1], -18);
+ U[4] = vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmul_n_f32(x[6], 1), x[1], 18), x[2], 9), x[5], 2), x[4], -10), x[3], -20);
+ U[5] = vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmul_n_f32(x[6], 1), x[3], 15), x[2], 4), x[5], -3), x[4], -5), x[1], -12);
+ U[6] = vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmul_n_f32(x[6], 1), x[1], 12), x[2], 4), x[5], 3), x[4], -5), x[3], -15);
+ U[7] = vmla_n_f32(vmla_n_f32(vmla_n_f32(vmul_n_f32(x[7], 1), x[3], 49), x[5], -14), x[1], -36);
+
+ // Store the transformed vector
+ for (int j = 0; j < inner_tile_cols; j++)
+ {
+ vst1_f32(outptr + j*matrix_stride, U[j]);
+ }
+ outptr += 2;
+ }
+ for (; channels_remaining; channels_remaining--)
+ {
+ // Load x
+ for (int j = 0; j < inner_tile_cols; j++)
+ {
+ x[j] = *(x_ptrs[j]++);
+ }
+
+ // Compute U = x . X
+ U[0] = x[0]*-36 + x[4]*-14 + x[2]*49 + x[6]*1;
+ U[1] = x[5]*-1 + x[1]*-36 + x[4]*-13 + x[3]*13 + x[2]*36 + x[6]*1;
+ U[2] = x[3]*-13 + x[4]*-13 + x[1]*36 + x[2]*36 + x[5]*1 + x[6]*1;
+ U[3] = x[1]*-18 + x[4]*-10 + x[5]*-2 + x[2]*9 + x[3]*20 + x[6]*1;
+ U[4] = x[3]*-20 + x[4]*-10 + x[5]*2 + x[2]*9 + x[1]*18 + x[6]*1;
+ U[5] = x[1]*-12 + x[4]*-5 + x[5]*-3 + x[2]*4 + x[3]*15 + x[6]*1;
+ U[6] = x[3]*-15 + x[4]*-5 + x[5]*3 + x[2]*4 + x[1]*12 + x[6]*1;
+ U[7] = x[1]*-36 + x[5]*-14 + x[3]*49 + x[7]*1;
+
+ // Store the transformed vector
+ for (int j = 0; j < inner_tile_cols; j++)
+ {
+ *(outptr + j*matrix_stride) = U[j];
+ }
+ outptr++;
+ }
+}
+
+} // namespace input_transform
+} // namespace winograd
+} // namespace arm_conv
diff --git a/src/core/NEON/kernels/convolution/winograd/input_transforms/arm_fp32_4x4.cpp b/src/core/NEON/kernels/convolution/winograd/input_transforms/arm_fp32_4x4.cpp
new file mode 100644
index 0000000000..a4e6b433c7
--- /dev/null
+++ b/src/core/NEON/kernels/convolution/winograd/input_transforms/arm_fp32_4x4.cpp
@@ -0,0 +1,251 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <cstddef>
+#include <arm_neon.h>
+
+namespace arm_conv {
+namespace winograd {
+namespace input_transform {
+
+void arm_fp32_4x4(
+ const unsigned int n_channels,
+ const float *input_base,
+ const size_t input_row_stride,
+ const size_t input_col_stride,
+ float *outptr,
+ const size_t matrix_stride
+)
+{
+ constexpr int inner_tile_rows = 4, inner_tile_cols = 4;
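+
+ // This kernel applies the Winograd F(2x2, 3x3) input transform
+ // U = B^T . x . B, where (matching the commented expressions below)
+ //
+ //         [ 1   0  -1   0 ]
+ //   B^T = [ 0   1   1   0 ]
+ //         [ 0  -1   1   0 ]
+ //         [ 0   1   0  -1 ]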
+
+ // Get pointers into the input tile
+ const float *x_ptrs[inner_tile_rows][inner_tile_cols];
+ for (int i = 0, xi = 0; i < inner_tile_rows; i++, xi++)
+ {
+ // Get a pointer into the row
+ const float* const row_ptr = input_base + xi*input_row_stride;
+
+ for (int j = 0, xj = 0; j < inner_tile_cols; j++, xj++)
+ {
+ x_ptrs[i][j] = row_ptr + xj*input_col_stride;
+ }
+ }
+
+ // Matrices used/computed in this kernel.
+ float x[inner_tile_rows][inner_tile_cols];
+ float XTx[inner_tile_rows][inner_tile_cols];
+ float U[inner_tile_rows][inner_tile_cols];
+
+ for (int i = 0; i < inner_tile_rows; i++)
+ {
+ for (int j = 0; j < inner_tile_cols; j++)
+ {
+ x[i][j] = XTx[i][j] = 0.0f;
+ }
+ }
+
+ // Perform the Winograd input transformation for each channel in the input
+ // tensor.
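+ //
+ // For the 4x4 tile the transform is U = B^T . x . B with
+ //   B^T = [ 1  0 -1  0 ]
+ //         [ 0  1  1  0 ]
+ //         [ 0 -1  1  0 ]
+ //         [ 0  1  0 -1 ]
+ // so each row costs a single addition or subtraction. The three loops below
+ // apply the same transform to four, two and one channel(s) at a time.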
+ int channels_remaining = n_channels;
+ for (; channels_remaining >= 4; channels_remaining -= 4)
+ {
+ // Matrices used/computed in this kernel.
+ float32x4_t x[inner_tile_rows][inner_tile_cols];
+ float32x4_t XTx[inner_tile_rows][inner_tile_cols];
+ float32x4_t U[inner_tile_rows][inner_tile_cols];
+
+ for (int i = 0; i < inner_tile_rows; i++)
+ {
+ for (int j = 0; j < inner_tile_cols; j++)
+ {
+ x[i][j] = vdupq_n_f32(0.0f);
+ XTx[i][j] = vdupq_n_f32(0.0f);
+ }
+ }
+
+ // Load x
+ for (int i = 0; i < inner_tile_rows; i++)
+ {
+ for (int j = 0; j < inner_tile_cols; j++)
+ {
+ x[i][j] = vld1q_f32(x_ptrs[i][j]);
+ x_ptrs[i][j] += 4;
+ }
+ }
+
+ // Compute XT . x
+ for (int j = 0; j < inner_tile_cols; j++)
+ {
+ // XTx[0][j] = x[0][j] - x[2][j];
+ XTx[0][j] = vsubq_f32(x[0][j], x[2][j]);
+
+ // XTx[1][j] = x[1][j] + x[2][j];
+ XTx[1][j] = vaddq_f32(x[1][j], x[2][j]);
+
+ // XTx[2][j] = x[2][j] - x[1][j];
+ XTx[2][j] = vsubq_f32(x[2][j], x[1][j]);
+
+ // XTx[3][j] = x[1][j] - x[3][j];
+ XTx[3][j] = vsubq_f32(x[1][j], x[3][j]);
+ }
+
+ // Compute U = XT . x . X
+ for (int i = 0; i < inner_tile_rows; i++)
+ {
+ // U[i][0] = XTx[i][0] - XTx[i][2];
+ U[i][0] = vsubq_f32(XTx[i][0], XTx[i][2]);
+
+ // U[i][1] = XTx[i][1] + XTx[i][2];
+ U[i][1] = vaddq_f32(XTx[i][1], XTx[i][2]);
+
+ // U[i][2] = XTx[i][2] - XTx[i][1];
+ U[i][2] = vsubq_f32(XTx[i][2], XTx[i][1]);
+
+ // U[i][3] = XTx[i][1] - XTx[i][3];
+ U[i][3] = vsubq_f32(XTx[i][1], XTx[i][3]);
+ }
+
+ // Store the transformed matrix
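+ // Element (i, j) of U is written to Winograd matrix m = 4*i + j; the
+ // matrices sit matrix_stride apart and channels are contiguous within each
+ // matrix, so outptr advances by the vector length afterwards.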
+ for (int i = 0, m = 0; i < inner_tile_rows; i++)
+ {
+ for (int j = 0; j < inner_tile_cols; j++, m++)
+ {
+ vst1q_f32(outptr + m*matrix_stride, U[i][j]);
+ }
+ }
+ outptr += 4;
+ }
+ for (; channels_remaining >= 2; channels_remaining -= 2)
+ {
+ // Matrices used/computed in this kernel.
+ float32x2_t x[inner_tile_rows][inner_tile_cols];
+ float32x2_t XTx[inner_tile_rows][inner_tile_cols];
+ float32x2_t U[inner_tile_rows][inner_tile_cols];
+
+ for (int i = 0; i < inner_tile_rows; i++)
+ {
+ for (int j = 0; j < inner_tile_cols; j++)
+ {
+ x[i][j] = vdup_n_f32(0.0f);
+ XTx[i][j] = vdup_n_f32(0.0f);
+ }
+ }
+
+ // Load x
+ for (int i = 0; i < inner_tile_rows; i++)
+ {
+ for (int j = 0; j < inner_tile_cols; j++)
+ {
+ x[i][j] = vld1_f32(x_ptrs[i][j]);
+ x_ptrs[i][j] += 2;
+ }
+ }
+
+ // Compute XT . x
+ for (int j = 0; j < inner_tile_cols; j++)
+ {
+ // XTx[0][j] = x[0][j] - x[2][j];
+ XTx[0][j] = vsub_f32(x[0][j], x[2][j]);
+
+ // XTx[1][j] = x[1][j] + x[2][j];
+ XTx[1][j] = vadd_f32(x[1][j], x[2][j]);
+
+ // XTx[2][j] = x[2][j] - x[1][j];
+ XTx[2][j] = vsub_f32(x[2][j], x[1][j]);
+
+ // XTx[3][j] = x[1][j] - x[3][j];
+ XTx[3][j] = vsub_f32(x[1][j], x[3][j]);
+ }
+
+ // Compute U = XT . x . X
+ for (int i = 0; i < inner_tile_rows; i++)
+ {
+ // U[i][0] = XTx[i][0] - XTx[i][2];
+ U[i][0] = vsub_f32(XTx[i][0], XTx[i][2]);
+
+ // U[i][1] = XTx[i][1] + XTx[i][2];
+ U[i][1] = vadd_f32(XTx[i][1], XTx[i][2]);
+
+ // U[i][2] = XTx[i][2] - XTx[i][1];
+ U[i][2] = vsub_f32(XTx[i][2], XTx[i][1]);
+
+ // U[i][3] = XTx[i][1] - XTx[i][3];
+ U[i][3] = vsub_f32(XTx[i][1], XTx[i][3]);
+ }
+
+ // Store the transformed matrix
+ for (int i = 0, m = 0; i < inner_tile_rows; i++)
+ {
+ for (int j = 0; j < inner_tile_cols; j++, m++)
+ {
+ vst1_f32(outptr + m*matrix_stride, U[i][j]);
+ }
+ }
+ outptr += 2;
+ }
+ for (; channels_remaining; channels_remaining--)
+ {
+ // Load x
+ for (int i = 0; i < inner_tile_rows; i++)
+ {
+ for (int j = 0; j < inner_tile_cols; j++)
+ {
+ x[i][j] = *(x_ptrs[i][j]++);
+ }
+ }
+
+ // Compute XT . x
+ for (int j = 0; j < inner_tile_cols; j++)
+ {
+ XTx[0][j] = x[0][j] - x[2][j];
+ XTx[1][j] = x[1][j] + x[2][j];
+ XTx[2][j] = x[2][j] - x[1][j];
+ XTx[3][j] = x[1][j] - x[3][j];
+ }
+
+ // Compute U = XT . x . X
+ for (int i = 0; i < inner_tile_rows; i++)
+ {
+ U[i][0] = XTx[i][0] - XTx[i][2];
+ U[i][1] = XTx[i][1] + XTx[i][2];
+ U[i][2] = XTx[i][2] - XTx[i][1];
+ U[i][3] = XTx[i][1] - XTx[i][3];
+ }
+
+ // Store the transformed matrix
+ for (int i = 0, m = 0; i < inner_tile_rows; i++)
+ {
+ for (int j = 0; j < inner_tile_cols; j++, m++)
+ {
+ *(outptr + m*matrix_stride) = U[i][j];
+ }
+ }
+ outptr++;
+ }
+}
+
+} // namespace input_transform
+} // namespace winograd
+} // namespace arm_conv
diff --git a/src/core/NEON/kernels/convolution/winograd/input_transforms/arm_fp32_6x6.cpp b/src/core/NEON/kernels/convolution/winograd/input_transforms/arm_fp32_6x6.cpp
new file mode 100644
index 0000000000..4adc45768e
--- /dev/null
+++ b/src/core/NEON/kernels/convolution/winograd/input_transforms/arm_fp32_6x6.cpp
@@ -0,0 +1,202 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __aarch64__
+
+#include <arm_neon.h>
+#include <cstddef>
+
+namespace arm_conv {
+namespace winograd {
+namespace input_transform {
+
+void arm_fp32_6x6(
+ unsigned int n_channels,
+ const float* const input_base,
+ const size_t input_row_stride,
+ const size_t input_col_stride,
+ float* outptr,
+ const size_t matrix_stride
+)
+{
+ constexpr int inner_tile_rows = 6;
+ constexpr int inner_tile_cols = 6;
+
+ // Get pointers into the input tile
+ const float *x_ptrs[inner_tile_rows][inner_tile_cols];
+ for (int i = 0, xi = 0; i < inner_tile_rows; i++, xi++)
+ {
+ // Get a pointer into the row
+ const float* const row_ptr = input_base + xi*input_row_stride;
+
+ for (int j = 0, xj = 0; j < inner_tile_cols; j++, xj++)
+ {
+ x_ptrs[i][j] = row_ptr + xj*input_col_stride;
+ }
+ }
+
+ // Matrices used/computed in this kernel.
+ float x[inner_tile_rows][inner_tile_cols];
+ float XTx[inner_tile_rows][inner_tile_cols];
+ float U[inner_tile_rows][inner_tile_cols];
+ for (int i = 0; i < inner_tile_rows; i++)
+ {
+ for (int j = 0; j < inner_tile_cols; j++)
+ {
+ x[i][j] = XTx[i][j] = 0.0f;
+ }
+ }
+
+ // Perform the Winograd input transformation for each channel in the input
+ // tensor.
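+ //
+ // For the 6x6 tile the transform is U = XT . x . X, where XT (gathered from
+ // the per-row comments below) is
+ //   [ 4  0 -5  0  1  0 ]
+ //   [ 0 -4 -4  1  1  0 ]
+ //   [ 0  4 -4 -1  1  0 ]
+ //   [ 0 -2 -1  2  1  0 ]
+ //   [ 0  2 -1 -2  1  0 ]
+ //   [ 0  4  0 -5  0  1 ]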
+ int channels_remaining = n_channels;
+ for (; channels_remaining >= 2; channels_remaining -= 2)
+ {
+ // Matrices used/computed in this kernel
+ float32x2_t x[inner_tile_rows][inner_tile_cols];
+ float32x2_t XTx[inner_tile_rows][inner_tile_cols];
+ float32x2_t U[inner_tile_rows][inner_tile_cols];
+ for (int i = 0; i < inner_tile_rows; i++)
+ {
+ for (int j = 0; j < inner_tile_cols; j++)
+ {
+ x[i][j] = vdup_n_f32(0.0f);
+ XTx[i][j] = vdup_n_f32(0.0f);
+ }
+ }
+
+ // Load a 6x6 tile of the input
+ for (int i = 0; i < inner_tile_rows; i++)
+ {
+ for (int j = 0; j < inner_tile_cols; j++)
+ {
+ x[i][j] = vld1_f32(x_ptrs[i][j]);
+ x_ptrs[i][j] += 2;
+ }
+ }
+
+ // Compute XT . x
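+ // Each row folds into two multiply-accumulate steps:
+ // vmla_n_f32(a, b, s) computes a + b*s and vmls_n_f32(a, b, s) computes
+ // a - b*s, e.g. XTx[0][j] = (x[4][j] + 4*x[0][j]) - 5*x[2][j].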
+ for (int j = 0; j < inner_tile_cols; j++)
+ {
+ // XTx[0][j] = 4*x[0][j] + -5*x[2][j] + 1*x[4][j];
+ XTx[0][j] = vmls_n_f32(vmla_n_f32(x[4][j], x[0][j], 4.0f), x[2][j], 5.0f);
+
+ // XTx[1][j] = -4*x[1][j] + -4*x[2][j] + 1*x[3][j] + 1*x[4][j];
+ XTx[1][j] = vmls_n_f32(vadd_f32(x[3][j], x[4][j]), vadd_f32(x[1][j], x[2][j]), 4.0f);
+
+ // XTx[2][j] = 4*x[1][j] + -4*x[2][j] + -1*x[3][j] + 1*x[4][j];
+ XTx[2][j] = vmla_n_f32(vsub_f32(x[4][j], x[3][j]), vsub_f32(x[1][j], x[2][j]), 4.0f);
+
+ // XTx[3][j] = -2*x[1][j] + -1*x[2][j] + 2*x[3][j] + 1*x[4][j];
+ XTx[3][j] = vmla_n_f32(vsub_f32(x[4][j], x[2][j]), vsub_f32(x[3][j], x[1][j]), 2.0f);
+
+ // XTx[4][j] = 2*x[1][j] + -1*x[2][j] + -2*x[3][j] + 1*x[4][j];
+ XTx[4][j] = vmla_n_f32(vsub_f32(x[4][j], x[2][j]), vsub_f32(x[1][j], x[3][j]), 2.0f);
+
+ // XTx[5][j] = 4*x[1][j] + -5*x[3][j] + 1*x[5][j];
+ XTx[5][j] = vmls_n_f32(vmla_n_f32(x[5][j], x[1][j], 4.0f), x[3][j], 5.0f);
+ }
+
+ // Compute U = XT . x . X
+ for (int i = 0; i < inner_tile_rows; i++)
+ {
+ // U[i][0] = 4*XTx[i][0] + -5*XTx[i][2] + 1*XTx[i][4];
+ U[i][0] = vmls_n_f32(vmla_n_f32(XTx[i][4], XTx[i][0], 4.0f), XTx[i][2], 5.0f);
+
+ // U[i][1] = -4*XTx[i][1] + -4*XTx[i][2] + 1*XTx[i][3] + 1*XTx[i][4];
+ U[i][1] = vmls_n_f32(vadd_f32(XTx[i][3], XTx[i][4]), vadd_f32(XTx[i][1], XTx[i][2]), 4.0f);
+
+ // U[i][2] = 4*XTx[i][1] + -4*XTx[i][2] + -1*XTx[i][3] + 1*XTx[i][4];
+ U[i][2] = vmla_n_f32(vsub_f32(XTx[i][4], XTx[i][3]), vsub_f32(XTx[i][1], XTx[i][2]), 4.0f);
+
+ // U[i][3] = -2*XTx[i][1] + -1*XTx[i][2] + 2*XTx[i][3] + 1*XTx[i][4];
+ U[i][3] = vmla_n_f32(vsub_f32(XTx[i][4], XTx[i][2]), vsub_f32(XTx[i][3], XTx[i][1]), 2.0f);
+
+ // U[i][4] = 2*XTx[i][1] + -1*XTx[i][2] + -2*XTx[i][3] + 1*XTx[i][4];
+ U[i][4] = vmla_n_f32(vsub_f32(XTx[i][4], XTx[i][2]), vsub_f32(XTx[i][1], XTx[i][3]), 2.0f);
+
+ // U[i][5] = 4*XTx[i][1] + -5*XTx[i][3] + 1*XTx[i][5];
+ U[i][5] = vmls_n_f32(vmla_n_f32(XTx[i][5], XTx[i][1], 4.0f), XTx[i][3], 5.0f);
+ }
+
+ // Store the transformed matrix
+ for (int i = 0, m = 0; i < inner_tile_rows; i++)
+ {
+ for (int j = 0; j < inner_tile_cols; j++, m++)
+ {
+ vst1_f32(outptr + m*matrix_stride, U[i][j]);
+ }
+ }
+ outptr += 2;
+ }
+ for (; channels_remaining; channels_remaining--)
+ {
+ // Load x
+ for (int i = 0; i < inner_tile_rows; i++)
+ {
+ for (int j = 0; j < inner_tile_cols; j++)
+ {
+ x[i][j] = *(x_ptrs[i][j]++);
+ }
+ }
+
+ // Compute XT . x
+ for (int j = 0; j < inner_tile_cols; j++)
+ {
+ XTx[0][j] = 4*x[0][j] + -5*x[2][j] + 1*x[4][j];
+ XTx[1][j] = -4*x[1][j] + -4*x[2][j] + 1*x[3][j] + 1*x[4][j];
+ XTx[2][j] = 4*x[1][j] + -4*x[2][j] + -1*x[3][j] + 1*x[4][j];
+ XTx[3][j] = -2*x[1][j] + -1*x[2][j] + 2*x[3][j] + 1*x[4][j];
+ XTx[4][j] = 2*x[1][j] + -1*x[2][j] + -2*x[3][j] + 1*x[4][j];
+ XTx[5][j] = 4*x[1][j] + -5*x[3][j] + 1*x[5][j];
+ }
+
+ // Compute U = XT . x . X
+ for (int i = 0; i < inner_tile_rows; i++)
+ {
+ U[i][0] = 4*XTx[i][0] + -5*XTx[i][2] + 1*XTx[i][4];
+ U[i][1] = -4*XTx[i][1] + -4*XTx[i][2] + 1*XTx[i][3] + 1*XTx[i][4];
+ U[i][2] = 4*XTx[i][1] + -4*XTx[i][2] + -1*XTx[i][3] + 1*XTx[i][4];
+ U[i][3] = -2*XTx[i][1] + -1*XTx[i][2] + 2*XTx[i][3] + 1*XTx[i][4];
+ U[i][4] = 2*XTx[i][1] + -1*XTx[i][2] + -2*XTx[i][3] + 1*XTx[i][4];
+ U[i][5] = 4*XTx[i][1] + -5*XTx[i][3] + 1*XTx[i][5];
+ }
+
+ // Store the transformed matrix
+ for (int i = 0, m = 0; i < inner_tile_rows; i++)
+ {
+ for (int j = 0; j < inner_tile_cols; j++, m++)
+ {
+ *(outptr + m*matrix_stride) = U[i][j];
+ }
+ }
+ outptr++;
+ }
+}
+
+} // namespace input_transform
+} // namespace winograd
+} // namespace arm_conv
+
+#endif // ! __aarch64__
diff --git a/src/core/NEON/kernels/convolution/winograd/input_transforms/sme_fp32_mla_6x6.cpp b/src/core/NEON/kernels/convolution/winograd/input_transforms/sme_fp32_mla_6x6.cpp
new file mode 100644
index 0000000000..f446e7ea8b
--- /dev/null
+++ b/src/core/NEON/kernels/convolution/winograd/input_transforms/sme_fp32_mla_6x6.cpp
@@ -0,0 +1,363 @@
+/*
+ * Copyright (c) 2022-2023 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#if defined(ARM_COMPUTE_ENABLE_SME)
+
+#include <cstddef>
+
+namespace arm_conv {
+namespace winograd {
+namespace input_transform {
+
+void sme_fp32_mla_6x6(
+ const unsigned int num_channels,
+ const float *input,
+ const size_t input_row_stride,
+ const size_t input_col_stride,
+ float *output,
+ const size_t output_col_stride
+)
+{
+ const float B_values[4] = { 1.0f, 2.0f, 4.0f, 5.0f };
+ long long_channels = num_channels;
+
+ // Generated by armasmgen (February 4th, 2021)
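+ //
+ // A rough map of the generated code: the kernel runs in streaming SVE mode
+ // (SMSTART ZA ... SMSTOP). z2 holds B_values = {1, 2, 4, 5}, so the +/-2,
+ // +/-4 and +/-5 multipliers come from z2.s[1..3], with 4.0 kept in z16 for
+ // the fmad/fmsb forms. x16/x14/x12/x10/x28 walk input rows 1..5 and
+ // x15/x13/x11/x9/x27 the matching output rows. Each trip of channel_loop
+ // transforms one whilelt-predicated vector of channels, advancing the
+ // pointers by one vector of floats (incb) and the counter by one vector of
+ // lanes (decw), so no scalar tail loop is needed.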
+ __asm__ __volatile__(
+ ".inst 0xd503477f // SMSTART ZA\n"
+ "fmov z16.s, #4.0\n"
+ "ptrue p1.b\n"
+ "ld1rqw { z2.s }, p1/Z, [%x[B_values]]\n"
+ "add x16, %x[input_row_0], %x[input_row_stride], LSL #2\n"
+ "add x15, %x[output_row_0], %x[output_row_stride], LSL #2\n"
+ "add x14, %x[input_row_0], %x[input_row_stride], LSL #3\n"
+ "add x13, %x[output_row_0], %x[output_row_stride], LSL #3\n"
+ "add x12, x14, %x[input_row_stride], LSL #2\n"
+ "add x11, x13, %x[output_row_stride], LSL #2\n"
+ "add x10, %x[input_row_0], %x[input_row_stride], LSL #4\n"
+ "add x9, %x[output_row_0], %x[output_row_stride], LSL #4\n"
+ "add x28, x10, %x[input_row_stride], LSL #2\n"
+ "add x27, x9, %x[output_row_stride], LSL #2\n"
+ "lsl x26, %x[input_col_1_stride], #0x1\n"
+ "lsl x25, %x[output_col_1_stride], #0x1\n"
+ "add x24, x26, %x[input_col_1_stride]\n"
+ "add x23, x25, %x[output_col_1_stride]\n"
+ "lsl x22, %x[input_col_1_stride], #0x2\n"
+ "lsl x21, %x[output_col_1_stride], #0x2\n"
+ "add x20, x22, %x[input_col_1_stride]\n"
+ "add x8, x21, %x[output_col_1_stride]\n"
+ "whilelt p0.s, XZR, %x[num_channels]\n"
+ "beq 2f\n"
+ "1:" // channel_loop
+ "ld1w { z31.s }, p0/Z, [%x[input_row_0]]\n"
+ "decw %x[num_channels]\n"
+ "ld1w { z28.s }, p0/Z, [%x[input_row_0], %x[input_col_1_stride], LSL #2]\n"
+ "fmul z13.s, z28.s, z2.s[1]\n"
+ "ld1w { z27.s }, p0/Z, [%x[input_row_0], x26, LSL #2]\n"
+ "ld1w { z11.s }, p0/Z, [%x[input_row_0], x24, LSL #2]\n"
+ "fneg z13.s, p1/M, z13.s\n"
+ "ld1w { z7.s }, p0/Z, [%x[input_row_0], x22, LSL #2]\n"
+ "fsub z15.s, z7.s, z27.s\n"
+ "fmad z31.s, p1/M, z16.s, z7.s\n"
+ "ld1w { z3.s }, p0/Z, [%x[input_row_0], x20, LSL #2]\n"
+ "fmla z13.s, z11.s, z2.s[1]\n"
+ "ld1w { z12.s }, p0/Z, [x14]\n"
+ "incb %x[input_row_0]\n"
+ "fmls z31.s, z27.s, z2.s[3]\n"
+ "ld1w { z14.s }, p0/Z, [x14, %x[input_col_1_stride], LSL #2]\n"
+ "fsub z25.s, z15.s, z13.s\n"
+ "fadd z8.s, z13.s, z15.s\n"
+ "ld1w { z24.s }, p0/Z, [x14, x26, LSL #2]\n"
+ "fmsb z27.s, p1/M, z16.s, z7.s\n"
+ "ld1w { z22.s }, p0/Z, [x14, x24, LSL #2]\n"
+ "fmul z7.s, z28.s, z2.s[2]\n"
+ "ld1w { z1.s }, p0/Z, [x14, x22, LSL #2]\n"
+ "fsub z15.s, z1.s, z24.s\n"
+ "fneg z7.s, p1/M, z7.s\n"
+ "ld1w { z20.s }, p0/Z, [x14, x20, LSL #2]\n"
+ "fadd z7.s, z7.s, z11.s\n"
+ "ld1w { z29.s }, p0/Z, [x10]\n"
+ "incb x14\n"
+ "fmad z28.s, p1/M, z16.s, z3.s\n"
+ "ld1w { z10.s }, p0/Z, [x10, %x[input_col_1_stride], LSL #2]\n"
+ "fmad z12.s, p1/M, z16.s, z1.s\n"
+ "ld1w { z18.s }, p0/Z, [x10, x26, LSL #2]\n"
+ "fmul z13.s, z14.s, z2.s[1]\n"
+ "ld1w { z19.s }, p0/Z, [x10, x24, LSL #2]\n"
+ "fadd z17.s, z7.s, z27.s\n"
+ "ld1w { z9.s }, p0/Z, [x10, x22, LSL #2]\n"
+ "fsub z27.s, z27.s, z7.s\n"
+ "fmls z28.s, z11.s, z2.s[3]\n"
+ "ld1w { z21.s }, p0/Z, [x10, x20, LSL #2]\n"
+ "incb x10\n"
+ "fmls z12.s, z24.s, z2.s[3]\n"
+ "fneg z13.s, p1/M, z13.s\n"
+ "fmla z13.s, z22.s, z2.s[1]\n"
+ "fsub z30.s, z15.s, z13.s\n"
+ "fadd z4.s, z13.s, z15.s\n"
+ "fmsb z24.s, p1/M, z16.s, z1.s\n"
+ "fsub z15.s, z9.s, z18.s\n"
+ "fmul z1.s, z14.s, z2.s[2]\n"
+ "fmad z14.s, p1/M, z16.s, z20.s\n"
+ "fmad z29.s, p1/M, z16.s, z9.s\n"
+ "fmul z13.s, z10.s, z2.s[1]\n"
+ "fneg z1.s, p1/M, z1.s\n"
+ "fadd z1.s, z1.s, z22.s\n"
+ "fmls z14.s, z22.s, z2.s[3]\n"
+ "fmls z29.s, z18.s, z2.s[3]\n"
+ "fadd z5.s, z1.s, z24.s\n"
+ "fsub z24.s, z24.s, z1.s\n"
+ "fneg z13.s, p1/M, z13.s\n"
+ "fmla z13.s, z19.s, z2.s[1]\n"
+ "fsub z23.s, z15.s, z13.s\n"
+ "fadd z11.s, z13.s, z15.s\n"
+ "fmsb z18.s, p1/M, z16.s, z9.s\n"
+ "fmul z9.s, z10.s, z2.s[2]\n"
+ "fmad z10.s, p1/M, z16.s, z21.s\n"
+ "fmad z31.s, p1/M, z16.s, z29.s\n"
+ "fmad z8.s, p1/M, z16.s, z11.s\n"
+ "fneg z9.s, p1/M, z9.s\n"
+ "fadd z9.s, z9.s, z19.s\n"
+ "fmls z10.s, z19.s, z2.s[3]\n"
+ "fmls z31.s, z12.s, z2.s[3]\n"
+ "st1w { z31.s }, p0, [%x[output_row_0]]\n"
+ "fadd z26.s, z9.s, z18.s\n"
+ "fsub z18.s, z18.s, z9.s\n"
+ "fmls z8.s, z4.s, z2.s[3]\n"
+ "fmad z25.s, p1/M, z16.s, z23.s\n"
+ "fmad z28.s, p1/M, z16.s, z10.s\n"
+ "fmad z17.s, p1/M, z16.s, z26.s\n"
+ "fmad z27.s, p1/M, z16.s, z18.s\n"
+ "fmls z25.s, z30.s, z2.s[3]\n"
+ "fmls z28.s, z14.s, z2.s[3]\n"
+ "fmls z17.s, z5.s, z2.s[3]\n"
+ "st1w { z17.s }, p0, [%x[output_row_0], %x[output_col_1_stride], LSL #2]\n"
+ "fmls z27.s, z24.s, z2.s[3]\n"
+ "st1w { z27.s }, p0, [%x[output_row_0], x25, LSL #2]\n"
+ "st1w { z8.s }, p0, [%x[output_row_0], x23, LSL #2]\n"
+ "st1w { z25.s }, p0, [%x[output_row_0], x21, LSL #2]\n"
+ "st1w { z28.s }, p0, [%x[output_row_0], x8, LSL #2]\n"
+ "incb %x[output_row_0]\n"
+ "ld1w { z19.s }, p0/Z, [x16]\n"
+ "ld1w { z7.s }, p0/Z, [x16, %x[input_col_1_stride], LSL #2]\n"
+ "fmul z13.s, z7.s, z2.s[1]\n"
+ "ld1w { z6.s }, p0/Z, [x16, x26, LSL #2]\n"
+ "ld1w { z27.s }, p0/Z, [x16, x24, LSL #2]\n"
+ "fneg z13.s, p1/M, z13.s\n"
+ "ld1w { z25.s }, p0/Z, [x16, x22, LSL #2]\n"
+ "fsub z15.s, z25.s, z6.s\n"
+ "fmad z19.s, p1/M, z16.s, z25.s\n"
+ "ld1w { z20.s }, p0/Z, [x16, x20, LSL #2]\n"
+ "fmla z13.s, z27.s, z2.s[1]\n"
+ "ld1w { z0.s }, p0/Z, [x12]\n"
+ "incb x16\n"
+ "fmls z19.s, z6.s, z2.s[3]\n"
+ "ld1w { z31.s }, p0/Z, [x12, %x[input_col_1_stride], LSL #2]\n"
+ "fsub z8.s, z15.s, z13.s\n"
+ "fadd z28.s, z13.s, z15.s\n"
+ "ld1w { z1.s }, p0/Z, [x12, x26, LSL #2]\n"
+ "fmsb z6.s, p1/M, z16.s, z25.s\n"
+ "ld1w { z21.s }, p0/Z, [x12, x24, LSL #2]\n"
+ "fmul z25.s, z7.s, z2.s[2]\n"
+ "ld1w { z22.s }, p0/Z, [x12, x22, LSL #2]\n"
+ "fsub z15.s, z22.s, z1.s\n"
+ "fneg z25.s, p1/M, z25.s\n"
+ "ld1w { z17.s }, p0/Z, [x12, x20, LSL #2]\n"
+ "fadd z25.s, z25.s, z27.s\n"
+ "incb x12\n"
+ "fmad z7.s, p1/M, z16.s, z20.s\n"
+ "fmad z0.s, p1/M, z16.s, z22.s\n"
+ "fmul z13.s, z31.s, z2.s[1]\n"
+ "fadd z3.s, z25.s, z6.s\n"
+ "fsub z6.s, z6.s, z25.s\n"
+ "fmls z7.s, z27.s, z2.s[3]\n"
+ "fmls z0.s, z1.s, z2.s[3]\n"
+ "fneg z13.s, p1/M, z13.s\n"
+ "fmla z13.s, z21.s, z2.s[1]\n"
+ "fsub z9.s, z15.s, z13.s\n"
+ "fadd z27.s, z13.s, z15.s\n"
+ "fmsb z1.s, p1/M, z16.s, z22.s\n"
+ "fsub z15.s, z29.s, z12.s\n"
+ "fmul z22.s, z31.s, z2.s[2]\n"
+ "fmad z31.s, p1/M, z16.s, z17.s\n"
+ "fmul z13.s, z19.s, z2.s[1]\n"
+ "fmsb z12.s, p1/M, z16.s, z29.s\n"
+ "fneg z22.s, p1/M, z22.s\n"
+ "fadd z22.s, z22.s, z21.s\n"
+ "fmls z31.s, z21.s, z2.s[3]\n"
+ "fneg z13.s, p1/M, z13.s\n"
+ "fadd z25.s, z22.s, z1.s\n"
+ "fsub z1.s, z1.s, z22.s\n"
+ "fmla z13.s, z0.s, z2.s[1]\n"
+ "fmul z29.s, z19.s, z2.s[2]\n"
+ "fadd z22.s, z13.s, z15.s\n"
+ "st1w { z22.s }, p0, [x11]\n"
+ "fneg z29.s, p1/M, z29.s\n"
+ "fsub z22.s, z15.s, z13.s\n"
+ "fadd z29.s, z29.s, z0.s\n"
+ "st1w { z22.s }, p0, [x9]\n"
+ "fadd z22.s, z29.s, z12.s\n"
+ "fsub z15.s, z26.s, z5.s\n"
+ "fmul z13.s, z3.s, z2.s[1]\n"
+ "fsub z12.s, z12.s, z29.s\n"
+ "fmsb z5.s, p1/M, z16.s, z26.s\n"
+ "fmul z26.s, z3.s, z2.s[2]\n"
+ "fneg z13.s, p1/M, z13.s\n"
+ "fmla z13.s, z25.s, z2.s[1]\n"
+ "fneg z26.s, p1/M, z26.s\n"
+ "fadd z26.s, z26.s, z25.s\n"
+ "fadd z21.s, z13.s, z15.s\n"
+ "st1w { z21.s }, p0, [x11, %x[output_col_1_stride], LSL #2]\n"
+ "fsub z21.s, z15.s, z13.s\n"
+ "fmul z13.s, z6.s, z2.s[1]\n"
+ "fneg z13.s, p1/M, z13.s\n"
+ "st1w { z21.s }, p0, [x9, %x[output_col_1_stride], LSL #2]\n"
+ "fadd z21.s, z26.s, z5.s\n"
+ "fsub z15.s, z18.s, z24.s\n"
+ "fmla z13.s, z1.s, z2.s[1]\n"
+ "fsub z5.s, z5.s, z26.s\n"
+ "fmsb z24.s, p1/M, z16.s, z18.s\n"
+ "fmul z18.s, z6.s, z2.s[2]\n"
+ "fadd z20.s, z13.s, z15.s\n"
+ "st1w { z20.s }, p0, [x11, x25, LSL #2]\n"
+ "fneg z18.s, p1/M, z18.s\n"
+ "fsub z20.s, z15.s, z13.s\n"
+ "fadd z18.s, z18.s, z1.s\n"
+ "st1w { z20.s }, p0, [x9, x25, LSL #2]\n"
+ "fadd z20.s, z18.s, z24.s\n"
+ "fsub z15.s, z11.s, z4.s\n"
+ "fmul z13.s, z28.s, z2.s[1]\n"
+ "fsub z24.s, z24.s, z18.s\n"
+ "fmsb z4.s, p1/M, z16.s, z11.s\n"
+ "fmul z11.s, z28.s, z2.s[2]\n"
+ "fneg z13.s, p1/M, z13.s\n"
+ "fmla z13.s, z27.s, z2.s[1]\n"
+ "fneg z11.s, p1/M, z11.s\n"
+ "fadd z11.s, z11.s, z27.s\n"
+ "fadd z26.s, z13.s, z15.s\n"
+ "st1w { z26.s }, p0, [x11, x23, LSL #2]\n"
+ "fsub z26.s, z15.s, z13.s\n"
+ "fmul z13.s, z8.s, z2.s[1]\n"
+ "fneg z13.s, p1/M, z13.s\n"
+ "st1w { z26.s }, p0, [x9, x23, LSL #2]\n"
+ "fadd z26.s, z11.s, z4.s\n"
+ "fsub z15.s, z23.s, z30.s\n"
+ "fmla z13.s, z9.s, z2.s[1]\n"
+ "fsub z4.s, z4.s, z11.s\n"
+ "fmsb z30.s, p1/M, z16.s, z23.s\n"
+ "fmul z23.s, z8.s, z2.s[2]\n"
+ "fadd z18.s, z13.s, z15.s\n"
+ "st1w { z18.s }, p0, [x11, x21, LSL #2]\n"
+ "fneg z23.s, p1/M, z23.s\n"
+ "fsub z18.s, z15.s, z13.s\n"
+ "fadd z23.s, z23.s, z9.s\n"
+ "st1w { z18.s }, p0, [x9, x21, LSL #2]\n"
+ "fadd z18.s, z23.s, z30.s\n"
+ "fsub z15.s, z10.s, z14.s\n"
+ "fmul z13.s, z7.s, z2.s[1]\n"
+ "fsub z30.s, z30.s, z23.s\n"
+ "fmsb z14.s, p1/M, z16.s, z10.s\n"
+ "fmul z10.s, z7.s, z2.s[2]\n"
+ "fneg z13.s, p1/M, z13.s\n"
+ "fmla z13.s, z31.s, z2.s[1]\n"
+ "fneg z10.s, p1/M, z10.s\n"
+ "fadd z10.s, z10.s, z31.s\n"
+ "fadd z17.s, z13.s, z15.s\n"
+ "st1w { z17.s }, p0, [x11, x8, LSL #2]\n"
+ "fsub z17.s, z15.s, z13.s\n"
+ "incb x11\n"
+ "st1w { z17.s }, p0, [x9, x8, LSL #2]\n"
+ "fadd z17.s, z10.s, z14.s\n"
+ "fsub z14.s, z14.s, z10.s\n"
+ "st1w { z22.s }, p0, [x15]\n"
+ "incb x9\n"
+ "st1w { z12.s }, p0, [x13]\n"
+ "st1w { z21.s }, p0, [x15, %x[output_col_1_stride], LSL #2]\n"
+ "st1w { z5.s }, p0, [x13, %x[output_col_1_stride], LSL #2]\n"
+ "st1w { z20.s }, p0, [x15, x25, LSL #2]\n"
+ "st1w { z24.s }, p0, [x13, x25, LSL #2]\n"
+ "st1w { z26.s }, p0, [x15, x23, LSL #2]\n"
+ "st1w { z4.s }, p0, [x13, x23, LSL #2]\n"
+ "st1w { z18.s }, p0, [x15, x21, LSL #2]\n"
+ "st1w { z30.s }, p0, [x13, x21, LSL #2]\n"
+ "st1w { z17.s }, p0, [x15, x8, LSL #2]\n"
+ "incb x15\n"
+ "st1w { z14.s }, p0, [x13, x8, LSL #2]\n"
+ "incb x13\n"
+ "ld1w { z23.s }, p0/Z, [x28]\n"
+ "ld1w { z22.s }, p0/Z, [x28, %x[input_col_1_stride], LSL #2]\n"
+ "fmul z13.s, z22.s, z2.s[1]\n"
+ "ld1w { z21.s }, p0/Z, [x28, x26, LSL #2]\n"
+ "ld1w { z20.s }, p0/Z, [x28, x24, LSL #2]\n"
+ "fneg z13.s, p1/M, z13.s\n"
+ "ld1w { z26.s }, p0/Z, [x28, x22, LSL #2]\n"
+ "fsub z15.s, z26.s, z21.s\n"
+ "fmad z23.s, p1/M, z16.s, z26.s\n"
+ "ld1w { z18.s }, p0/Z, [x28, x20, LSL #2]\n"
+ "fmla z13.s, z20.s, z2.s[1]\n"
+ "incb x28\n"
+ "fmls z23.s, z21.s, z2.s[3]\n"
+ "fsub z17.s, z15.s, z13.s\n"
+ "fadd z30.s, z13.s, z15.s\n"
+ "fmsb z21.s, p1/M, z16.s, z26.s\n"
+ "fmul z26.s, z22.s, z2.s[2]\n"
+ "fmad z22.s, p1/M, z16.s, z18.s\n"
+ "fmad z19.s, p1/M, z16.s, z23.s\n"
+ "fmad z28.s, p1/M, z16.s, z30.s\n"
+ "fneg z26.s, p1/M, z26.s\n"
+ "fadd z26.s, z26.s, z20.s\n"
+ "fmls z22.s, z20.s, z2.s[3]\n"
+ "fmls z19.s, z0.s, z2.s[3]\n"
+ "st1w { z19.s }, p0, [x27]\n"
+ "fadd z23.s, z26.s, z21.s\n"
+ "fsub z21.s, z21.s, z26.s\n"
+ "fmls z28.s, z27.s, z2.s[3]\n"
+ "fmad z8.s, p1/M, z16.s, z17.s\n"
+ "fmad z7.s, p1/M, z16.s, z22.s\n"
+ "fmad z3.s, p1/M, z16.s, z23.s\n"
+ "fmad z6.s, p1/M, z16.s, z21.s\n"
+ "fmls z8.s, z9.s, z2.s[3]\n"
+ "fmls z7.s, z31.s, z2.s[3]\n"
+ "fmls z3.s, z25.s, z2.s[3]\n"
+ "st1w { z3.s }, p0, [x27, %x[output_col_1_stride], LSL #2]\n"
+ "fmls z6.s, z1.s, z2.s[3]\n"
+ "st1w { z6.s }, p0, [x27, x25, LSL #2]\n"
+ "st1w { z28.s }, p0, [x27, x23, LSL #2]\n"
+ "st1w { z8.s }, p0, [x27, x21, LSL #2]\n"
+ "st1w { z7.s }, p0, [x27, x8, LSL #2]\n"
+ "incb x27\n"
+ "whilelt p0.s, XZR, %x[num_channels]\n"
+ "bne 1b\n"
+ "2:" // channel_loop_end
+ ".inst 0xd503467f // SMSTOP\n"
+ : [input_row_0] "+&r" (input), [num_channels] "+&r" (long_channels), [output_row_0] "+&r" (output)
+ : [B_values] "r" (B_values), [input_col_1_stride] "r" ((long) input_col_stride), [input_row_stride] "r" ((long) input_row_stride), [output_col_1_stride] "r" ((long) output_col_stride), [output_row_stride] "r" (6 * (long) output_col_stride)
+ : "cc", "memory", "p0", "p1", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x8", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ );
+}
+
+} // namespace input_transform
+} // namespace winograd
+} // namespace arm_conv
+
+#endif // defined(ARM_COMPUTE_ENABLE_SME)
diff --git a/src/core/NEON/kernels/convolution/winograd/input_transforms/sve_fp32_6x6.cpp b/src/core/NEON/kernels/convolution/winograd/input_transforms/sve_fp32_6x6.cpp
new file mode 100644
index 0000000000..7b387e1247
--- /dev/null
+++ b/src/core/NEON/kernels/convolution/winograd/input_transforms/sve_fp32_6x6.cpp
@@ -0,0 +1,361 @@
+/*
+ * Copyright (c) 2022-2023 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#if defined(__aarch64__) && defined(ARM_COMPUTE_ENABLE_SVE)
+#include <cstddef>
+
+namespace arm_conv {
+namespace winograd {
+namespace input_transform {
+
+void sve_fp32_6x6(
+ const unsigned int num_channels,
+ const float *input,
+ const size_t input_row_stride,
+ const size_t input_col_stride,
+ float *output,
+ const size_t output_col_stride
+)
+{
+ const float B_values[4] = { 1.0f, 2.0f, 4.0f, 5.0f };
+ long long_channels = num_channels;
+
+ // Generated by armasmgen (February 4th, 2021)
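+ //
+ // Same structure as the SME variant but in plain SVE (no SMSTART/SMSTOP):
+ // z2 carries B_values = {1, 2, 4, 5}, z16 carries 4.0, and channel_loop
+ // handles one whilelt-predicated vector of channels per iteration.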
+ __asm__ __volatile__(
+ "fmov z16.s, #4.0\n"
+ "ptrue p1.b\n"
+ "ld1rqw { z2.s }, p1/Z, [%x[B_values]]\n"
+ "add x16, %x[input_row_0], %x[input_row_stride], LSL #2\n"
+ "add x15, %x[output_row_0], %x[output_row_stride], LSL #2\n"
+ "add x14, %x[input_row_0], %x[input_row_stride], LSL #3\n"
+ "add x13, %x[output_row_0], %x[output_row_stride], LSL #3\n"
+ "add x12, x14, %x[input_row_stride], LSL #2\n"
+ "add x11, x13, %x[output_row_stride], LSL #2\n"
+ "add x10, %x[input_row_0], %x[input_row_stride], LSL #4\n"
+ "add x9, %x[output_row_0], %x[output_row_stride], LSL #4\n"
+ "add x28, x10, %x[input_row_stride], LSL #2\n"
+ "add x27, x9, %x[output_row_stride], LSL #2\n"
+ "lsl x26, %x[input_col_1_stride], #0x1\n"
+ "lsl x25, %x[output_col_1_stride], #0x1\n"
+ "add x24, x26, %x[input_col_1_stride]\n"
+ "add x23, x25, %x[output_col_1_stride]\n"
+ "lsl x22, %x[input_col_1_stride], #0x2\n"
+ "lsl x21, %x[output_col_1_stride], #0x2\n"
+ "add x20, x22, %x[input_col_1_stride]\n"
+ "add x8, x21, %x[output_col_1_stride]\n"
+ "whilelt p0.s, XZR, %x[num_channels]\n"
+ "beq 2f\n"
+ "1:" // channel_loop
+ "ld1w { z31.s }, p0/Z, [%x[input_row_0]]\n"
+ "decw %x[num_channels]\n"
+ "ld1w { z28.s }, p0/Z, [%x[input_row_0], %x[input_col_1_stride], LSL #2]\n"
+ "fmul z13.s, z28.s, z2.s[1]\n"
+ "ld1w { z27.s }, p0/Z, [%x[input_row_0], x26, LSL #2]\n"
+ "ld1w { z11.s }, p0/Z, [%x[input_row_0], x24, LSL #2]\n"
+ "fneg z13.s, p1/M, z13.s\n"
+ "ld1w { z7.s }, p0/Z, [%x[input_row_0], x22, LSL #2]\n"
+ "fsub z15.s, z7.s, z27.s\n"
+ "fmad z31.s, p1/M, z16.s, z7.s\n"
+ "ld1w { z3.s }, p0/Z, [%x[input_row_0], x20, LSL #2]\n"
+ "fmla z13.s, z11.s, z2.s[1]\n"
+ "ld1w { z12.s }, p0/Z, [x14]\n"
+ "incb %x[input_row_0]\n"
+ "fmls z31.s, z27.s, z2.s[3]\n"
+ "ld1w { z14.s }, p0/Z, [x14, %x[input_col_1_stride], LSL #2]\n"
+ "fsub z25.s, z15.s, z13.s\n"
+ "fadd z8.s, z13.s, z15.s\n"
+ "ld1w { z24.s }, p0/Z, [x14, x26, LSL #2]\n"
+ "fmsb z27.s, p1/M, z16.s, z7.s\n"
+ "ld1w { z22.s }, p0/Z, [x14, x24, LSL #2]\n"
+ "fmul z7.s, z28.s, z2.s[2]\n"
+ "ld1w { z1.s }, p0/Z, [x14, x22, LSL #2]\n"
+ "fsub z15.s, z1.s, z24.s\n"
+ "fneg z7.s, p1/M, z7.s\n"
+ "ld1w { z20.s }, p0/Z, [x14, x20, LSL #2]\n"
+ "fadd z7.s, z7.s, z11.s\n"
+ "ld1w { z29.s }, p0/Z, [x10]\n"
+ "incb x14\n"
+ "fmad z28.s, p1/M, z16.s, z3.s\n"
+ "ld1w { z10.s }, p0/Z, [x10, %x[input_col_1_stride], LSL #2]\n"
+ "fmad z12.s, p1/M, z16.s, z1.s\n"
+ "ld1w { z18.s }, p0/Z, [x10, x26, LSL #2]\n"
+ "fmul z13.s, z14.s, z2.s[1]\n"
+ "ld1w { z19.s }, p0/Z, [x10, x24, LSL #2]\n"
+ "fadd z17.s, z7.s, z27.s\n"
+ "ld1w { z9.s }, p0/Z, [x10, x22, LSL #2]\n"
+ "fsub z27.s, z27.s, z7.s\n"
+ "fmls z28.s, z11.s, z2.s[3]\n"
+ "ld1w { z21.s }, p0/Z, [x10, x20, LSL #2]\n"
+ "incb x10\n"
+ "fmls z12.s, z24.s, z2.s[3]\n"
+ "fneg z13.s, p1/M, z13.s\n"
+ "fmla z13.s, z22.s, z2.s[1]\n"
+ "fsub z30.s, z15.s, z13.s\n"
+ "fadd z4.s, z13.s, z15.s\n"
+ "fmsb z24.s, p1/M, z16.s, z1.s\n"
+ "fsub z15.s, z9.s, z18.s\n"
+ "fmul z1.s, z14.s, z2.s[2]\n"
+ "fmad z14.s, p1/M, z16.s, z20.s\n"
+ "fmad z29.s, p1/M, z16.s, z9.s\n"
+ "fmul z13.s, z10.s, z2.s[1]\n"
+ "fneg z1.s, p1/M, z1.s\n"
+ "fadd z1.s, z1.s, z22.s\n"
+ "fmls z14.s, z22.s, z2.s[3]\n"
+ "fmls z29.s, z18.s, z2.s[3]\n"
+ "fadd z5.s, z1.s, z24.s\n"
+ "fsub z24.s, z24.s, z1.s\n"
+ "fneg z13.s, p1/M, z13.s\n"
+ "fmla z13.s, z19.s, z2.s[1]\n"
+ "fsub z23.s, z15.s, z13.s\n"
+ "fadd z11.s, z13.s, z15.s\n"
+ "fmsb z18.s, p1/M, z16.s, z9.s\n"
+ "fmul z9.s, z10.s, z2.s[2]\n"
+ "fmad z10.s, p1/M, z16.s, z21.s\n"
+ "fmad z31.s, p1/M, z16.s, z29.s\n"
+ "fmad z8.s, p1/M, z16.s, z11.s\n"
+ "fneg z9.s, p1/M, z9.s\n"
+ "fadd z9.s, z9.s, z19.s\n"
+ "fmls z10.s, z19.s, z2.s[3]\n"
+ "fmls z31.s, z12.s, z2.s[3]\n"
+ "st1w { z31.s }, p0, [%x[output_row_0]]\n"
+ "fadd z26.s, z9.s, z18.s\n"
+ "fsub z18.s, z18.s, z9.s\n"
+ "fmls z8.s, z4.s, z2.s[3]\n"
+ "fmad z25.s, p1/M, z16.s, z23.s\n"
+ "fmad z28.s, p1/M, z16.s, z10.s\n"
+ "fmad z17.s, p1/M, z16.s, z26.s\n"
+ "fmad z27.s, p1/M, z16.s, z18.s\n"
+ "fmls z25.s, z30.s, z2.s[3]\n"
+ "fmls z28.s, z14.s, z2.s[3]\n"
+ "fmls z17.s, z5.s, z2.s[3]\n"
+ "st1w { z17.s }, p0, [%x[output_row_0], %x[output_col_1_stride], LSL #2]\n"
+ "fmls z27.s, z24.s, z2.s[3]\n"
+ "st1w { z27.s }, p0, [%x[output_row_0], x25, LSL #2]\n"
+ "st1w { z8.s }, p0, [%x[output_row_0], x23, LSL #2]\n"
+ "st1w { z25.s }, p0, [%x[output_row_0], x21, LSL #2]\n"
+ "st1w { z28.s }, p0, [%x[output_row_0], x8, LSL #2]\n"
+ "incb %x[output_row_0]\n"
+ "ld1w { z19.s }, p0/Z, [x16]\n"
+ "ld1w { z7.s }, p0/Z, [x16, %x[input_col_1_stride], LSL #2]\n"
+ "fmul z13.s, z7.s, z2.s[1]\n"
+ "ld1w { z6.s }, p0/Z, [x16, x26, LSL #2]\n"
+ "ld1w { z27.s }, p0/Z, [x16, x24, LSL #2]\n"
+ "fneg z13.s, p1/M, z13.s\n"
+ "ld1w { z25.s }, p0/Z, [x16, x22, LSL #2]\n"
+ "fsub z15.s, z25.s, z6.s\n"
+ "fmad z19.s, p1/M, z16.s, z25.s\n"
+ "ld1w { z20.s }, p0/Z, [x16, x20, LSL #2]\n"
+ "fmla z13.s, z27.s, z2.s[1]\n"
+ "ld1w { z0.s }, p0/Z, [x12]\n"
+ "incb x16\n"
+ "fmls z19.s, z6.s, z2.s[3]\n"
+ "ld1w { z31.s }, p0/Z, [x12, %x[input_col_1_stride], LSL #2]\n"
+ "fsub z8.s, z15.s, z13.s\n"
+ "fadd z28.s, z13.s, z15.s\n"
+ "ld1w { z1.s }, p0/Z, [x12, x26, LSL #2]\n"
+ "fmsb z6.s, p1/M, z16.s, z25.s\n"
+ "ld1w { z21.s }, p0/Z, [x12, x24, LSL #2]\n"
+ "fmul z25.s, z7.s, z2.s[2]\n"
+ "ld1w { z22.s }, p0/Z, [x12, x22, LSL #2]\n"
+ "fsub z15.s, z22.s, z1.s\n"
+ "fneg z25.s, p1/M, z25.s\n"
+ "ld1w { z17.s }, p0/Z, [x12, x20, LSL #2]\n"
+ "fadd z25.s, z25.s, z27.s\n"
+ "incb x12\n"
+ "fmad z7.s, p1/M, z16.s, z20.s\n"
+ "fmad z0.s, p1/M, z16.s, z22.s\n"
+ "fmul z13.s, z31.s, z2.s[1]\n"
+ "fadd z3.s, z25.s, z6.s\n"
+ "fsub z6.s, z6.s, z25.s\n"
+ "fmls z7.s, z27.s, z2.s[3]\n"
+ "fmls z0.s, z1.s, z2.s[3]\n"
+ "fneg z13.s, p1/M, z13.s\n"
+ "fmla z13.s, z21.s, z2.s[1]\n"
+ "fsub z9.s, z15.s, z13.s\n"
+ "fadd z27.s, z13.s, z15.s\n"
+ "fmsb z1.s, p1/M, z16.s, z22.s\n"
+ "fsub z15.s, z29.s, z12.s\n"
+ "fmul z22.s, z31.s, z2.s[2]\n"
+ "fmad z31.s, p1/M, z16.s, z17.s\n"
+ "fmul z13.s, z19.s, z2.s[1]\n"
+ "fmsb z12.s, p1/M, z16.s, z29.s\n"
+ "fneg z22.s, p1/M, z22.s\n"
+ "fadd z22.s, z22.s, z21.s\n"
+ "fmls z31.s, z21.s, z2.s[3]\n"
+ "fneg z13.s, p1/M, z13.s\n"
+ "fadd z25.s, z22.s, z1.s\n"
+ "fsub z1.s, z1.s, z22.s\n"
+ "fmla z13.s, z0.s, z2.s[1]\n"
+ "fmul z29.s, z19.s, z2.s[2]\n"
+ "fadd z22.s, z13.s, z15.s\n"
+ "st1w { z22.s }, p0, [x11]\n"
+ "fneg z29.s, p1/M, z29.s\n"
+ "fsub z22.s, z15.s, z13.s\n"
+ "fadd z29.s, z29.s, z0.s\n"
+ "st1w { z22.s }, p0, [x9]\n"
+ "fadd z22.s, z29.s, z12.s\n"
+ "fsub z15.s, z26.s, z5.s\n"
+ "fmul z13.s, z3.s, z2.s[1]\n"
+ "fsub z12.s, z12.s, z29.s\n"
+ "fmsb z5.s, p1/M, z16.s, z26.s\n"
+ "fmul z26.s, z3.s, z2.s[2]\n"
+ "fneg z13.s, p1/M, z13.s\n"
+ "fmla z13.s, z25.s, z2.s[1]\n"
+ "fneg z26.s, p1/M, z26.s\n"
+ "fadd z26.s, z26.s, z25.s\n"
+ "fadd z21.s, z13.s, z15.s\n"
+ "st1w { z21.s }, p0, [x11, %x[output_col_1_stride], LSL #2]\n"
+ "fsub z21.s, z15.s, z13.s\n"
+ "fmul z13.s, z6.s, z2.s[1]\n"
+ "fneg z13.s, p1/M, z13.s\n"
+ "st1w { z21.s }, p0, [x9, %x[output_col_1_stride], LSL #2]\n"
+ "fadd z21.s, z26.s, z5.s\n"
+ "fsub z15.s, z18.s, z24.s\n"
+ "fmla z13.s, z1.s, z2.s[1]\n"
+ "fsub z5.s, z5.s, z26.s\n"
+ "fmsb z24.s, p1/M, z16.s, z18.s\n"
+ "fmul z18.s, z6.s, z2.s[2]\n"
+ "fadd z20.s, z13.s, z15.s\n"
+ "st1w { z20.s }, p0, [x11, x25, LSL #2]\n"
+ "fneg z18.s, p1/M, z18.s\n"
+ "fsub z20.s, z15.s, z13.s\n"
+ "fadd z18.s, z18.s, z1.s\n"
+ "st1w { z20.s }, p0, [x9, x25, LSL #2]\n"
+ "fadd z20.s, z18.s, z24.s\n"
+ "fsub z15.s, z11.s, z4.s\n"
+ "fmul z13.s, z28.s, z2.s[1]\n"
+ "fsub z24.s, z24.s, z18.s\n"
+ "fmsb z4.s, p1/M, z16.s, z11.s\n"
+ "fmul z11.s, z28.s, z2.s[2]\n"
+ "fneg z13.s, p1/M, z13.s\n"
+ "fmla z13.s, z27.s, z2.s[1]\n"
+ "fneg z11.s, p1/M, z11.s\n"
+ "fadd z11.s, z11.s, z27.s\n"
+ "fadd z26.s, z13.s, z15.s\n"
+ "st1w { z26.s }, p0, [x11, x23, LSL #2]\n"
+ "fsub z26.s, z15.s, z13.s\n"
+ "fmul z13.s, z8.s, z2.s[1]\n"
+ "fneg z13.s, p1/M, z13.s\n"
+ "st1w { z26.s }, p0, [x9, x23, LSL #2]\n"
+ "fadd z26.s, z11.s, z4.s\n"
+ "fsub z15.s, z23.s, z30.s\n"
+ "fmla z13.s, z9.s, z2.s[1]\n"
+ "fsub z4.s, z4.s, z11.s\n"
+ "fmsb z30.s, p1/M, z16.s, z23.s\n"
+ "fmul z23.s, z8.s, z2.s[2]\n"
+ "fadd z18.s, z13.s, z15.s\n"
+ "st1w { z18.s }, p0, [x11, x21, LSL #2]\n"
+ "fneg z23.s, p1/M, z23.s\n"
+ "fsub z18.s, z15.s, z13.s\n"
+ "fadd z23.s, z23.s, z9.s\n"
+ "st1w { z18.s }, p0, [x9, x21, LSL #2]\n"
+ "fadd z18.s, z23.s, z30.s\n"
+ "fsub z15.s, z10.s, z14.s\n"
+ "fmul z13.s, z7.s, z2.s[1]\n"
+ "fsub z30.s, z30.s, z23.s\n"
+ "fmsb z14.s, p1/M, z16.s, z10.s\n"
+ "fmul z10.s, z7.s, z2.s[2]\n"
+ "fneg z13.s, p1/M, z13.s\n"
+ "fmla z13.s, z31.s, z2.s[1]\n"
+ "fneg z10.s, p1/M, z10.s\n"
+ "fadd z10.s, z10.s, z31.s\n"
+ "fadd z17.s, z13.s, z15.s\n"
+ "st1w { z17.s }, p0, [x11, x8, LSL #2]\n"
+ "fsub z17.s, z15.s, z13.s\n"
+ "incb x11\n"
+ "st1w { z17.s }, p0, [x9, x8, LSL #2]\n"
+ "fadd z17.s, z10.s, z14.s\n"
+ "fsub z14.s, z14.s, z10.s\n"
+ "st1w { z22.s }, p0, [x15]\n"
+ "incb x9\n"
+ "st1w { z12.s }, p0, [x13]\n"
+ "st1w { z21.s }, p0, [x15, %x[output_col_1_stride], LSL #2]\n"
+ "st1w { z5.s }, p0, [x13, %x[output_col_1_stride], LSL #2]\n"
+ "st1w { z20.s }, p0, [x15, x25, LSL #2]\n"
+ "st1w { z24.s }, p0, [x13, x25, LSL #2]\n"
+ "st1w { z26.s }, p0, [x15, x23, LSL #2]\n"
+ "st1w { z4.s }, p0, [x13, x23, LSL #2]\n"
+ "st1w { z18.s }, p0, [x15, x21, LSL #2]\n"
+ "st1w { z30.s }, p0, [x13, x21, LSL #2]\n"
+ "st1w { z17.s }, p0, [x15, x8, LSL #2]\n"
+ "incb x15\n"
+ "st1w { z14.s }, p0, [x13, x8, LSL #2]\n"
+ "incb x13\n"
+ "ld1w { z23.s }, p0/Z, [x28]\n"
+ "ld1w { z22.s }, p0/Z, [x28, %x[input_col_1_stride], LSL #2]\n"
+ "fmul z13.s, z22.s, z2.s[1]\n"
+ "ld1w { z21.s }, p0/Z, [x28, x26, LSL #2]\n"
+ "ld1w { z20.s }, p0/Z, [x28, x24, LSL #2]\n"
+ "fneg z13.s, p1/M, z13.s\n"
+ "ld1w { z26.s }, p0/Z, [x28, x22, LSL #2]\n"
+ "fsub z15.s, z26.s, z21.s\n"
+ "fmad z23.s, p1/M, z16.s, z26.s\n"
+ "ld1w { z18.s }, p0/Z, [x28, x20, LSL #2]\n"
+ "fmla z13.s, z20.s, z2.s[1]\n"
+ "incb x28\n"
+ "fmls z23.s, z21.s, z2.s[3]\n"
+ "fsub z17.s, z15.s, z13.s\n"
+ "fadd z30.s, z13.s, z15.s\n"
+ "fmsb z21.s, p1/M, z16.s, z26.s\n"
+ "fmul z26.s, z22.s, z2.s[2]\n"
+ "fmad z22.s, p1/M, z16.s, z18.s\n"
+ "fmad z19.s, p1/M, z16.s, z23.s\n"
+ "fmad z28.s, p1/M, z16.s, z30.s\n"
+ "fneg z26.s, p1/M, z26.s\n"
+ "fadd z26.s, z26.s, z20.s\n"
+ "fmls z22.s, z20.s, z2.s[3]\n"
+ "fmls z19.s, z0.s, z2.s[3]\n"
+ "st1w { z19.s }, p0, [x27]\n"
+ "fadd z23.s, z26.s, z21.s\n"
+ "fsub z21.s, z21.s, z26.s\n"
+ "fmls z28.s, z27.s, z2.s[3]\n"
+ "fmad z8.s, p1/M, z16.s, z17.s\n"
+ "fmad z7.s, p1/M, z16.s, z22.s\n"
+ "fmad z3.s, p1/M, z16.s, z23.s\n"
+ "fmad z6.s, p1/M, z16.s, z21.s\n"
+ "fmls z8.s, z9.s, z2.s[3]\n"
+ "fmls z7.s, z31.s, z2.s[3]\n"
+ "fmls z3.s, z25.s, z2.s[3]\n"
+ "st1w { z3.s }, p0, [x27, %x[output_col_1_stride], LSL #2]\n"
+ "fmls z6.s, z1.s, z2.s[3]\n"
+ "st1w { z6.s }, p0, [x27, x25, LSL #2]\n"
+ "st1w { z28.s }, p0, [x27, x23, LSL #2]\n"
+ "st1w { z8.s }, p0, [x27, x21, LSL #2]\n"
+ "st1w { z7.s }, p0, [x27, x8, LSL #2]\n"
+ "incb x27\n"
+ "whilelt p0.s, XZR, %x[num_channels]\n"
+ "bne 1b\n"
+ "2:" // channel_loop_end
+
+ : [input_row_0] "+&r" (input), [num_channels] "+&r" (long_channels), [output_row_0] "+&r" (output)
+ : [B_values] "r" (B_values), [input_col_1_stride] "r" ((long) input_col_stride), [input_row_stride] "r" ((long) input_row_stride), [output_col_1_stride] "r" ((long) output_col_stride), [output_row_stride] "r" (6 * (long) output_col_stride)
+ : "cc", "memory", "p0", "p1", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x8", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ );
+}
+
+} // namespace input_transform
+} // namespace winograd
+} // namespace arm_conv
+
+#endif // defined(__aarch64__) && defined(ARM_COMPUTE_ENABLE_SVE)