Diffstat (limited to 'src/core/NEON/kernels/convolution/winograd/transforms')
-rw-r--r--  src/core/NEON/kernels/convolution/winograd/transforms/input_1x8_fp32.cpp        261
-rw-r--r--  src/core/NEON/kernels/convolution/winograd/transforms/input_2x2_3x3_fp32.cpp    311
-rw-r--r--  src/core/NEON/kernels/convolution/winograd/transforms/input_6x6_fp32.cpp        376
-rw-r--r--  src/core/NEON/kernels/convolution/winograd/transforms/output_2_7_fp32.cpp       163
-rw-r--r--  src/core/NEON/kernels/convolution/winograd/transforms/output_2x2_3x3_fp32.cpp   375
-rw-r--r--  src/core/NEON/kernels/convolution/winograd/transforms/output_2x2_5x5_fp32.cpp   369
-rw-r--r--  src/core/NEON/kernels/convolution/winograd/transforms/output_4_5_fp32.cpp       171
-rw-r--r--  src/core/NEON/kernels/convolution/winograd/transforms/output_4x4_3x3_fp32.cpp   428
-rw-r--r--  src/core/NEON/kernels/convolution/winograd/transforms/output_6_3_fp32.cpp       179
-rw-r--r--  src/core/NEON/kernels/convolution/winograd/transforms/weights_2_7_fp32.cpp      124
-rw-r--r--  src/core/NEON/kernels/convolution/winograd/transforms/weights_2x2_3x3_fp32.cpp  228
-rw-r--r--  src/core/NEON/kernels/convolution/winograd/transforms/weights_2x2_5x5_fp32.cpp  408
-rw-r--r--  src/core/NEON/kernels/convolution/winograd/transforms/weights_4_5_fp32.cpp      124
-rw-r--r--  src/core/NEON/kernels/convolution/winograd/transforms/weights_4x4_3x3_fp32.cpp  266
-rw-r--r--  src/core/NEON/kernels/convolution/winograd/transforms/weights_6_3_fp32.cpp      125
15 files changed, 0 insertions, 3908 deletions
diff --git a/src/core/NEON/kernels/convolution/winograd/transforms/input_1x8_fp32.cpp b/src/core/NEON/kernels/convolution/winograd/transforms/input_1x8_fp32.cpp
deleted file mode 100644
index e66300d39a..0000000000
--- a/src/core/NEON/kernels/convolution/winograd/transforms/input_1x8_fp32.cpp
+++ /dev/null
@@ -1,261 +0,0 @@
-/*
- * Copyright (c) 2017 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include "arm_compute/core/NEON/kernels/convolution/winograd/transforms/input.hpp"
-#include "arm_compute/core/NEON/kernels/convolution/winograd/winograd_gemm.hpp"
-#include "arm_compute/core/NEON/kernels/convolution/common/arm.hpp"
-
-namespace
-{
-
-template <bool Specialized, int PadTop=0, int PadLeft=0, int PadBottom=0, int PadRight=0>
-void winograd_input_transform_1x8_fp32_process_tile(
- int n_channels,
- const float* const input_base,
- const int input_row_stride,
- const int input_col_stride,
- float* const matrix_base,
- const int matrix_stride,
- const int _pad_top,
- const int _pad_left,
- const int _pad_bottom,
- const int _pad_right
-)
-{
- (void) input_row_stride; // No rows over which to stride
- (void) _pad_top; // Never any top padding
- (void) _pad_bottom; // Never any bottom padding
-
- // Extract padding arguments
- const int pad_left = Specialized ? PadLeft : _pad_left;
- const int pad_right = Specialized ? PadRight : _pad_right;
-
- constexpr int inner_tile_cols = 8;
- const int cells_j = inner_tile_cols - pad_right;
-
- float *outptr = matrix_base;
-
- // Get pointers into the input tile
- const float *x_ptrs[inner_tile_cols];
- for (int j = pad_left, xj = 0; j < cells_j; j++, xj++)
- {
- x_ptrs[j] = input_base + xj*input_col_stride;
- }
-
- // Vectors used/computed in this kernel.
- float x[inner_tile_cols];
- float U[inner_tile_cols];
-
- for (int j = 0; j < inner_tile_cols; j++)
- {
- x[j] = 0.0f;
- }
-
- // Perform the Winograd input transformation for each channel in the input
- // tensor.
- int channels_remaining = n_channels;
-#ifdef __arm_any__
- for (; channels_remaining >= 4; channels_remaining -= 4)
- {
- float32x4_t x[inner_tile_cols], U[inner_tile_cols];
- for (int j = 0; j < inner_tile_cols; j++)
- {
- x[j] = vdupq_n_f32(0.0f);
- }
-
- // Load x
- for (int j = pad_left; j < cells_j; j++)
- {
- x[j] = vld1q_f32(x_ptrs[j]);
- x_ptrs[j] += 4;
- }
-
- // Compute U = x . X
- U[0] = vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmulq_n_f32(x[6], 1), x[2], 49), x[4], -14), x[0], -36);
- U[1] = vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmulq_n_f32(x[6], 1), x[2], 36), x[3], 13), x[4], -13), x[1], -36), x[5], -1);
- U[2] = vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmulq_n_f32(x[6], 1), x[5], 1), x[2], 36), x[1], 36), x[4], -13), x[3], -13);
- U[3] = vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmulq_n_f32(x[6], 1), x[3], 20), x[2], 9), x[5], -2), x[4], -10), x[1], -18);
- U[4] = vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmulq_n_f32(x[6], 1), x[1], 18), x[2], 9), x[5], 2), x[4], -10), x[3], -20);
- U[5] = vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmulq_n_f32(x[6], 1), x[3], 15), x[2], 4), x[5], -3), x[4], -5), x[1], -12);
- U[6] = vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmulq_n_f32(x[6], 1), x[1], 12), x[2], 4), x[5], 3), x[4], -5), x[3], -15);
- U[7] = vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmulq_n_f32(x[7], 1), x[3], 49), x[5], -14), x[1], -36);
-
- // Store the transformed vector
- for (int j = 0; j < inner_tile_cols; j++)
- {
- vst1q_f32(outptr + j*matrix_stride, U[j]);
- }
- outptr += 4;
- }
- for (; channels_remaining >= 2; channels_remaining -= 2)
- {
- float32x2_t x[inner_tile_cols], U[inner_tile_cols];
- for (int j = 0; j < inner_tile_cols; j++)
- {
- x[j] = vdup_n_f32(0.0f);
- }
-
- // Load x
- for (int j = pad_left; j < cells_j; j++)
- {
- x[j] = vld1_f32(x_ptrs[j]);
- x_ptrs[j] += 2;
- }
-
- // Compute U = x . X
- U[0] = vmla_n_f32(vmla_n_f32(vmla_n_f32(vmul_n_f32(x[6], 1), x[2], 49), x[4], -14), x[0], -36);
- U[1] = vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmul_n_f32(x[6], 1), x[2], 36), x[3], 13), x[4], -13), x[1], -36), x[5], -1);
- U[2] = vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmul_n_f32(x[6], 1), x[5], 1), x[2], 36), x[1], 36), x[4], -13), x[3], -13);
- U[3] = vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmul_n_f32(x[6], 1), x[3], 20), x[2], 9), x[5], -2), x[4], -10), x[1], -18);
- U[4] = vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmul_n_f32(x[6], 1), x[1], 18), x[2], 9), x[5], 2), x[4], -10), x[3], -20);
- U[5] = vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmul_n_f32(x[6], 1), x[3], 15), x[2], 4), x[5], -3), x[4], -5), x[1], -12);
- U[6] = vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmul_n_f32(x[6], 1), x[1], 12), x[2], 4), x[5], 3), x[4], -5), x[3], -15);
- U[7] = vmla_n_f32(vmla_n_f32(vmla_n_f32(vmul_n_f32(x[7], 1), x[3], 49), x[5], -14), x[1], -36);
-
- // Store the transformed vector
- for (int j = 0; j < inner_tile_cols; j++)
- {
- vst1_f32(outptr + j*matrix_stride, U[j]);
- }
- outptr += 2;
- }
-#endif // __arm_any__
- for (; channels_remaining; channels_remaining--)
- {
- // Load x
- for (int j = pad_left; j < cells_j; j++)
- {
- x[j] = *(x_ptrs[j]++);
- }
-
- // Compute U = x . X
- U[0] = x[0]*-36 + x[4]*-14 + x[2]*49 + x[6]*1;
- U[1] = x[5]*-1 + x[1]*-36 + x[4]*-13 + x[3]*13 + x[2]*36 + x[6]*1;
- U[2] = x[3]*-13 + x[4]*-13 + x[1]*36 + x[2]*36 + x[5]*1 + x[6]*1;
- U[3] = x[1]*-18 + x[4]*-10 + x[5]*-2 + x[2]*9 + x[3]*20 + x[6]*1;
- U[4] = x[3]*-20 + x[4]*-10 + x[5]*2 + x[2]*9 + x[1]*18 + x[6]*1;
- U[5] = x[1]*-12 + x[4]*-5 + x[5]*-3 + x[2]*4 + x[3]*15 + x[6]*1;
- U[6] = x[3]*-15 + x[4]*-5 + x[5]*3 + x[2]*4 + x[1]*12 + x[6]*1;
- U[7] = x[1]*-36 + x[5]*-14 + x[3]*49 + x[7]*1;
-
- // Store the transformed vector
- for (int j = 0; j < inner_tile_cols; j++)
- {
- *(outptr + j*matrix_stride) = U[j];
- }
- outptr++;
- }
-}
-
-}
-
-namespace winograd
-{
-template <int x>
-using Tiles = InputTransformImplTiles<1, x, 1, 8, float>;
-
-/*****************************************************************************/
-// 1x3 specialisations
-template <>
-const Tiles<3>::TileFn Tiles<3>::tilefn_generic = winograd_input_transform_1x8_fp32_process_tile<false>;
-
-template <>
-const Tiles<3>::TileFn Tiles<3>::tilefn_unpadded = winograd_input_transform_1x8_fp32_process_tile<true>;
-
-template <>
-const Tiles<3>::TileFn Tiles<3>::tilefn_left_padded[n_pad_left] = {
- winograd_input_transform_1x8_fp32_process_tile<true, 0, 1, 0, 0>,
-};
-
-template <>
-const Tiles<3>::TileFn Tiles<3>::tilefn_right_padded[n_pad_right] = {
- winograd_input_transform_1x8_fp32_process_tile<true, 0, 0, 0, 1>,
- winograd_input_transform_1x8_fp32_process_tile<true, 0, 0, 0, 2>,
- winograd_input_transform_1x8_fp32_process_tile<true, 0, 0, 0, 3>,
- winograd_input_transform_1x8_fp32_process_tile<true, 0, 0, 0, 4>,
- winograd_input_transform_1x8_fp32_process_tile<true, 0, 0, 0, 5>,
- winograd_input_transform_1x8_fp32_process_tile<true, 0, 0, 0, 6>,
- winograd_input_transform_1x8_fp32_process_tile<true, 0, 0, 0, 7>,
-};
-/*****************************************************************************/
-
-/*****************************************************************************/
-// 1x5 specialisations
-template <>
-const Tiles<5>::TileFn Tiles<5>::tilefn_generic = winograd_input_transform_1x8_fp32_process_tile<false>;
-
-template <>
-const Tiles<5>::TileFn Tiles<5>::tilefn_unpadded = winograd_input_transform_1x8_fp32_process_tile<true>;
-
-template <>
-const Tiles<5>::TileFn Tiles<5>::tilefn_left_padded[n_pad_left] = {
- winograd_input_transform_1x8_fp32_process_tile<true, 0, 2, 0, 0>,
-};
-
-template <>
-const Tiles<5>::TileFn Tiles<5>::tilefn_right_padded[n_pad_right] = {
- winograd_input_transform_1x8_fp32_process_tile<true, 0, 0, 0, 1>,
- winograd_input_transform_1x8_fp32_process_tile<true, 0, 0, 0, 2>,
- winograd_input_transform_1x8_fp32_process_tile<true, 0, 0, 0, 3>,
- winograd_input_transform_1x8_fp32_process_tile<true, 0, 0, 0, 4>,
- winograd_input_transform_1x8_fp32_process_tile<true, 0, 0, 0, 5>,
- winograd_input_transform_1x8_fp32_process_tile<true, 0, 0, 0, 6>,
- winograd_input_transform_1x8_fp32_process_tile<true, 0, 0, 0, 7>,
-};
-/*****************************************************************************/
-
-/*****************************************************************************/
-// 1x7 specialisations
-template <>
-const Tiles<7>::TileFn Tiles<7>::tilefn_generic = winograd_input_transform_1x8_fp32_process_tile<false>;
-
-template <>
-const Tiles<7>::TileFn Tiles<7>::tilefn_unpadded = winograd_input_transform_1x8_fp32_process_tile<true>;
-
-template <>
-const Tiles<7>::TileFn Tiles<7>::tilefn_left_padded[n_pad_left] = {
- winograd_input_transform_1x8_fp32_process_tile<true, 0, 1, 0, 0>,
- winograd_input_transform_1x8_fp32_process_tile<true, 0, 3, 0, 0>,
-};
-
-template <>
-const Tiles<7>::TileFn Tiles<7>::tilefn_right_padded[n_pad_right] = {
- winograd_input_transform_1x8_fp32_process_tile<true, 0, 0, 0, 1>,
- winograd_input_transform_1x8_fp32_process_tile<true, 0, 0, 0, 2>,
- winograd_input_transform_1x8_fp32_process_tile<true, 0, 0, 0, 3>,
- winograd_input_transform_1x8_fp32_process_tile<true, 0, 0, 0, 4>,
- winograd_input_transform_1x8_fp32_process_tile<true, 0, 0, 0, 5>,
- winograd_input_transform_1x8_fp32_process_tile<true, 0, 0, 0, 6>,
- winograd_input_transform_1x8_fp32_process_tile<true, 0, 0, 0, 7>,
-};
-/*****************************************************************************/
-
-
-template class InputTransform<1, 3, 1, 8, float>;
-template class InputTransform<3, 1, 8, 1, float>;
-template class InputTransform<1, 5, 1, 8, float>;
-template class InputTransform<5, 1, 8, 1, float>;
-template class InputTransform<1, 7, 1, 8, float>;
-template class InputTransform<7, 1, 8, 1, float>;
-} // namespace winograd
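
For reference, the per-channel tail loop in the file deleted above applies the 1x8 Winograd input transform U = B^T x to each channel. A minimal scalar sketch of the same arithmetic, assuming a hypothetical helper name winograd_input_1x8_ref and a std::array signature that are not part of the library API (no padding handling, no NEON):

#include <array>

// Scalar sketch of the 1x8 input transform: one channel only.
// Coefficients match the per-channel tail loop of the deleted file.
inline std::array<float, 8> winograd_input_1x8_ref(const std::array<float, 8> &x)
{
  std::array<float, 8> U;
  U[0] = -36*x[0] + 49*x[2] - 14*x[4] + x[6];
  U[1] = -36*x[1] + 36*x[2] + 13*x[3] - 13*x[4] - x[5] + x[6];
  U[2] =  36*x[1] + 36*x[2] - 13*x[3] - 13*x[4] + x[5] + x[6];
  U[3] = -18*x[1] +  9*x[2] + 20*x[3] - 10*x[4] - 2*x[5] + x[6];
  U[4] =  18*x[1] +  9*x[2] - 20*x[3] - 10*x[4] + 2*x[5] + x[6];
  U[5] = -12*x[1] +  4*x[2] + 15*x[3] -  5*x[4] - 3*x[5] + x[6];
  U[6] =  12*x[1] +  4*x[2] - 15*x[3] -  5*x[4] + 3*x[5] + x[6];
  U[7] = -36*x[1] + 49*x[3] - 14*x[5] + x[7];
  return U;
}

The NEON paths in the deleted kernel compute exactly these eight linear combinations, but for four (float32x4_t) or two (float32x2_t) channels at a time.
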
diff --git a/src/core/NEON/kernels/convolution/winograd/transforms/input_2x2_3x3_fp32.cpp b/src/core/NEON/kernels/convolution/winograd/transforms/input_2x2_3x3_fp32.cpp
deleted file mode 100644
index 4203945dd3..0000000000
--- a/src/core/NEON/kernels/convolution/winograd/transforms/input_2x2_3x3_fp32.cpp
+++ /dev/null
@@ -1,311 +0,0 @@
-/*
- * Copyright (c) 2017 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include "arm_compute/core/NEON/kernels/convolution/winograd/transforms/input.hpp"
-#include "arm_compute/core/NEON/kernels/convolution/winograd/winograd_gemm.hpp"
-#include "arm_compute/core/NEON/kernels/convolution/common/arm.hpp"
-
-namespace winograd
-{
-
-using Tiles = InputTransformImplTiles<3, 3, 4, 4, float>;
-
-namespace
-{
-
-
-template <bool Specialized, int PadTop=0, int PadLeft=0, int PadBottom=0, int PadRight=0>
-void winograd_input_transform_4x4_fp32_process_tile(
- int n_channels,
- const float* const input_base,
- const int input_row_stride,
- const int input_col_stride,
- float* const matrix_base,
- const int matrix_stride,
- const int _pad_top,
- const int _pad_left,
- const int _pad_bottom,
- const int _pad_right
- )
-{
- const int pad_top = Specialized ? PadTop : _pad_top;
- const int pad_left = Specialized ? PadLeft : _pad_left;
- const int pad_bottom = Specialized ? PadBottom : _pad_bottom;
- const int pad_right = Specialized ? PadRight : _pad_right;
-
- constexpr int inner_tile_i = 4, inner_tile_j = 4;
- const int cells_i = inner_tile_i - pad_bottom;
- const int cells_j = inner_tile_j - pad_right;
-
-
-
- float *outptr = matrix_base;
-
- // Get pointers into the input tile
- const float *x_ptrs[inner_tile_i][inner_tile_j];
- for (int i = pad_top, xi = 0; i < cells_i; i++, xi++)
- {
- // Get a pointer into the row
- const float* const row_ptr = input_base + xi*input_row_stride;
-
- for (int j = pad_left, xj = 0; j < cells_j; j++, xj++)
- {
- x_ptrs[i][j] = row_ptr + xj*input_col_stride;
- }
- }
-
- // Matrices used/computed in this kernel.
- float x[inner_tile_i][inner_tile_j];
- float XTx[inner_tile_i][inner_tile_j];
- float U[inner_tile_i][inner_tile_j];
-
- for (int i = 0; i < inner_tile_i; i++)
- {
- for (int j = 0; j < inner_tile_j; j++)
- {
- x[i][j] = XTx[i][j] = 0.0f;
- }
- }
-
- // Perform the Winograd input transformation for each channel in the input
- // tensor.
- int channels_remaining = n_channels;
-#ifdef __aarch64__
- for (; channels_remaining >= 4; channels_remaining -= 4)
- {
- // Matrices used/computed in this kernel.
- float32x4_t x[inner_tile_i][inner_tile_j];
- float32x4_t XTx[inner_tile_i][inner_tile_j];
- float32x4_t U[inner_tile_i][inner_tile_j];
-
- for (int i = 0; i < inner_tile_i; i++)
- {
- for (int j = 0; j < inner_tile_j; j++)
- {
- x[i][j] = vdupq_n_f32(0.0f);
- XTx[i][j] = vdupq_n_f32(0.0f);
- }
- }
-
- // Load x
- for (int i = pad_top; i < cells_i; i++)
- {
- for (int j = pad_left; j < cells_j; j++)
- {
- x[i][j] = vld1q_f32(x_ptrs[i][j]);
- x_ptrs[i][j] += 4;
- }
- }
-
- // Compute XT . x
- for (int j = pad_left; j < cells_j; j++)
- {
- // XTx[0][j] = x[0][j] - x[2][j];
- XTx[0][j] = vsubq_f32(x[0][j], x[2][j]);
-
- // XTx[1][j] = x[1][j] + x[2][j];
- XTx[1][j] = vaddq_f32(x[1][j], x[2][j]);
-
- // XTx[2][j] = x[2][j] - x[1][j];
- XTx[2][j] = vsubq_f32(x[2][j], x[1][j]);
-
- // XTx[3][j] = x[1][j] - x[3][j];
- XTx[3][j] = vsubq_f32(x[1][j], x[3][j]);
- }
-
- // Compute U = XT . x . X
- for (int i = 0; i < inner_tile_i; i++)
- {
- // U[i][0] = XTx[i][0] - XTx[i][2];
- U[i][0] = vsubq_f32(XTx[i][0], XTx[i][2]);
-
- // U[i][1] = XTx[i][1] + XTx[i][2];
- U[i][1] = vaddq_f32(XTx[i][1], XTx[i][2]);
-
- // U[i][2] = XTx[i][2] - XTx[i][1];
- U[i][2] = vsubq_f32(XTx[i][2], XTx[i][1]);
-
- // U[i][3] = XTx[i][1] - XTx[i][3];
- U[i][3] = vsubq_f32(XTx[i][1], XTx[i][3]);
- }
-
- // Store the transformed matrix
- for (int i = 0, m = 0; i < inner_tile_i; i++)
- {
- for (int j = 0; j < inner_tile_j; j++, m++)
- {
- vst1q_f32(outptr + m*matrix_stride, U[i][j]);
- }
- }
- outptr += 4;
- }
-#endif // __aarch64__
-#ifdef __arm_any__
- for (; channels_remaining >= 2; channels_remaining -= 2)
- {
- // Matrices used/computed in this kernel.
- float32x2_t x[inner_tile_i][inner_tile_j];
- float32x2_t XTx[inner_tile_i][inner_tile_j];
- float32x2_t U[inner_tile_i][inner_tile_j];
-
- for (int i = 0; i < inner_tile_i; i++)
- {
- for (int j = 0; j < inner_tile_j; j++)
- {
- x[i][j] = vdup_n_f32(0.0f);
- XTx[i][j] = vdup_n_f32(0.0f);
- }
- }
-
- // Load x
- for (int i = pad_top; i < cells_i; i++)
- {
- for (int j = pad_left; j < cells_j; j++)
- {
- x[i][j] = vld1_f32(x_ptrs[i][j]);
- x_ptrs[i][j] += 2;
- }
- }
-
- // Compute XT . x
- for (int j = pad_left; j < cells_j; j++)
- {
- // XTx[0][j] = x[0][j] - x[2][j];
- XTx[0][j] = vsub_f32(x[0][j], x[2][j]);
-
- // XTx[1][j] = x[1][j] + x[2][j];
- XTx[1][j] = vadd_f32(x[1][j], x[2][j]);
-
- // XTx[2][j] = x[2][j] - x[1][j];
- XTx[2][j] = vsub_f32(x[2][j], x[1][j]);
-
- // XTx[3][j] = x[1][j] - x[3][j];
- XTx[3][j] = vsub_f32(x[1][j], x[3][j]);
- }
-
- // Compute U = XT . x . X
- for (int i = 0; i < inner_tile_i; i++)
- {
- // U[i][0] = XTx[i][0] - XTx[i][2];
- U[i][0] = vsub_f32(XTx[i][0], XTx[i][2]);
-
- // U[i][1] = XTx[i][1] + XTx[i][2];
- U[i][1] = vadd_f32(XTx[i][1], XTx[i][2]);
-
- // U[i][2] = XTx[i][2] - XTx[i][1];
- U[i][2] = vsub_f32(XTx[i][2], XTx[i][1]);
-
- // U[i][3] = XTx[i][1] - XTx[i][3];
- U[i][3] = vsub_f32(XTx[i][1], XTx[i][3]);
- }
-
- // Store the transformed matrix
- for (int i = 0, m = 0; i < inner_tile_i; i++)
- {
- for (int j = 0; j < inner_tile_j; j++, m++)
- {
- vst1_f32(outptr + m*matrix_stride, U[i][j]);
- }
- }
- outptr += 2;
- }
-#endif // __arm_any__
- for (; channels_remaining; channels_remaining--)
- {
- // Load x
- for (int i = pad_top; i < cells_i; i++)
- {
- for (int j = pad_left; j < cells_j; j++)
- {
- x[i][j] = *(x_ptrs[i][j]++);
- }
- }
-
- // Compute XT . x
- for (int j = pad_left; j < cells_j; j++)
- {
- XTx[0][j] = x[0][j] - x[2][j];
- XTx[1][j] = x[1][j] + x[2][j];
- XTx[2][j] = x[2][j] - x[1][j];
- XTx[3][j] = x[1][j] - x[3][j];
- }
-
- // Compute U = XT . x . X
- for (int i = 0; i < inner_tile_i; i++)
- {
- U[i][0] = XTx[i][0] - XTx[i][2];
- U[i][1] = XTx[i][1] + XTx[i][2];
- U[i][2] = XTx[i][2] - XTx[i][1];
- U[i][3] = XTx[i][1] - XTx[i][3];
- }
-
- // Store the transformed matrix
- for (int i = 0, m = 0; i < inner_tile_i; i++)
- {
- for (int j = 0; j < inner_tile_j; j++, m++)
- {
- *(outptr + m*matrix_stride) = U[i][j];
- }
- }
- outptr++;
- }
-}
-
-} // namespace (anonymous)
-
-template <>
-const Tiles::TileFn Tiles::tilefn_generic = winograd_input_transform_4x4_fp32_process_tile<false>;
-
-template <>
-const Tiles::TileFn Tiles::tilefn_unpadded = winograd_input_transform_4x4_fp32_process_tile<true>;
-
-
-template <>
-const Tiles::TileFn Tiles::tilefn_top_padded[n_pad_top] = {
- winograd_input_transform_4x4_fp32_process_tile<true, 1, 0, 0, 0>,
-};
-
-template <>
-const Tiles::TileFn Tiles::tilefn_left_padded[n_pad_left] = {
- winograd_input_transform_4x4_fp32_process_tile<true, 0, 1, 0, 0>,
-};
-
-template <>
-const Tiles::TileFn Tiles::tilefn_bottom_padded[n_pad_bottom] = {
- winograd_input_transform_4x4_fp32_process_tile<true, 0, 0, 1, 0>,
- winograd_input_transform_4x4_fp32_process_tile<true, 0, 0, 2, 0>,
- winograd_input_transform_4x4_fp32_process_tile<true, 0, 0, 3, 0>,
- winograd_input_transform_4x4_fp32_process_tile<true, 0, 0, 4, 0>,
-};
-
-template <>
-const Tiles::TileFn Tiles::tilefn_right_padded[n_pad_right] = {
- winograd_input_transform_4x4_fp32_process_tile<true, 0, 0, 0, 1>,
- winograd_input_transform_4x4_fp32_process_tile<true, 0, 0, 0, 2>,
- winograd_input_transform_4x4_fp32_process_tile<true, 0, 0, 0, 3>,
- winograd_input_transform_4x4_fp32_process_tile<true, 0, 0, 0, 4>,
-};
-
-template class InputTransform<3, 3, 4, 4, float>;
-} // namespace winograd
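
For reference, the scalar tail loop in the file deleted above computes U = X^T x X for a 4x4 tile (the F(2x2, 3x3) input transform). A minimal sketch of that arithmetic in plain C++, assuming an illustrative function name winograd_input_4x4_ref (padding and channel vectorisation omitted):

// Scalar sketch of the 4x4 input transform: one channel, full tile.
// Mirrors the XT.x and (XT.x).X stages of the deleted kernel.
inline void winograd_input_4x4_ref(const float x[4][4], float U[4][4])
{
  float XTx[4][4];
  for (int j = 0; j < 4; j++)
  {
    XTx[0][j] = x[0][j] - x[2][j];
    XTx[1][j] = x[1][j] + x[2][j];
    XTx[2][j] = x[2][j] - x[1][j];
    XTx[3][j] = x[1][j] - x[3][j];
  }
  for (int i = 0; i < 4; i++)
  {
    U[i][0] = XTx[i][0] - XTx[i][2];
    U[i][1] = XTx[i][1] + XTx[i][2];
    U[i][2] = XTx[i][2] - XTx[i][1];
    U[i][3] = XTx[i][1] - XTx[i][3];
  }
}
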
diff --git a/src/core/NEON/kernels/convolution/winograd/transforms/input_6x6_fp32.cpp b/src/core/NEON/kernels/convolution/winograd/transforms/input_6x6_fp32.cpp
deleted file mode 100644
index 893122cc45..0000000000
--- a/src/core/NEON/kernels/convolution/winograd/transforms/input_6x6_fp32.cpp
+++ /dev/null
@@ -1,376 +0,0 @@
-/*
- * Copyright (c) 2017 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include "arm_compute/core/NEON/kernels/convolution/winograd/transforms/input.hpp"
-#include "arm_compute/core/NEON/kernels/convolution/winograd/winograd_gemm.hpp"
-#include "arm_compute/core/NEON/kernels/convolution/common/arm.hpp"
-
-namespace
-{
-
-template <bool Specialized, int PadTop=0, int PadLeft=0, int PadBottom=0, int PadRight=0>
-void winograd_input_transform_6x6_fp32_process_tile(
- int n_channels,
- const float* const input_base,
- const int input_row_stride,
- const int input_col_stride,
- float* const matrix_base,
- const int matrix_stride,
- const int _pad_top,
- const int _pad_left,
- const int _pad_bottom,
- const int _pad_right
-)
-{
- const int pad_top = Specialized ? PadTop : _pad_top;
- const int pad_left = Specialized ? PadLeft : _pad_left;
- const int pad_bottom = Specialized ? PadBottom : _pad_bottom;
- const int pad_right = Specialized ? PadRight : _pad_right;
-
- constexpr int inner_tile_rows = 6;
- constexpr int inner_tile_cols = 6;
-
- const int cells_i = inner_tile_rows - pad_bottom;
- const int cells_j = inner_tile_cols - pad_right;
-
- float *outptr = matrix_base;
-
- // Get pointers into the input tile
- const float *x_ptrs[inner_tile_rows][inner_tile_cols];
- for (int i = pad_top, xi = 0; i < cells_i; i++, xi++)
- {
- // Get a pointer into the row
- const float* const row_ptr = input_base + xi*input_row_stride;
-
- for (int j = pad_left, xj = 0; j < cells_j; j++, xj++)
- {
- x_ptrs[i][j] = row_ptr + xj*input_col_stride;
- }
- }
-
- // Matrices used/computed in this kernel.
- float x[inner_tile_rows][inner_tile_cols];
- float XTx[inner_tile_rows][inner_tile_cols];
- float U[inner_tile_rows][inner_tile_cols];
- for (int i = 0; i < inner_tile_rows; i++)
- {
- for (int j = 0; j < inner_tile_cols; j++)
- {
- x[i][j] = XTx[i][j] = 0.0f;
- }
- }
-
- // Perform the Winograd input transformation for each channel in the input
- // tensor.
- int channels_remaining = n_channels;
-#ifdef __aarch64__
- for (; channels_remaining >= 4; channels_remaining -= 4)
- {
- // Matrices used/computed in this kernel
- float32x4_t x[inner_tile_rows][inner_tile_cols];
- float32x4_t XTx[inner_tile_rows][inner_tile_cols];
- float32x4_t U[inner_tile_rows][inner_tile_cols];
- for (int i = 0; i < inner_tile_rows; i++)
- {
- for (int j = 0; j < inner_tile_cols; j++)
- {
- x[i][j] = vdupq_n_f32(0.0f);
- XTx[i][j] = vdupq_n_f32(0.0f);
- }
- }
-
- // Read a 6x6 tile in the Winograd domain
- for (int i = pad_top; i < cells_i; i++)
- {
- for (int j = pad_left; j < cells_j; j++)
- {
- x[i][j] = vld1q_f32(x_ptrs[i][j]);
- x_ptrs[i][j] += 4;
- }
- }
-
- // Compute XT . x
- for (int j = pad_left; j < cells_j; j++)
- {
- // XTx[0][j] = 4*x[0][j] + -5*x[2][j] + 1*x[4][j];
- XTx[0][j] = vmlsq_n_f32(vmlaq_n_f32(x[4][j], x[0][j], 4.0f), x[2][j], 5.0f);
-
- // XTx[1][j] = -4*x[1][j] + -4*x[2][j] + 1*x[3][j] + 1*x[4][j];
- XTx[1][j] = vmlsq_n_f32(vaddq_f32(x[3][j], x[4][j]), vaddq_f32(x[1][j], x[2][j]), 4.0f);
-
- // XTx[2][j] = 4*x[1][j] + -4*x[2][j] + -1*x[3][j] + 1*x[4][j];
- XTx[2][j] = vmlaq_n_f32(vsubq_f32(x[4][j], x[3][j]), vsubq_f32(x[1][j], x[2][j]), 4.0f);
-
- // XTx[3][j] = -2*x[1][j] + -1*x[2][j] + 2*x[3][j] + 1*x[4][j];
- XTx[3][j] = vmlaq_n_f32(vsubq_f32(x[4][j], x[2][j]), vsubq_f32(x[3][j], x[1][j]), 2.0f);
-
- // XTx[4][j] = 2*x[1][j] + -1*x[2][j] + -2*x[3][j] + 1*x[4][j];
- XTx[4][j] = vmlaq_n_f32(vsubq_f32(x[4][j], x[2][j]), vsubq_f32(x[1][j], x[3][j]), 2.0f);
-
- // XTx[5][j] = 4*x[1][j] + -5*x[3][j] + 1*x[5][j];
- XTx[5][j] = vmlsq_n_f32(vmlaq_n_f32(x[5][j], x[1][j], 4.0f), x[3][j], 5.0f);
- }
-
- // Compute U = XT . x . X
- for (int i = 0; i < inner_tile_rows; i++)
- {
- // U[i][0] = 4*XTx[i][0] + -5*XTx[i][2] + 1*XTx[i][4];
- U[i][0] = vmlsq_n_f32(vmlaq_n_f32(XTx[i][4], XTx[i][0], 4.0f), XTx[i][2], 5.0f);
-
- // U[i][1] = -4*XTx[i][1] + -4*XTx[i][2] + 1*XTx[i][3] + 1*XTx[i][4];
- U[i][1] = vmlsq_n_f32(vaddq_f32(XTx[i][3], XTx[i][4]), vaddq_f32(XTx[i][1], XTx[i][2]), 4.0f);
-
- // U[i][2] = 4*XTx[i][1] + -4*XTx[i][2] + -1*XTx[i][3] + 1*XTx[i][4];
- U[i][2] = vmlaq_n_f32(vsubq_f32(XTx[i][4], XTx[i][3]), vsubq_f32(XTx[i][1], XTx[i][2]), 4.0f);
-
- // U[i][3] = -2*XTx[i][1] + -1*XTx[i][2] + 2*XTx[i][3] + 1*XTx[i][4];
- U[i][3] = vmlaq_n_f32(vsubq_f32(XTx[i][4], XTx[i][2]), vsubq_f32(XTx[i][3], XTx[i][1]), 2.0f);
-
- // U[i][4] = 2*XTx[i][1] + -1*XTx[i][2] + -2*XTx[i][3] + 1*XTx[i][4];
- U[i][4] = vmlaq_n_f32(vsubq_f32(XTx[i][4], XTx[i][2]), vsubq_f32(XTx[i][1], XTx[i][3]), 2.0f);
-
- // U[i][5] = 4*XTx[i][1] + -5*XTx[i][3] + 1*XTx[i][5];
- U[i][5] = vmlsq_n_f32(vmlaq_n_f32(XTx[i][5], XTx[i][1], 4.0f), XTx[i][3], 5.0f);
- }
-
- // Store the transformed matrix
- for (int i = 0, m = 0; i < inner_tile_rows; i++)
- {
- for (int j = 0; j < inner_tile_cols; j++, m++)
- {
- vst1q_f32(outptr + m*matrix_stride, U[i][j]);
- }
- }
- outptr += 4;
- }
-#endif // __aarch64__
-#ifdef __arm_any__
- for (; channels_remaining >= 2; channels_remaining -= 2)
- {
- // Matrices used/computed in this kernel
- float32x2_t x[inner_tile_rows][inner_tile_cols];
- float32x2_t XTx[inner_tile_rows][inner_tile_cols];
- float32x2_t U[inner_tile_rows][inner_tile_cols];
- for (int i = 0; i < inner_tile_rows; i++)
- {
- for (int j = 0; j < inner_tile_cols; j++)
- {
- x[i][j] = vdup_n_f32(0.0f);
- XTx[i][j] = vdup_n_f32(0.0f);
- }
- }
-
- // Read a 6x6 tile in the Winograd domain
- for (int i = pad_top; i < cells_i; i++)
- {
- for (int j = pad_left; j < cells_j; j++)
- {
- x[i][j] = vld1_f32(x_ptrs[i][j]);
- x_ptrs[i][j] += 2;
- }
- }
-
- // Compute XT . x
- for (int j = pad_left; j < cells_j; j++)
- {
- // XTx[0][j] = 4*x[0][j] + -5*x[2][j] + 1*x[4][j];
- XTx[0][j] = vmls_n_f32(vmla_n_f32(x[4][j], x[0][j], 4.0f), x[2][j], 5.0f);
-
- // XTx[1][j] = -4*x[1][j] + -4*x[2][j] + 1*x[3][j] + 1*x[4][j];
- XTx[1][j] = vmls_n_f32(vadd_f32(x[3][j], x[4][j]), vadd_f32(x[1][j], x[2][j]), 4.0f);
-
- // XTx[2][j] = 4*x[1][j] + -4*x[2][j] + -1*x[3][j] + 1*x[4][j];
- XTx[2][j] = vmla_n_f32(vsub_f32(x[4][j], x[3][j]), vsub_f32(x[1][j], x[2][j]), 4.0f);
-
- // XTx[3][j] = -2*x[1][j] + -1*x[2][j] + 2*x[3][j] + 1*x[4][j];
- XTx[3][j] = vmla_n_f32(vsub_f32(x[4][j], x[2][j]), vsub_f32(x[3][j], x[1][j]), 2.0f);
-
- // XTx[4][j] = 2*x[1][j] + -1*x[2][j] + -2*x[3][j] + 1*x[4][j];
- XTx[4][j] = vmla_n_f32(vsub_f32(x[4][j], x[2][j]), vsub_f32(x[1][j], x[3][j]), 2.0f);
-
- // XTx[5][j] = 4*x[1][j] + -5*x[3][j] + 1*x[5][j];
- XTx[5][j] = vmls_n_f32(vmla_n_f32(x[5][j], x[1][j], 4.0f), x[3][j], 5.0f);
- }
-
- // Compute U = XT . x . X
- for (int i = 0; i < inner_tile_rows; i++)
- {
- // U[i][0] = 4*XTx[i][0] + -5*XTx[i][2] + 1*XTx[i][4];
- U[i][0] = vmls_n_f32(vmla_n_f32(XTx[i][4], XTx[i][0], 4.0f), XTx[i][2], 5.0f);
-
- // U[i][1] = -4*XTx[i][1] + -4*XTx[i][2] + 1*XTx[i][3] + 1*XTx[i][4];
- U[i][1] = vmls_n_f32(vadd_f32(XTx[i][3], XTx[i][4]), vadd_f32(XTx[i][1], XTx[i][2]), 4.0f);
-
- // U[i][2] = 4*XTx[i][1] + -4*XTx[i][2] + -1*XTx[i][3] + 1*XTx[i][4];
- U[i][2] = vmla_n_f32(vsub_f32(XTx[i][4], XTx[i][3]), vsub_f32(XTx[i][1], XTx[i][2]), 4.0f);
-
- // U[i][3] = -2*XTx[i][1] + -1*XTx[i][2] + 2*XTx[i][3] + 1*XTx[i][4];
- U[i][3] = vmla_n_f32(vsub_f32(XTx[i][4], XTx[i][2]), vsub_f32(XTx[i][3], XTx[i][1]), 2.0f);
-
- // U[i][4] = 2*XTx[i][1] + -1*XTx[i][2] + -2*XTx[i][3] + 1*XTx[i][4];
- U[i][4] = vmla_n_f32(vsub_f32(XTx[i][4], XTx[i][2]), vsub_f32(XTx[i][1], XTx[i][3]), 2.0f);
-
- // U[i][5] = 4*XTx[i][1] + -5*XTx[i][3] + 1*XTx[i][5];
- U[i][5] = vmls_n_f32(vmla_n_f32(XTx[i][5], XTx[i][1], 4.0f), XTx[i][3], 5.0f);
- }
-
- // Store the transformed matrix
- for (int i = 0, m = 0; i < inner_tile_rows; i++)
- {
- for (int j = 0; j < inner_tile_cols; j++, m++)
- {
- vst1_f32(outptr + m*matrix_stride, U[i][j]);
- }
- }
- outptr += 2;
- }
-#endif // __arm_any__
- for (; channels_remaining; channels_remaining--)
- {
- // Load x
- for (int i = pad_top; i < cells_i; i++)
- {
- for (int j = pad_left; j < cells_j; j++)
- {
- x[i][j] = *(x_ptrs[i][j]++);
- }
- }
-
- // Compute XT . x
- for (int j = pad_left; j < cells_j; j++)
- {
- XTx[0][j] = 4*x[0][j] + -5*x[2][j] + 1*x[4][j];
- XTx[1][j] = -4*x[1][j] + -4*x[2][j] + 1*x[3][j] + 1*x[4][j];
- XTx[2][j] = 4*x[1][j] + -4*x[2][j] + -1*x[3][j] + 1*x[4][j];
- XTx[3][j] = -2*x[1][j] + -1*x[2][j] + 2*x[3][j] + 1*x[4][j];
- XTx[4][j] = 2*x[1][j] + -1*x[2][j] + -2*x[3][j] + 1*x[4][j];
- XTx[5][j] = 4*x[1][j] + -5*x[3][j] + 1*x[5][j];
- }
-
- // Compute U = XT . x . X
- for (int i = 0; i < inner_tile_rows; i++)
- {
- U[i][0] = 4*XTx[i][0] + -5*XTx[i][2] + 1*XTx[i][4];
- U[i][1] = -4*XTx[i][1] + -4*XTx[i][2] + 1*XTx[i][3] + 1*XTx[i][4];
- U[i][2] = 4*XTx[i][1] + -4*XTx[i][2] + -1*XTx[i][3] + 1*XTx[i][4];
- U[i][3] = -2*XTx[i][1] + -1*XTx[i][2] + 2*XTx[i][3] + 1*XTx[i][4];
- U[i][4] = 2*XTx[i][1] + -1*XTx[i][2] + -2*XTx[i][3] + 1*XTx[i][4];
- U[i][5] = 4*XTx[i][1] + -5*XTx[i][3] + 1*XTx[i][5];
- }
-
- // Store the transformed matrix
- for (int i = 0, m = 0; i < inner_tile_rows; i++)
- {
- for (int j = 0; j < inner_tile_cols; j++, m++)
- {
- *(outptr + m*matrix_stride) = U[i][j];
- }
- }
- outptr++;
- }
-}
-}
-
-namespace winograd
-{
-template <int k>
-using Tiles = InputTransformImplTiles<k, k, 6, 6, float>;
-
-template <>
-const Tiles<3>::TileFn Tiles<3>::tilefn_generic = winograd_input_transform_6x6_fp32_process_tile<false>;
-
-template <>
-const Tiles<3>::TileFn Tiles<3>::tilefn_unpadded = winograd_input_transform_6x6_fp32_process_tile<true>;
-
-template <>
-const Tiles<3>::TileFn Tiles<3>::tilefn_top_padded[n_pad_top] = {
- winograd_input_transform_6x6_fp32_process_tile<true, 1, 0, 0, 0>,
-};
-
-template <>
-const Tiles<3>::TileFn Tiles<3>::tilefn_left_padded[n_pad_left] = {
- winograd_input_transform_6x6_fp32_process_tile<true, 0, 1, 0, 0>,
-};
-
-template <>
-const Tiles<3>::TileFn Tiles<3>::tilefn_bottom_padded[n_pad_bottom] = {
- winograd_input_transform_6x6_fp32_process_tile<true, 0, 0, 1, 0>,
- winograd_input_transform_6x6_fp32_process_tile<true, 0, 0, 2, 0>,
- winograd_input_transform_6x6_fp32_process_tile<true, 0, 0, 3, 0>,
- winograd_input_transform_6x6_fp32_process_tile<true, 0, 0, 4, 0>,
- winograd_input_transform_6x6_fp32_process_tile<true, 0, 0, 5, 0>,
- winograd_input_transform_6x6_fp32_process_tile<true, 0, 0, 6, 0>,
-};
-
-template <>
-const Tiles<3>::TileFn Tiles<3>::tilefn_right_padded[n_pad_right] = {
- winograd_input_transform_6x6_fp32_process_tile<true, 0, 0, 0, 1>,
- winograd_input_transform_6x6_fp32_process_tile<true, 0, 0, 0, 2>,
- winograd_input_transform_6x6_fp32_process_tile<true, 0, 0, 0, 3>,
- winograd_input_transform_6x6_fp32_process_tile<true, 0, 0, 0, 4>,
- winograd_input_transform_6x6_fp32_process_tile<true, 0, 0, 0, 5>,
- winograd_input_transform_6x6_fp32_process_tile<true, 0, 0, 0, 6>,
-};
-
-template <>
-const Tiles<5>::TileFn Tiles<5>::tilefn_generic = winograd_input_transform_6x6_fp32_process_tile<false>;
-
-
-template <>
-const Tiles<5>::TileFn Tiles<5>::tilefn_unpadded = winograd_input_transform_6x6_fp32_process_tile<true>;
-
-
-template <>
-const Tiles<5>::TileFn Tiles<5>::tilefn_top_padded[n_pad_top] = {
- winograd_input_transform_6x6_fp32_process_tile<true, 2, 0, 0, 0>,
-};
-
-template <>
-const Tiles<5>::TileFn Tiles<5>::tilefn_left_padded[n_pad_left] = {
- winograd_input_transform_6x6_fp32_process_tile<true, 0, 2, 0, 0>,
-};
-
-template <>
-const Tiles<5>::TileFn Tiles<5>::tilefn_bottom_padded[n_pad_bottom] = {
- winograd_input_transform_6x6_fp32_process_tile<true, 0, 0, 1, 0>,
- winograd_input_transform_6x6_fp32_process_tile<true, 0, 0, 2, 0>,
- winograd_input_transform_6x6_fp32_process_tile<true, 0, 0, 3, 0>,
- winograd_input_transform_6x6_fp32_process_tile<true, 0, 0, 4, 0>,
- winograd_input_transform_6x6_fp32_process_tile<true, 0, 0, 5, 0>,
- winograd_input_transform_6x6_fp32_process_tile<true, 0, 0, 6, 0>,
-};
-
-template <>
-const Tiles<5>::TileFn Tiles<5>::tilefn_right_padded[n_pad_right] = {
- winograd_input_transform_6x6_fp32_process_tile<true, 0, 0, 0, 1>,
- winograd_input_transform_6x6_fp32_process_tile<true, 0, 0, 0, 2>,
- winograd_input_transform_6x6_fp32_process_tile<true, 0, 0, 0, 3>,
- winograd_input_transform_6x6_fp32_process_tile<true, 0, 0, 0, 4>,
- winograd_input_transform_6x6_fp32_process_tile<true, 0, 0, 0, 5>,
- winograd_input_transform_6x6_fp32_process_tile<true, 0, 0, 0, 6>,
-};
-
-template class InputTransform<3, 3, 6, 6, float>;
-template class InputTransform<5, 5, 6, 6, float>;
-}
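
For reference, the scalar tail loop in the file deleted above applies the 6x6 input transform used for F(4x4, 3x3) and F(2x2, 5x5). A minimal sketch of the same arithmetic, assuming an illustrative function name winograd_input_6x6_ref (one channel, unpadded tile, no NEON):

// Scalar sketch of the 6x6 input transform: one channel, full tile.
// Coefficients match the tail loop of the deleted kernel.
inline void winograd_input_6x6_ref(const float x[6][6], float U[6][6])
{
  float XTx[6][6];
  for (int j = 0; j < 6; j++)
  {
    XTx[0][j] =  4*x[0][j] - 5*x[2][j] + x[4][j];
    XTx[1][j] = -4*x[1][j] - 4*x[2][j] + x[3][j] + x[4][j];
    XTx[2][j] =  4*x[1][j] - 4*x[2][j] - x[3][j] + x[4][j];
    XTx[3][j] = -2*x[1][j] -   x[2][j] + 2*x[3][j] + x[4][j];
    XTx[4][j] =  2*x[1][j] -   x[2][j] - 2*x[3][j] + x[4][j];
    XTx[5][j] =  4*x[1][j] - 5*x[3][j] + x[5][j];
  }
  for (int i = 0; i < 6; i++)
  {
    U[i][0] =  4*XTx[i][0] - 5*XTx[i][2] + XTx[i][4];
    U[i][1] = -4*XTx[i][1] - 4*XTx[i][2] + XTx[i][3] + XTx[i][4];
    U[i][2] =  4*XTx[i][1] - 4*XTx[i][2] - XTx[i][3] + XTx[i][4];
    U[i][3] = -2*XTx[i][1] -   XTx[i][2] + 2*XTx[i][3] + XTx[i][4];
    U[i][4] =  2*XTx[i][1] -   XTx[i][2] - 2*XTx[i][3] + XTx[i][4];
    U[i][5] =  4*XTx[i][1] - 5*XTx[i][3] + XTx[i][5];
  }
}

The vmlaq_n_f32/vmlsq_n_f32 chains in the deleted NEON paths fuse these same multiply-accumulates across four or two channels.
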
diff --git a/src/core/NEON/kernels/convolution/winograd/transforms/output_2_7_fp32.cpp b/src/core/NEON/kernels/convolution/winograd/transforms/output_2_7_fp32.cpp
deleted file mode 100644
index ea842a45ee..0000000000
--- a/src/core/NEON/kernels/convolution/winograd/transforms/output_2_7_fp32.cpp
+++ /dev/null
@@ -1,163 +0,0 @@
-/*
- * Copyright (c) 2017 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include "arm_compute/core/NEON/kernels/convolution/winograd/transforms/output.hpp"
-#include "arm_compute/core/NEON/kernels/convolution/winograd/winograd_output_transform.hpp"
-#include "arm_compute/core/NEON/kernels/convolution/common/arm.hpp"
-
-namespace
-{
-
-template <bool Specialized, int PadRight=0>
-void winograd_output_transform_2_7_fp32_process_tile(
- const int n_channels,
- const float* const matrix_base,
- const int matrix_stride,
- const float* const biases,
- float* const output,
- const int output_row_stride,
- const int output_col_stride,
- const int _pad_bottom,
- const int _pad_right
-)
-{
- (void) output_row_stride;
- (void) _pad_bottom;
- constexpr int output_tile_cols = 2;
- constexpr int inner_tile_cols = 8;
-
- const int pad_right = Specialized ? PadRight : _pad_right;
- const int cells_j = output_tile_cols - pad_right;
-
-
- // Construct a map to the output cells
- float *outptrs[cells_j];
- for (int j = 0; j < cells_j; j++)
- {
- outptrs[j] = output + j*output_col_stride;
- }
- const float *inptr = matrix_base;
- const float *bptr = biases;
-
- // For each channel of the output
- int channels_remaining = n_channels;
-#ifdef __arm_any__
- for (; channels_remaining >= 4; channels_remaining -= 4)
- {
- // Matrices used and computed during this transform
- float32x4_t F[inner_tile_cols], f[output_tile_cols], b = vdupq_n_f32(0.0f);
-
- // Read a 1x8 tile in the Winograd domain
- for (int j = 0; j < inner_tile_cols; j++)
- {
- F[j] = vld1q_f32(inptr + j*matrix_stride);
- }
- inptr += 4;
-
- f[0] = vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmulq_n_f32(F[6], 1), F[5], 1), F[4], 1), F[3], 1), F[2], 1), F[1], 1), F[0], 1);
- f[1] = vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmulq_n_f32(F[7], 1), F[2], 1), F[6], 3), F[4], 2), F[3], -2), F[5], -3), F[1], -1);
-
- // Write out the output tile
- if (bptr != 0)
- {
- b = vld1q_f32(bptr);
- bptr += 4;
- }
- for (int j = 0; j < cells_j; j++)
- {
- vst1q_f32(outptrs[j], f[j] + b);
- outptrs[j] += 4;
- }
- }
- for (; channels_remaining >= 2; channels_remaining -= 2)
- {
- // Matrices used and computed during this transform
- float32x2_t F[inner_tile_cols], f[output_tile_cols], b = vdup_n_f32(0.0f);
-
- // Read a 1x8 tile in the Winograd domain
- for (int j = 0; j < inner_tile_cols; j++)
- {
- F[j] = vld1_f32(inptr + j*matrix_stride);
- }
- inptr += 2;
-
- f[0] = vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmul_n_f32(F[6], 1), F[5], 1), F[4], 1), F[3], 1), F[2], 1), F[1], 1), F[0], 1);
- f[1] = vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmul_n_f32(F[7], 1), F[2], 1), F[6], 3), F[4], 2), F[3], -2), F[5], -3), F[1], -1);
-
- // Write out the output tile
- if (bptr != 0)
- {
- b = vld1_f32(bptr);
- bptr += 2;
- }
- for (int j = 0; j < cells_j; j++)
- {
- vst1_f32(outptrs[j], f[j] + b);
- outptrs[j] += 2;
- }
- }
-#endif // __arm_any__
- for (; channels_remaining; channels_remaining--)
- {
- // Matrices used and computed during this transform
- float F[inner_tile_cols], f[output_tile_cols], b = 0.0f;
-
- // Read a 1x8 tile in the Winograd domain
- for (int j = 0; j < inner_tile_cols; j++)
- {
- F[j] = *(inptr + j*matrix_stride);
- }
- inptr++;
-
- f[0] = F[0]*1 + F[1]*1 + F[2]*1 + F[3]*1 + F[4]*1 + F[5]*1 + F[6]*1;
- f[1] = F[1]*-1 + F[5]*-3 + F[3]*-2 + F[4]*2 + F[6]*3 + F[2]*1 + F[7]*1;
-
- // Write out the output tile
- if (bptr != 0)
- {
- b = *(bptr++);
- }
- for (int j = 0; j < cells_j; j++)
- {
- *(outptrs[j]++) = f[j] + b;
- }
- }
-}
-} // namespace (anonymous)
-
-namespace winograd
-{
-using Tiles = OutputTransformImplTiles<1, 7, 1, 8, float>;
-
-template <>
-const Tiles::TileFn Tiles::tilefn_unpadded = winograd_output_transform_2_7_fp32_process_tile<true>;
-
-template <>
-const Tiles::TileFn Tiles::tilefn_right_padded[n_pad_right] = {
- winograd_output_transform_2_7_fp32_process_tile<true, 1>
-};
-
-template class OutputTransform<1, 7, 1, 8, float>;
-template class OutputTransform<7, 1, 8, 1, float>;
-} // namespace winograd
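
For reference, the tail loop in the file deleted above reduces a 1x8 Winograd-domain tile to two output values (F(2, 7)) and adds an optional bias. A minimal scalar sketch, assuming an illustrative function name winograd_output_2_7_ref:

// Scalar sketch of the 1x8 -> 1x2 output transform: one channel,
// bias already loaded, no right padding. Matches the tail loop.
inline void winograd_output_2_7_ref(const float F[8], float bias, float f[2])
{
  f[0] =  F[0] + F[1] + F[2] + F[3] + F[4] + F[5] + F[6] + bias;
  f[1] = -F[1] + F[2] - 2*F[3] + 2*F[4] - 3*F[5] + 3*F[6] + F[7] + bias;
}
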
diff --git a/src/core/NEON/kernels/convolution/winograd/transforms/output_2x2_3x3_fp32.cpp b/src/core/NEON/kernels/convolution/winograd/transforms/output_2x2_3x3_fp32.cpp
deleted file mode 100644
index 597b074026..0000000000
--- a/src/core/NEON/kernels/convolution/winograd/transforms/output_2x2_3x3_fp32.cpp
+++ /dev/null
@@ -1,375 +0,0 @@
-/*
- * Copyright (c) 2017 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include "arm_compute/core/NEON/kernels/convolution/winograd/transforms/output.hpp"
-#include "arm_compute/core/NEON/kernels/convolution/winograd/winograd_output_transform.hpp"
-#include "arm_compute/core/NEON/kernels/convolution/common/arm.hpp"
-
-namespace
-{
-
-template <bool Specialized, int PadBottom=0, int PadRight=0>
-void winograd_output_transform_2x2_3x3_fp32_process_tile(
- const int n_channels,
- const float* const matrix_base,
- const int matrix_stride,
- const float* const biases,
- float* const output,
- const int output_row_stride,
- const int output_col_stride,
- const int _pad_bottom,
- const int _pad_right
-)
-{
- constexpr int OutputTileRows = 2, OutputTileCols = 2;
- const int pad_bottom = Specialized ? PadBottom : _pad_bottom;
- const int pad_right = Specialized ? PadRight : _pad_right;
-
- const int cells_i = OutputTileRows - pad_bottom;
- const int cells_j = OutputTileCols - pad_right;
-
- // Construct a map to the output cells
- float *outptrs[OutputTileRows][OutputTileCols];
- for (int i = 0; i < cells_i; i++)
- {
- for (int j = 0; j < cells_j; j++)
- {
- outptrs[i][j] = output + i*output_row_stride + j*output_col_stride;
- }
- }
- const float *inptr = matrix_base;
- const float *bptr = biases;
-
- if (bptr)
- {
- // For each channel of the output
- int channels_remaining = n_channels;
-#ifdef __aarch64__
- for (; channels_remaining >= 4; channels_remaining -= 4)
- {
- // Matrices used and computed during this transform
- float32x4_t F[4][4], FZ[4][2], f[2][2], b;
-
- // Read a 4x4 tile in the Winograd domain
- for (int i = 0, m = 0; i < 4; i++)
- {
- for (int j = 0; j < 4; j++, m++)
- {
- F[i][j] = vld1q_f32(inptr + m*matrix_stride);
- }
- }
- inptr += 4;
-
- // Compute the matrix F Z
- for (int i = 0; i < 4; i++)
- {
- // FZ[i][0] = F[i][0] + F[i][1] + F[i][2];
- FZ[i][0] = vaddq_f32(vaddq_f32(F[i][0], F[i][1]), F[i][2]);
-
- // FZ[i][1] = F[i][1] - F[i][2] - F[i][3];
- FZ[i][1] = vsubq_f32(vsubq_f32(F[i][1], F[i][2]), F[i][3]);
- }
-
- // Compute the output tile f = ZT F Z
- for (int j = 0; j < 2; j++)
- {
- // f[0][j] = FZ[0][j] + FZ[1][j] + FZ[2][j];
- f[0][j] = vaddq_f32(vaddq_f32(FZ[0][j], FZ[1][j]), FZ[2][j]);
-
- // f[1][j] = FZ[1][j] - FZ[2][j] - FZ[3][j];
- f[1][j] = vsubq_f32(vsubq_f32(FZ[1][j], FZ[2][j]), FZ[3][j]);
- }
-
- // Load the bias vector
- b = vld1q_f32(bptr);
- bptr += 4;
-
- // Write out the output tile
- for (int i = 0; i < cells_i; i++)
- {
- for (int j = 0; j < cells_j; j++)
- {
- vst1q_f32(outptrs[i][j], vaddq_f32(f[i][j], b));
- outptrs[i][j] += 4;
- }
- }
- }
-#endif // __aarch64__
-#ifdef __arm_any__
- for (; channels_remaining >= 2; channels_remaining -= 2)
- {
- // Matrices used and computed during this transform
- float32x2_t F[4][4], FZ[4][2], f[2][2], b;
-
- // Read a 4x4 tile in the Winograd domain
- for (int i = 0, m = 0; i < 4; i++)
- {
- for (int j = 0; j < 4; j++, m++)
- {
- F[i][j] = vld1_f32(inptr + m*matrix_stride);
- }
- }
- inptr += 2;
-
- // Compute the matrix F Z
- for (int i = 0; i < 4; i++)
- {
- // FZ[i][0] = F[i][0] + F[i][1] + F[i][2];
- FZ[i][0] = vadd_f32(vadd_f32(F[i][0], F[i][1]), F[i][2]);
-
- // FZ[i][1] = F[i][1] - F[i][2] - F[i][3];
- FZ[i][1] = vsub_f32(vsub_f32(F[i][1], F[i][2]), F[i][3]);
- }
-
- // Compute the output tile f = ZT F Z
- for (int j = 0; j < 2; j++)
- {
- // f[0][j] = FZ[0][j] + FZ[1][j] + FZ[2][j];
- f[0][j] = vadd_f32(vadd_f32(FZ[0][j], FZ[1][j]), FZ[2][j]);
-
- // f[1][j] = FZ[1][j] - FZ[2][j] - FZ[3][j];
- f[1][j] = vsub_f32(vsub_f32(FZ[1][j], FZ[2][j]), FZ[3][j]);
- }
-
- // Load the bias vector
- b = vld1_f32(bptr);
- bptr += 2;
-
- // Write out the output tile
- for (int i = 0; i < cells_i; i++)
- {
- for (int j = 0; j < cells_j; j++)
- {
- vst1_f32(outptrs[i][j], vadd_f32(f[i][j], b));
- outptrs[i][j] += 2;
- }
- }
- }
-#endif // __arm_any__
- for (; channels_remaining; channels_remaining--)
- {
- // Matrices used and computed during this transform
- float F[4][4], FZ[4][2], f[2][2], b;
-
- // Read a 4x4 tile in the Winograd domain
- for (int i = 0, m = 0; i < 4; i++)
- {
- for (int j = 0; j < 4; j++, m++)
- {
- F[i][j] = *(inptr + m*matrix_stride);
- }
- }
- inptr++;
-
- // Compute the matrix F Z
- for (int i = 0; i < 4; i++)
- {
- FZ[i][0] = F[i][0] + F[i][1] + F[i][2];
- FZ[i][1] = F[i][1] - F[i][2] - F[i][3];
- }
-
- // Compute the output tile f = ZT F Z
- for (int j = 0; j < 2; j++)
- {
- f[0][j] = FZ[0][j] + FZ[1][j] + FZ[2][j];
- f[1][j] = FZ[1][j] - FZ[2][j] - FZ[3][j];
- }
-
- // Load the bias
- b = *(bptr++);
-
- // Write out the output tile
- for (int i = 0; i < cells_i; i++)
- {
- for (int j = 0; j < cells_j; j++)
- {
- *(outptrs[i][j]++) = f[i][j] + b;
- }
- }
- }
- }
- else
- {
- // For each channel of the output
- int channels_remaining = n_channels;
-#ifdef __aarch64__
- for (; channels_remaining >= 4; channels_remaining -= 4)
- {
- // Matrices used and computed during this transform
- float32x4_t F[4][4], FZ[4][2], f[2][2];
-
- // Read a 4x4 tile in the Winograd domain
- for (int i = 0, m = 0; i < 4; i++)
- {
- for (int j = 0; j < 4; j++, m++)
- {
- F[i][j] = vld1q_f32(inptr + m*matrix_stride);
- }
- }
- inptr += 4;
-
- // Compute the matrix F Z
- for (int i = 0; i < 4; i++)
- {
- // FZ[i][0] = F[i][0] + F[i][1] + F[i][2];
- FZ[i][0] = vaddq_f32(vaddq_f32(F[i][0], F[i][1]), F[i][2]);
-
- // FZ[i][1] = F[i][1] - F[i][2] - F[i][3];
- FZ[i][1] = vsubq_f32(vsubq_f32(F[i][1], F[i][2]), F[i][3]);
- }
-
- // Compute the output tile f = ZT F Z
- for (int j = 0; j < 2; j++)
- {
- // f[0][j] = FZ[0][j] + FZ[1][j] + FZ[2][j];
- f[0][j] = vaddq_f32(vaddq_f32(FZ[0][j], FZ[1][j]), FZ[2][j]);
-
- // f[1][j] = FZ[1][j] - FZ[2][j] - FZ[3][j];
- f[1][j] = vsubq_f32(vsubq_f32(FZ[1][j], FZ[2][j]), FZ[3][j]);
- }
-
- // Write out the output tile
- for (int i = 0; i < cells_i; i++)
- {
- for (int j = 0; j < cells_j; j++)
- {
- vst1q_f32(outptrs[i][j], f[i][j]);
- outptrs[i][j] += 4;
- }
- }
- }
-#endif // __aarch64__
-#ifdef __arm_any__
- for (; channels_remaining >= 2; channels_remaining -= 2)
- {
- // Matrices used and computed during this transform
- float32x2_t F[4][4], FZ[4][2], f[2][2];
-
- // Read a 4x4 tile in the Winograd domain
- for (int i = 0, m = 0; i < 4; i++)
- {
- for (int j = 0; j < 4; j++, m++)
- {
- F[i][j] = vld1_f32(inptr + m*matrix_stride);
- }
- }
- inptr += 2;
-
- // Compute the matrix F Z
- for (int i = 0; i < 4; i++)
- {
- // FZ[i][0] = F[i][0] + F[i][1] + F[i][2];
- FZ[i][0] = vadd_f32(vadd_f32(F[i][0], F[i][1]), F[i][2]);
-
- // FZ[i][1] = F[i][1] - F[i][2] - F[i][3];
- FZ[i][1] = vsub_f32(vsub_f32(F[i][1], F[i][2]), F[i][3]);
- }
-
- // Compute the output tile f = ZT F Z
- for (int j = 0; j < 2; j++)
- {
- // f[0][j] = FZ[0][j] + FZ[1][j] + FZ[2][j];
- f[0][j] = vadd_f32(vadd_f32(FZ[0][j], FZ[1][j]), FZ[2][j]);
-
- // f[1][j] = FZ[1][j] - FZ[2][j] - FZ[3][j];
- f[1][j] = vsub_f32(vsub_f32(FZ[1][j], FZ[2][j]), FZ[3][j]);
- }
-
- // Write out the output tile
- for (int i = 0; i < cells_i; i++)
- {
- for (int j = 0; j < cells_j; j++)
- {
- vst1_f32(outptrs[i][j], f[i][j]);
- outptrs[i][j] += 2;
- }
- }
- }
-#endif // __arm_any__
- for (; channels_remaining; channels_remaining--)
- {
- // Matrices used and computed during this transform
- float F[4][4], FZ[4][2], f[2][2];
-
- // Read a 4x4 tile in the Winograd domain
- for (int i = 0, m = 0; i < 4; i++)
- {
- for (int j = 0; j < 4; j++, m++)
- {
- F[i][j] = *(inptr + m*matrix_stride);
- }
- }
- inptr++;
-
- // Compute the matrix F Z
- for (int i = 0; i < 4; i++)
- {
- FZ[i][0] = F[i][0] + F[i][1] + F[i][2];
- FZ[i][1] = F[i][1] - F[i][2] - F[i][3];
- }
-
- // Compute the output tile f = ZT F Z
- for (int j = 0; j < 2; j++)
- {
- f[0][j] = FZ[0][j] + FZ[1][j] + FZ[2][j];
- f[1][j] = FZ[1][j] - FZ[2][j] - FZ[3][j];
- }
-
- // Write out the output tile
- for (int i = 0; i < cells_i; i++)
- {
- for (int j = 0; j < cells_j; j++)
- {
- *(outptrs[i][j]++) = f[i][j];
- }
- }
- }
- }
-}
-
-} // namespace (anonymous)
-
-namespace winograd
-{
-using Tiles = OutputTransformImplTiles<3, 3, 4, 4, float>;
-
-template <>
-const Tiles::TileFn Tiles::tilefn_generic = winograd_output_transform_2x2_3x3_fp32_process_tile<false>;
-
-template <>
-const Tiles::TileFn Tiles::tilefn_unpadded = winograd_output_transform_2x2_3x3_fp32_process_tile<true>;
-
-template <>
-const Tiles::TileFn Tiles::tilefn_bottom_padded[n_pad_bottom] = {
- winograd_output_transform_2x2_3x3_fp32_process_tile<true, 1, 0>
-};
-
-template <>
-const Tiles::TileFn Tiles::tilefn_right_padded[n_pad_right] = {
- winograd_output_transform_2x2_3x3_fp32_process_tile<true, 0, 1>
-};
-
-template class OutputTransform<3, 3, 4, 4, float>;
-} // namespace winograd
-
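
For reference, the tail loop in the file deleted above computes the 4x4 -> 2x2 output transform f = Z^T F Z for F(2x2, 3x3), with an optional bias. A minimal scalar sketch, assuming an illustrative function name winograd_output_2x2_3x3_ref (one channel, no bottom/right padding):

// Scalar sketch of the 2x2 output transform: pass bias = 0.0f when the
// deleted kernel would take its no-bias branch.
inline void winograd_output_2x2_3x3_ref(const float F[4][4], float bias, float f[2][2])
{
  float FZ[4][2];
  for (int i = 0; i < 4; i++)
  {
    FZ[i][0] = F[i][0] + F[i][1] + F[i][2];
    FZ[i][1] = F[i][1] - F[i][2] - F[i][3];
  }
  for (int j = 0; j < 2; j++)
  {
    f[0][j] = FZ[0][j] + FZ[1][j] + FZ[2][j] + bias;
    f[1][j] = FZ[1][j] - FZ[2][j] - FZ[3][j] + bias;
  }
}
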
diff --git a/src/core/NEON/kernels/convolution/winograd/transforms/output_2x2_5x5_fp32.cpp b/src/core/NEON/kernels/convolution/winograd/transforms/output_2x2_5x5_fp32.cpp
deleted file mode 100644
index 60d7181d97..0000000000
--- a/src/core/NEON/kernels/convolution/winograd/transforms/output_2x2_5x5_fp32.cpp
+++ /dev/null
@@ -1,369 +0,0 @@
-/*
- * Copyright (c) 2017 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include "arm_compute/core/NEON/kernels/convolution/winograd/transforms/output.hpp"
-#include "arm_compute/core/NEON/kernels/convolution/winograd/winograd_output_transform.hpp"
-#include "arm_compute/core/NEON/kernels/convolution/common/arm.hpp"
-
-namespace
-{
-
-template <bool Specialized, int PadBottom=0, int PadRight=0>
-void winograd_output_transform_2x2_5x5_fp32_process_tile(
- const int n_channels,
- const float* const matrix_base,
- const int matrix_stride,
- const float* const biases,
- float* const output,
- const int output_row_stride,
- const int output_col_stride,
- const int _pad_bottom,
- const int _pad_right
-)
-{
- constexpr int OutputTileRows = 2, OutputTileCols = 2;
- const int pad_bottom = Specialized ? PadBottom : _pad_bottom;
- const int pad_right = Specialized ? PadRight : _pad_right;
-
- const int cells_i = 2 - pad_bottom;
- const int cells_j = 2 - pad_right;
-
- // Construct a map to the output cells
- float *outptrs[OutputTileRows][OutputTileCols];
- for (int i = 0; i < cells_i; i++)
- {
- for (int j = 0; j < cells_j; j++)
- {
- outptrs[i][j] = output + i*output_row_stride + j*output_col_stride;
- }
- }
- const float *inptr = matrix_base;
- const float *bptr = biases;
-
- if (bptr)
- {
- // For each channel of the output
- int channels_remaining = n_channels;
-#ifdef __aarch64__
- for (; channels_remaining >= 4; channels_remaining -= 4)
- {
- // Matrices used and computed during this transform
- float32x4_t F[6][6], FZ[6][2], f[2][2], b;
-
- // Read a 6x6 tile in the Winograd domain
- for (int i = 0, m = 0; i < 6; i++)
- {
- for (int j = 0; j < 6; j++, m++)
- {
- F[i][j] = vld1q_f32(inptr + m*matrix_stride);
- }
- }
- inptr += 4;
-
- // Compute the matrix F Z
- for (int i = 0; i < 6; i++)
- {
- // FZ[i][0] = 1*F[i][0] + 1*F[i][1] + 1*F[i][2] + 1*F[i][3] + 1*F[i][4];
- FZ[i][0] = vaddq_f32(vaddq_f32(vaddq_f32(F[i][0], F[i][1]), vaddq_f32(F[i][2], F[i][3])), F[i][4]);
-
- // FZ[i][1] = 1*F[i][1] + -1*F[i][2] + 2*F[i][3] + -2*F[i][4] + 1*F[i][5];
- FZ[i][1] = vaddq_f32(vmlaq_n_f32(vsubq_f32(F[i][1], F[i][2]), vsubq_f32(F[i][3], F[i][4]), 2.0f), F[i][5]);
- }
-
- // Compute the output tile f = ZT F Z
- for (int j = 0; j < 2; j++)
- {
- // f[0][j] = 1*FZ[0][j] + 1*FZ[1][j] + 1*FZ[2][j] + 1*FZ[3][j] + 1*FZ[4][j];
- f[0][j] = vaddq_f32(vaddq_f32(vaddq_f32(FZ[0][j], FZ[1][j]), vaddq_f32(FZ[2][j], FZ[3][j])), FZ[4][j]);
-
- // f[1][j] = 1*FZ[1][j] + -1*FZ[2][j] + 2*FZ[3][j] + -2*FZ[4][j] + 1*FZ[5][j];
- f[1][j] = vaddq_f32(vmlaq_n_f32(vsubq_f32(FZ[1][j], FZ[2][j]), vsubq_f32(FZ[3][j], FZ[4][j]), 2.0f), FZ[5][j]);
- }
-
- // Write out the output tile
- b = vld1q_f32(bptr);
- bptr += 4;
- for (int i = 0; i < cells_i; i++)
- {
- for (int j = 0; j < cells_j; j++)
- {
- vst1q_f32(outptrs[i][j], vaddq_f32(f[i][j], b));
- outptrs[i][j] += 4;
- }
- }
- }
-#endif // __aarch64__
-#ifdef __arm_any__
- for (; channels_remaining >= 2; channels_remaining -= 2)
- {
- // Matrices used and computed during this transform
- float32x2_t F[6][6], FZ[6][2], f[2][2], b;
-
- // Read a 6x6 tile in the Winograd domain
- for (int i = 0, m = 0; i < 6; i++)
- {
- for (int j = 0; j < 6; j++, m++)
- {
- F[i][j] = vld1_f32(inptr + m*matrix_stride);
- }
- }
- inptr += 2;
-
- // Compute the matrix F Z
- for (int i = 0; i < 6; i++)
- {
- // FZ[i][0] = 1*F[i][0] + 1*F[i][1] + 1*F[i][2] + 1*F[i][3] + 1*F[i][4];
- FZ[i][0] = vadd_f32(vadd_f32(vadd_f32(F[i][0], F[i][1]), vadd_f32(F[i][2], F[i][3])), F[i][4]);
-
- // FZ[i][1] = 1*F[i][1] + -1*F[i][2] + 2*F[i][3] + -2*F[i][4] + 1*F[i][5];
- FZ[i][1] = vadd_f32(vmla_n_f32(vsub_f32(F[i][1], F[i][2]), vsub_f32(F[i][3], F[i][4]), 2.0f), F[i][5]);
- }
-
- // Compute the output tile f = ZT F Z
- for (int j = 0; j < 2; j++)
- {
- // f[0][j] = 1*FZ[0][j] + 1*FZ[1][j] + 1*FZ[2][j] + 1*FZ[3][j] + 1*FZ[4][j];
- f[0][j] = vadd_f32(vadd_f32(vadd_f32(FZ[0][j], FZ[1][j]), vadd_f32(FZ[2][j], FZ[3][j])), FZ[4][j]);
-
- // f[1][j] = 1*FZ[1][j] + -1*FZ[2][j] + 2*FZ[3][j] + -2*FZ[4][j] + 1*FZ[5][j];
- f[1][j] = vadd_f32(vmla_n_f32(vsub_f32(FZ[1][j], FZ[2][j]), vsub_f32(FZ[3][j], FZ[4][j]), 2.0f), FZ[5][j]);
- }
-
- // Write out the output tile
- b = vld1_f32(bptr);
- bptr += 2;
- for (int i = 0; i < cells_i; i++)
- {
- for (int j = 0; j < cells_j; j++)
- {
- vst1_f32(outptrs[i][j], vadd_f32(f[i][j], b));
- outptrs[i][j] += 2;
- }
- }
- }
-#endif // __arm_any__
- for (; channels_remaining; channels_remaining--)
- {
- // Matrices used and computed during this transform
- float F[6][6], FZ[6][2], f[2][2], b;
-
- // Read a 6x6 tile in the Winograd domain
- for (int i = 0, m = 0; i < 6; i++)
- {
- for (int j = 0; j < 6; j++, m++)
- {
- F[i][j] = *(inptr + m*matrix_stride);
- }
- }
- inptr++;
-
- // Compute the matrix F Z
- for (int i = 0; i < 6; i++)
- {
- FZ[i][0] = 1*F[i][0] + 1*F[i][1] + 1*F[i][2] + 1*F[i][3] + 1*F[i][4];
- FZ[i][1] = 1*F[i][1] + -1*F[i][2] + 2*F[i][3] + -2*F[i][4] + 1*F[i][5];
- }
-
- // Compute the output tile f = ZT F Z
- for (int j = 0; j < 2; j++)
- {
- f[0][j] = 1*FZ[0][j] + 1*FZ[1][j] + 1*FZ[2][j] + 1*FZ[3][j] + 1*FZ[4][j];
- f[1][j] = 1*FZ[1][j] + -1*FZ[2][j] + 2*FZ[3][j] + -2*FZ[4][j] + 1*FZ[5][j];
- }
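-
- // Reading the coefficients off the two rows above, the 6x2 matrix Z used
- // on both sides of f = ZT F Z has columns (1, 1, 1, 1, 1, 0) and
- // (0, 1, -1, 2, -2, 1); these appear to be the Winograd/Cook-Toom output
- // coefficients for the sample points {0, +/-1, +/-2, inf} shared with the
- // matching 5x5 weight transform.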
-
- // Write out the output tile
- b = *(bptr++);
- for (int i = 0; i < cells_i; i++)
- {
- for (int j = 0; j < cells_j; j++)
- {
- *(outptrs[i][j]++) = f[i][j] + b;
- }
- }
- }
- }
- else
- {
- // For each channel of the output
- int channels_remaining = n_channels;
-#ifdef __aarch64__
- for (; channels_remaining >= 4; channels_remaining -= 4)
- {
- // Matrices used and computed during this transform
- float32x4_t F[6][6], FZ[6][2], f[2][2];
-
- // Read a 6x6 tile in the Winograd domain
- for (int i = 0, m = 0; i < 6; i++)
- {
- for (int j = 0; j < 6; j++, m++)
- {
- F[i][j] = vld1q_f32(inptr + m*matrix_stride);
- }
- }
- inptr += 4;
-
- // Compute the matrix F Z
- for (int i = 0; i < 6; i++)
- {
- // FZ[i][0] = 1*F[i][0] + 1*F[i][1] + 1*F[i][2] + 1*F[i][3] + 1*F[i][4];
- FZ[i][0] = vaddq_f32(vaddq_f32(vaddq_f32(F[i][0], F[i][1]), vaddq_f32(F[i][2], F[i][3])), F[i][4]);
-
- // FZ[i][1] = 1*F[i][1] + -1*F[i][2] + 2*F[i][3] + -2*F[i][4] + 1*F[i][5];
- FZ[i][1] = vaddq_f32(vmlaq_n_f32(vsubq_f32(F[i][1], F[i][2]), vsubq_f32(F[i][3], F[i][4]), 2.0f), F[i][5]);
- }
-
- // Compute the output tile f = ZT F Z
- for (int j = 0; j < 2; j++)
- {
- // f[0][j] = 1*FZ[0][j] + 1*FZ[1][j] + 1*FZ[2][j] + 1*FZ[3][j] + 1*FZ[4][j];
- f[0][j] = vaddq_f32(vaddq_f32(vaddq_f32(FZ[0][j], FZ[1][j]), vaddq_f32(FZ[2][j], FZ[3][j])), FZ[4][j]);
-
- // f[1][j] = 1*FZ[1][j] + -1*FZ[2][j] + 2*FZ[3][j] + -2*FZ[4][j] + 1*FZ[5][j];
- f[1][j] = vaddq_f32(vmlaq_n_f32(vsubq_f32(FZ[1][j], FZ[2][j]), vsubq_f32(FZ[3][j], FZ[4][j]), 2.0f), FZ[5][j]);
- }
-
- // Write out the output tile
- for (int i = 0; i < cells_i; i++)
- {
- for (int j = 0; j < cells_j; j++)
- {
- vst1q_f32(outptrs[i][j], f[i][j]);
- outptrs[i][j] += 4;
- }
- }
- }
-#endif // __aarch64__
-#ifdef __arm_any__
- for (; channels_remaining >= 2; channels_remaining -= 2)
- {
- // Matrices used and computed during this transform
- float32x2_t F[6][6], FZ[6][2], f[2][2];
-
- // Read a 6x6 tile in the Winograd domain
- for (int i = 0, m = 0; i < 6; i++)
- {
- for (int j = 0; j < 6; j++, m++)
- {
- F[i][j] = vld1_f32(inptr + m*matrix_stride);
- }
- }
- inptr += 2;
-
- // Compute the matrix F Z
- for (int i = 0; i < 6; i++)
- {
- // FZ[i][0] = 1*F[i][0] + 1*F[i][1] + 1*F[i][2] + 1*F[i][3] + 1*F[i][4];
- FZ[i][0] = vadd_f32(vadd_f32(vadd_f32(F[i][0], F[i][1]), vadd_f32(F[i][2], F[i][3])), F[i][4]);
-
- // FZ[i][1] = 1*F[i][1] + -1*F[i][2] + 2*F[i][3] + -2*F[i][4] + 1*F[i][5];
- FZ[i][1] = vadd_f32(vmla_n_f32(vsub_f32(F[i][1], F[i][2]), vsub_f32(F[i][3], F[i][4]), 2.0f), F[i][5]);
- }
-
- // Compute the output tile f = ZT F Z
- for (int j = 0; j < 2; j++)
- {
- // f[0][j] = 1*FZ[0][j] + 1*FZ[1][j] + 1*FZ[2][j] + 1*FZ[3][j] + 1*FZ[4][j];
- f[0][j] = vadd_f32(vadd_f32(vadd_f32(FZ[0][j], FZ[1][j]), vadd_f32(FZ[2][j], FZ[3][j])), FZ[4][j]);
-
- // f[1][j] = 1*FZ[1][j] + -1*FZ[2][j] + 2*FZ[3][j] + -2*FZ[4][j] + 1*FZ[5][j];
- f[1][j] = vadd_f32(vmla_n_f32(vsub_f32(FZ[1][j], FZ[2][j]), vsub_f32(FZ[3][j], FZ[4][j]), 2.0f), FZ[5][j]);
- }
-
- // Write out the output tile
- for (int i = 0; i < cells_i; i++)
- {
- for (int j = 0; j < cells_j; j++)
- {
- vst1_f32(outptrs[i][j], f[i][j]);
- outptrs[i][j] += 2;
- }
- }
- }
-#endif // __arm_any__
- for (; channels_remaining; channels_remaining--)
- {
- // Matrices used and computed during this transform
- float F[6][6], FZ[6][2], f[2][2];
-
- // Read a 6x6 tile in the Winograd domain
- for (int i = 0, m = 0; i < 6; i++)
- {
- for (int j = 0; j < 6; j++, m++)
- {
- F[i][j] = *(inptr + m*matrix_stride);
- }
- }
- inptr++;
-
- // Compute the matrix F Z
- for (int i = 0; i < 6; i++)
- {
- FZ[i][0] = 1*F[i][0] + 1*F[i][1] + 1*F[i][2] + 1*F[i][3] + 1*F[i][4];
- FZ[i][1] = 1*F[i][1] + -1*F[i][2] + 2*F[i][3] + -2*F[i][4] + 1*F[i][5];
- }
-
- // Compute the output tile f = ZT F Z
- for (int j = 0; j < 2; j++)
- {
- f[0][j] = 1*FZ[0][j] + 1*FZ[1][j] + 1*FZ[2][j] + 1*FZ[3][j] + 1*FZ[4][j];
- f[1][j] = 1*FZ[1][j] + -1*FZ[2][j] + 2*FZ[3][j] + -2*FZ[4][j] + 1*FZ[5][j];
- }
-
- // Write out the output tile
- for (int i = 0; i < cells_i; i++)
- {
- for (int j = 0; j < cells_j; j++)
- {
- *(outptrs[i][j]++) = f[i][j];
- }
- }
- }
- }
-}
-
-} // namespace (anonymous)
-
-namespace winograd
-{
-using Tiles = OutputTransformImplTiles<5, 5, 6, 6, float>;
-
-template <>
-const Tiles::TileFn Tiles::tilefn_generic = winograd_output_transform_2x2_5x5_fp32_process_tile<false>;
-
-template <>
-const Tiles::TileFn Tiles::tilefn_unpadded = winograd_output_transform_2x2_5x5_fp32_process_tile<true>;
-
-template <>
-const Tiles::TileFn Tiles::tilefn_bottom_padded[n_pad_bottom] = {
- winograd_output_transform_2x2_5x5_fp32_process_tile<true, 1, 0>
-};
-
-template <>
-const Tiles::TileFn Tiles::tilefn_right_padded[n_pad_right] = {
- winograd_output_transform_2x2_5x5_fp32_process_tile<true, 0, 1>
-};
-
-template class OutputTransform<5, 5, 6, 6, float>;
-} // namespace winograd
-
diff --git a/src/core/NEON/kernels/convolution/winograd/transforms/output_4_5_fp32.cpp b/src/core/NEON/kernels/convolution/winograd/transforms/output_4_5_fp32.cpp
deleted file mode 100644
index 911759b128..0000000000
--- a/src/core/NEON/kernels/convolution/winograd/transforms/output_4_5_fp32.cpp
+++ /dev/null
@@ -1,171 +0,0 @@
-/*
- * Copyright (c) 2017 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include "arm_compute/core/NEON/kernels/convolution/winograd/transforms/output.hpp"
-#include "arm_compute/core/NEON/kernels/convolution/winograd/winograd_output_transform.hpp"
-#include "arm_compute/core/NEON/kernels/convolution/common/arm.hpp"
-
-namespace
-{
-
-template <bool Specialized, int PadRight=0>
-void winograd_output_transform_4_5_fp32_process_tile(
- const int n_channels,
- const float* const matrix_base,
- const int matrix_stride,
- const float* const biases,
- float* const output,
- const int output_row_stride,
- const int output_col_stride,
- const int _pad_bottom,
- const int _pad_right
-)
-{
- (void) output_row_stride;
- (void) _pad_bottom;
- constexpr int output_tile_cols = 4;
- constexpr int inner_tile_cols = 8;
-
- const int pad_right = Specialized ? PadRight : _pad_right;
- const int cells_j = output_tile_cols - pad_right;
-
- // Construct a map to the output cells
- float *outptrs[cells_j];
- for (int j = 0; j < cells_j; j++)
- {
- outptrs[j] = output + j*output_col_stride;
- }
- const float *inptr = matrix_base;
- const float *bptr = biases;
-
- // For each channel of the output
- int channels_remaining = n_channels;
-#ifdef __arm_any__
- for (; channels_remaining >= 4; channels_remaining -= 4)
- {
- // Matrices used and computed during this transform
- float32x4_t F[inner_tile_cols], f[output_tile_cols], b = vdupq_n_f32(0.0f);
-
- // Read a 1x8 tile in the Winograd domain
- for (int j = 0; j < inner_tile_cols; j++)
- {
- F[j] = vld1q_f32(inptr + j*matrix_stride);
- }
- inptr += 4;
-
- f[0] = vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmulq_n_f32(F[6], 1), F[5], 1), F[4], 1), F[3], 1), F[2], 1), F[1], 1), F[0], 1);
- f[1] = vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmulq_n_f32(F[2], 1), F[6], 3), F[4], 2), F[3], -2), F[5], -3), F[1], -1);
- f[2] = vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmulq_n_f32(F[2], 1), F[1], 1), F[6], 9), F[5], 9), F[4], 4), F[3], 4);
- f[3] = vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmulq_n_f32(F[7], 1), F[2], 1), F[6], 27), F[4], 8), F[3], -8), F[5], -27), F[1], -1);
-
- // Write out the output tile
- if (bptr != 0)
- {
- b = vld1q_f32(bptr);
- bptr += 4;
- }
- for (int j = 0; j < cells_j; j++)
- {
- vst1q_f32(outptrs[j], f[j] + b);
- outptrs[j] += 4;
- }
- }
- for (; channels_remaining >= 2; channels_remaining -= 2)
- {
- // Matrices used and computed during this transform
- float32x2_t F[inner_tile_cols], f[output_tile_cols], b = vdup_n_f32(0.0f);
-
- // Read a 1x8 tile in the Winograd domain
- for (int j = 0; j < inner_tile_cols; j++)
- {
- F[j] = vld1_f32(inptr + j*matrix_stride);
- }
- inptr += 2;
-
- f[0] = vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmul_n_f32(F[6], 1), F[5], 1), F[4], 1), F[3], 1), F[2], 1), F[1], 1), F[0], 1);
- f[1] = vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmul_n_f32(F[2], 1), F[6], 3), F[4], 2), F[3], -2), F[5], -3), F[1], -1);
- f[2] = vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmul_n_f32(F[2], 1), F[1], 1), F[6], 9), F[5], 9), F[4], 4), F[3], 4);
- f[3] = vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmul_n_f32(F[7], 1), F[2], 1), F[6], 27), F[4], 8), F[3], -8), F[5], -27), F[1], -1);
-
- // Write out the output tile
- if (bptr != 0)
- {
- b = vld1_f32(bptr);
- bptr += 2;
- }
- for (int j = 0; j < cells_j; j++)
- {
- vst1_f32(outptrs[j], f[j] + b);
- outptrs[j] += 2;
- }
- }
-#endif // __arm_any__
- for (; channels_remaining; channels_remaining--)
- {
- // Matrices used and computed during this transform
- float F[inner_tile_cols], f[output_tile_cols], b = 0.0f;
-
- // Read a 1x8 tile in the Winograd domain
- for (int j = 0; j < inner_tile_cols; j++)
- {
- F[j] = *(inptr + j*matrix_stride);
- }
- inptr++;
-
- f[0] = F[0]*1 + F[1]*1 + F[2]*1 + F[3]*1 + F[4]*1 + F[5]*1 + F[6]*1;
- f[1] = F[1]*-1 + F[5]*-3 + F[3]*-2 + F[4]*2 + F[6]*3 + F[2]*1;
- f[2] = F[3]*4 + F[4]*4 + F[5]*9 + F[6]*9 + F[1]*1 + F[2]*1;
- f[3] = F[1]*-1 + F[5]*-27 + F[3]*-8 + F[4]*8 + F[6]*27 + F[2]*1 + F[7]*1;
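-
- // Equivalently, f = A F, where the 4x8 matrix A has rows
- //   [1  1 1  1 1   1  1 0]
- //   [0 -1 1 -2 2  -3  3 0]
- //   [0  1 1  4 4   9  9 0]
- //   [0 -1 1 -8 8 -27 27 1],
- // presumably the F(4, 5) output transform built on the sample points
- // {0, +/-1, +/-2, +/-3, inf}.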
-
- // Write out the output tile
- if (bptr != 0)
- {
- b = *(bptr++);
- }
- for (int j = 0; j < cells_j; j++)
- {
- *(outptrs[j]++) = f[j] + b;
- }
- }
-}
-
-} // namespace (anonymous)
-
-namespace winograd
-{
-using Tiles = OutputTransformImplTiles<1, 5, 1, 8, float>;
-
-template <>
-const Tiles::TileFn Tiles::tilefn_unpadded = winograd_output_transform_4_5_fp32_process_tile<true>;
-
-template <>
-const Tiles::TileFn Tiles::tilefn_right_padded[n_pad_right] = {
- winograd_output_transform_4_5_fp32_process_tile<true, 1>,
- winograd_output_transform_4_5_fp32_process_tile<true, 2>,
- winograd_output_transform_4_5_fp32_process_tile<true, 3>
-};
-
-template class OutputTransform<1, 5, 1, 8, float>;
-template class OutputTransform<5, 1, 8, 1, float>;
-} // namespace winograd
diff --git a/src/core/NEON/kernels/convolution/winograd/transforms/output_4x4_3x3_fp32.cpp b/src/core/NEON/kernels/convolution/winograd/transforms/output_4x4_3x3_fp32.cpp
deleted file mode 100644
index 15cc04b352..0000000000
--- a/src/core/NEON/kernels/convolution/winograd/transforms/output_4x4_3x3_fp32.cpp
+++ /dev/null
@@ -1,428 +0,0 @@
-/*
- * Copyright (c) 2017 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include "arm_compute/core/NEON/kernels/convolution/winograd/transforms/output.hpp"
-#include "arm_compute/core/NEON/kernels/convolution/winograd/winograd_output_transform.hpp"
-#include "arm_compute/core/NEON/kernels/convolution/common/arm.hpp"
-
-namespace
-{
-
-template <bool Specialized, int PadBottom=0, int PadRight=0>
-void winograd_output_transform_4x4_3x3_fp32_process_tile(
- const int n_channels,
- const float* const matrix_base,
- const int matrix_stride,
- const float* const biases,
- float* const output,
- const int output_row_stride,
- const int output_col_stride,
- const int _pad_bottom,
- const int _pad_right
-)
-{
- const int pad_bottom = Specialized ? PadBottom : _pad_bottom;
- const int pad_right = Specialized ? PadRight : _pad_right;
- constexpr int TileRows = 4, TileCols = 4;
-
- const int cells_i = TileRows - pad_bottom;
- const int cells_j = TileCols - pad_right;
-
- // Construct a map to the output cells
- float *outptrs[TileRows][TileCols];
- for (int i = 0; i < cells_i; i++)
- {
- for (int j = 0; j < cells_j; j++)
- {
- outptrs[i][j] = output + i*output_row_stride + j*output_col_stride;
- }
- }
- const float *inptr = matrix_base;
- const float *bptr = biases;
-
- if (bptr)
- {
- // For each channel of the output
- int channels_remaining = n_channels;
-#ifdef __aarch64__
- for (; channels_remaining >= 4; channels_remaining -= 4)
- {
- // Matrices used and computed during this transform
- float32x4_t F[6][6], FZ[6][4], f[4][4], b;
-
- // Read a 6x6 tile in the Winograd domain
- for (int i = 0, m = 0; i < 6; i++)
- {
- for (int j = 0; j < 6; j++, m++)
- {
- F[i][j] = vld1q_f32(inptr + m*matrix_stride);
- }
- }
- inptr += 4;
-
- // Compute the matrix F Z
- for (int i = 0; i < 6; i++)
- {
- // FZ[i][0] = 1*F[i][0] + 1*F[i][1] + 1*F[i][2] + 1*F[i][3] + 1*F[i][4];
- FZ[i][0] = vaddq_f32(vaddq_f32(vaddq_f32(F[i][0], F[i][1]), vaddq_f32(F[i][2], F[i][3])), F[i][4]);
-
- // FZ[i][1] = 1*F[i][1] + -1*F[i][2] + 2*F[i][3] + -2*F[i][4];
- FZ[i][1] = vmlaq_n_f32(vsubq_f32(F[i][1], F[i][2]), vsubq_f32(F[i][3], F[i][4]), 2.0f);
-
- // FZ[i][2] = 1*F[i][1] + 1*F[i][2] + 4*F[i][3] + 4*F[i][4];
- FZ[i][2] = vmlaq_n_f32(vaddq_f32(F[i][1], F[i][2]), vaddq_f32(F[i][3], F[i][4]), 4.0f);
-
- // FZ[i][3] = 1*F[i][1] + -1*F[i][2] + 8*F[i][3] + -8*F[i][4] + 1*F[i][5];
- FZ[i][3] = vaddq_f32(vmlaq_n_f32(vsubq_f32(F[i][1], F[i][2]), vsubq_f32(F[i][3], F[i][4]), 8.0f), F[i][5]);
- }
-
- // Compute the output tile f = ZT F Z
- for (int j = 0; j < 4; j++)
- {
- // f[0][j] = 1*FZ[0][j] + 1*FZ[1][j] + 1*FZ[2][j] + 1*FZ[3][j] + 1*FZ[4][j];
- f[0][j] = vaddq_f32(vaddq_f32(vaddq_f32(FZ[0][j], FZ[1][j]), vaddq_f32(FZ[2][j], FZ[3][j])), FZ[4][j]);
-
- // f[1][j] = 1*FZ[1][j] + -1*FZ[2][j] + 2*FZ[3][j] + -2*FZ[4][j];
- f[1][j] = vmlaq_n_f32(vsubq_f32(FZ[1][j], FZ[2][j]), vsubq_f32(FZ[3][j], FZ[4][j]), 2.0f);
-
- // f[2][j] = 1*FZ[1][j] + 1*FZ[2][j] + 4*FZ[3][j] + 4*FZ[4][j];
- f[2][j] = vmlaq_n_f32(vaddq_f32(FZ[1][j], FZ[2][j]), vaddq_f32(FZ[3][j], FZ[4][j]), 4.0f);
-
- // f[3][j] = 1*FZ[1][j] + -1*FZ[2][j] + 8*FZ[3][j] + -8*FZ[4][j] + 1*FZ[5][j];
- f[3][j] = vaddq_f32(vmlaq_n_f32(vsubq_f32(FZ[1][j], FZ[2][j]), vsubq_f32(FZ[3][j], FZ[4][j]), 8.0f), FZ[5][j]);
- }
-
- // Write out the output tile
- b = vld1q_f32(bptr);
- bptr += 4;
- for (int i = 0; i < cells_i; i++)
- {
- for (int j = 0; j < cells_j; j++)
- {
- vst1q_f32(outptrs[i][j], vaddq_f32(f[i][j], b));
- outptrs[i][j] += 4;
- }
- }
- }
-#endif // __aarch64__
-#ifdef __arm_any__
- for (; channels_remaining >= 2; channels_remaining -= 2)
- {
- // Matrices used and computed during this transform
- float32x2_t F[6][6], FZ[6][4], f[4][4], b;
-
- // Read a 6x6 tile in the Winograd domain
- for (int i = 0, m = 0; i < 6; i++)
- {
- for (int j = 0; j < 6; j++, m++)
- {
- F[i][j] = vld1_f32(inptr + m*matrix_stride);
- }
- }
- inptr += 2;
-
- // Compute the matrix F Z
- for (int i = 0; i < 6; i++)
- {
- // FZ[i][0] = 1*F[i][0] + 1*F[i][1] + 1*F[i][2] + 1*F[i][3] + 1*F[i][4];
- FZ[i][0] = vadd_f32(vadd_f32(vadd_f32(F[i][0], F[i][1]), vadd_f32(F[i][2], F[i][3])), F[i][4]);
-
- // FZ[i][1] = 1*F[i][1] + -1*F[i][2] + 2*F[i][3] + -2*F[i][4];
- FZ[i][1] = vmla_n_f32(vsub_f32(F[i][1], F[i][2]), vsub_f32(F[i][3], F[i][4]), 2.0f);
-
- // FZ[i][2] = 1*F[i][1] + 1*F[i][2] + 4*F[i][3] + 4*F[i][4];
- FZ[i][2] = vmla_n_f32(vadd_f32(F[i][1], F[i][2]), vadd_f32(F[i][3], F[i][4]), 4.0f);
-
- // FZ[i][3] = 1*F[i][1] + -1*F[i][2] + 8*F[i][3] + -8*F[i][4] + 1*F[i][5];
- FZ[i][3] = vadd_f32(vmla_n_f32(vsub_f32(F[i][1], F[i][2]), vsub_f32(F[i][3], F[i][4]), 8.0f), F[i][5]);
- }
-
- // Compute the output tile f = ZT F Z
- for (int j = 0; j < 4; j++)
- {
- // f[0][j] = 1*FZ[0][j] + 1*FZ[1][j] + 1*FZ[2][j] + 1*FZ[3][j] + 1*FZ[4][j];
- f[0][j] = vadd_f32(vadd_f32(vadd_f32(FZ[0][j], FZ[1][j]), vadd_f32(FZ[2][j], FZ[3][j])), FZ[4][j]);
-
- // f[1][j] = 1*FZ[1][j] + -1*FZ[2][j] + 2*FZ[3][j] + -2*FZ[4][j];
- f[1][j] = vmla_n_f32(vsub_f32(FZ[1][j], FZ[2][j]), vsub_f32(FZ[3][j], FZ[4][j]), 2.0f);
-
- // f[2][j] = 1*FZ[1][j] + 1*FZ[2][j] + 4*FZ[3][j] + 4*FZ[4][j];
- f[2][j] = vmla_n_f32(vadd_f32(FZ[1][j], FZ[2][j]), vadd_f32(FZ[3][j], FZ[4][j]), 4.0f);
-
- // f[3][j] = 1*FZ[1][j] + -1*FZ[2][j] + 8*FZ[3][j] + -8*FZ[4][j] + 1*FZ[5][j];
- f[3][j] = vadd_f32(vmla_n_f32(vsub_f32(FZ[1][j], FZ[2][j]), vsub_f32(FZ[3][j], FZ[4][j]), 8.0f), FZ[5][j]);
- }
-
- // Write out the output tile
- b = vld1_f32(bptr);
- bptr += 2;
- for (int i = 0; i < cells_i; i++)
- {
- for (int j = 0; j < cells_j; j++)
- {
- vst1_f32(outptrs[i][j], vadd_f32(f[i][j], b));
- outptrs[i][j] += 2;
- }
- }
- }
-#endif // __arm_any__
- for (; channels_remaining; channels_remaining--)
- {
- // Matrices used and computed during this transform
- float F[6][6], FZ[6][4], f[4][4], b;
-
- // Read a 6x6 tile in the Winograd domain
- for (int i = 0, m = 0; i < 6; i++)
- {
- for (int j = 0; j < 6; j++, m++)
- {
- F[i][j] = *(inptr + m*matrix_stride);
- }
- }
- inptr++;
-
- // Compute the matrix F Z
- for (int i = 0; i < 6; i++)
- {
- FZ[i][0] = 1*F[i][0] + 1*F[i][1] + 1*F[i][2] + 1*F[i][3] + 1*F[i][4];
- FZ[i][1] = 1*F[i][1] + -1*F[i][2] + 2*F[i][3] + -2*F[i][4];
- FZ[i][2] = 1*F[i][1] + 1*F[i][2] + 4*F[i][3] + 4*F[i][4];
- FZ[i][3] = 1*F[i][1] + -1*F[i][2] + 8*F[i][3] + -8*F[i][4] + 1*F[i][5];
- }
-
- // Compute the output tile f = ZT F Z
- for (int j = 0; j < 4; j++)
- {
- f[0][j] = 1*FZ[0][j] + 1*FZ[1][j] + 1*FZ[2][j] + 1*FZ[3][j] + 1*FZ[4][j];
- f[1][j] = 1*FZ[1][j] + -1*FZ[2][j] + 2*FZ[3][j] + -2*FZ[4][j];
- f[2][j] = 1*FZ[1][j] + 1*FZ[2][j] + 4*FZ[3][j] + 4*FZ[4][j];
- f[3][j] = 1*FZ[1][j] + -1*FZ[2][j] + 8*FZ[3][j] + -8*FZ[4][j] + 1*FZ[5][j];
- }
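-
- // Taken together, the coefficients above define the 6x4 matrix Z with
- // columns (1,1,1,1,1,0), (0,1,-1,2,-2,0), (0,1,1,4,4,0) and
- // (0,1,-1,8,-8,1), i.e. the standard F(4x4, 3x3) output transform over
- // the sample points {0, +/-1, +/-2, inf}.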
-
- // Write out the output tile
- b = *(bptr++);
- for (int i = 0; i < cells_i; i++)
- {
- for (int j = 0; j < cells_j; j++)
- {
- *(outptrs[i][j]++) = f[i][j] + b;
- }
- }
- }
- }
- else
- {
- // For each channel of the output
- int channels_remaining = n_channels;
-#ifdef __aarch64__
- for (; channels_remaining >= 4; channels_remaining -= 4)
- {
- // Matrices used and computed during this transform
- float32x4_t F[6][6], FZ[6][4], f[4][4];
-
- // Read a 6x6 tile in the Winograd domain
- for (int i = 0, m = 0; i < 6; i++)
- {
- for (int j = 0; j < 6; j++, m++)
- {
- F[i][j] = vld1q_f32(inptr + m*matrix_stride);
- }
- }
- inptr += 4;
-
- // Compute the matrix F Z
- for (int i = 0; i < 6; i++)
- {
- // FZ[i][0] = 1*F[i][0] + 1*F[i][1] + 1*F[i][2] + 1*F[i][3] + 1*F[i][4];
- FZ[i][0] = vaddq_f32(vaddq_f32(vaddq_f32(F[i][0], F[i][1]), vaddq_f32(F[i][2], F[i][3])), F[i][4]);
-
- // FZ[i][1] = 1*F[i][1] + -1*F[i][2] + 2*F[i][3] + -2*F[i][4];
- FZ[i][1] = vmlaq_n_f32(vsubq_f32(F[i][1], F[i][2]), vsubq_f32(F[i][3], F[i][4]), 2.0f);
-
- // FZ[i][2] = 1*F[i][1] + 1*F[i][2] + 4*F[i][3] + 4*F[i][4];
- FZ[i][2] = vmlaq_n_f32(vaddq_f32(F[i][1], F[i][2]), vaddq_f32(F[i][3], F[i][4]), 4.0f);
-
- // FZ[i][3] = 1*F[i][1] + -1*F[i][2] + 8*F[i][3] + -8*F[i][4] + 1*F[i][5];
- FZ[i][3] = vaddq_f32(vmlaq_n_f32(vsubq_f32(F[i][1], F[i][2]), vsubq_f32(F[i][3], F[i][4]), 8.0f), F[i][5]);
- }
-
- // Compute the output tile f = ZT F Z
- for (int j = 0; j < 4; j++)
- {
- // f[0][j] = 1*FZ[0][j] + 1*FZ[1][j] + 1*FZ[2][j] + 1*FZ[3][j] + 1*FZ[4][j];
- f[0][j] = vaddq_f32(vaddq_f32(vaddq_f32(FZ[0][j], FZ[1][j]), vaddq_f32(FZ[2][j], FZ[3][j])), FZ[4][j]);
-
- // f[1][j] = 1*FZ[1][j] + -1*FZ[2][j] + 2*FZ[3][j] + -2*FZ[4][j];
- f[1][j] = vmlaq_n_f32(vsubq_f32(FZ[1][j], FZ[2][j]), vsubq_f32(FZ[3][j], FZ[4][j]), 2.0f);
-
- // f[2][j] = 1*FZ[1][j] + 1*FZ[2][j] + 4*FZ[3][j] + 4*FZ[4][j];
- f[2][j] = vmlaq_n_f32(vaddq_f32(FZ[1][j], FZ[2][j]), vaddq_f32(FZ[3][j], FZ[4][j]), 4.0f);
-
- // f[3][j] = 1*FZ[1][j] + -1*FZ[2][j] + 8*FZ[3][j] + -8*FZ[4][j] + 1*FZ[5][j];
- f[3][j] = vaddq_f32(vmlaq_n_f32(vsubq_f32(FZ[1][j], FZ[2][j]), vsubq_f32(FZ[3][j], FZ[4][j]), 8.0f), FZ[5][j]);
- }
-
- // Write out the output tile
- for (int i = 0; i < cells_i; i++)
- {
- for (int j = 0; j < cells_j; j++)
- {
- vst1q_f32(outptrs[i][j], f[i][j]);
- outptrs[i][j] += 4;
- }
- }
- }
-#endif // __aarch64__
-#ifdef __arm_any__
- for (; channels_remaining >= 2; channels_remaining -= 2)
- {
- // Matrices used and computed during this transform
- float32x2_t F[6][6], FZ[6][4], f[4][4];
-
- // Read a 6x6 tile in the Winograd domain
- for (int i = 0, m = 0; i < 6; i++)
- {
- for (int j = 0; j < 6; j++, m++)
- {
- F[i][j] = vld1_f32(inptr + m*matrix_stride);
- }
- }
- inptr += 2;
-
- // Compute the matrix F Z
- for (int i = 0; i < 6; i++)
- {
- // FZ[i][0] = 1*F[i][0] + 1*F[i][1] + 1*F[i][2] + 1*F[i][3] + 1*F[i][4];
- FZ[i][0] = vadd_f32(vadd_f32(vadd_f32(F[i][0], F[i][1]), vadd_f32(F[i][2], F[i][3])), F[i][4]);
-
- // FZ[i][1] = 1*F[i][1] + -1*F[i][2] + 2*F[i][3] + -2*F[i][4];
- FZ[i][1] = vmla_n_f32(vsub_f32(F[i][1], F[i][2]), vsub_f32(F[i][3], F[i][4]), 2.0f);
-
- // FZ[i][2] = 1*F[i][1] + 1*F[i][2] + 4*F[i][3] + 4*F[i][4];
- FZ[i][2] = vmla_n_f32(vadd_f32(F[i][1], F[i][2]), vadd_f32(F[i][3], F[i][4]), 4.0f);
-
- // FZ[i][3] = 1*F[i][1] + -1*F[i][2] + 8*F[i][3] + -8*F[i][4] + 1*F[i][5];
- FZ[i][3] = vadd_f32(vmla_n_f32(vsub_f32(F[i][1], F[i][2]), vsub_f32(F[i][3], F[i][4]), 8.0f), F[i][5]);
- }
-
- // Compute the output tile f = ZT F Z
- for (int j = 0; j < 4; j++)
- {
- // f[0][j] = 1*FZ[0][j] + 1*FZ[1][j] + 1*FZ[2][j] + 1*FZ[3][j] + 1*FZ[4][j];
- f[0][j] = vadd_f32(vadd_f32(vadd_f32(FZ[0][j], FZ[1][j]), vadd_f32(FZ[2][j], FZ[3][j])), FZ[4][j]);
-
- // f[1][j] = 1*FZ[1][j] + -1*FZ[2][j] + 2*FZ[3][j] + -2*FZ[4][j];
- f[1][j] = vmla_n_f32(vsub_f32(FZ[1][j], FZ[2][j]), vsub_f32(FZ[3][j], FZ[4][j]), 2.0f);
-
- // f[2][j] = 1*FZ[1][j] + 1*FZ[2][j] + 4*FZ[3][j] + 4*FZ[4][j];
- f[2][j] = vmla_n_f32(vadd_f32(FZ[1][j], FZ[2][j]), vadd_f32(FZ[3][j], FZ[4][j]), 4.0f);
-
- // f[3][j] = 1*FZ[1][j] + -1*FZ[2][j] + 8*FZ[3][j] + -8*FZ[4][j] + 1*FZ[5][j];
- f[3][j] = vadd_f32(vmla_n_f32(vsub_f32(FZ[1][j], FZ[2][j]), vsub_f32(FZ[3][j], FZ[4][j]), 8.0f), FZ[5][j]);
- }
-
- // Write out the output tile
- for (int i = 0; i < cells_i; i++)
- {
- for (int j = 0; j < cells_j; j++)
- {
- vst1_f32(outptrs[i][j], f[i][j]);
- outptrs[i][j] += 2;
- }
- }
- }
-#endif // __arm_any__
- for (; channels_remaining; channels_remaining--)
- {
- // Matrices used and computed during this transform
- float F[6][6], FZ[6][4], f[4][4];
-
- // Read a 6x6 tile in the Winograd domain
- for (int i = 0, m = 0; i < 6; i++)
- {
- for (int j = 0; j < 6; j++, m++)
- {
- F[i][j] = *(inptr + m*matrix_stride);
- }
- }
- inptr++;
-
- // Compute the matrix F Z
- for (int i = 0; i < 6; i++)
- {
- FZ[i][0] = 1*F[i][0] + 1*F[i][1] + 1*F[i][2] + 1*F[i][3] + 1*F[i][4];
- FZ[i][1] = 1*F[i][1] + -1*F[i][2] + 2*F[i][3] + -2*F[i][4];
- FZ[i][2] = 1*F[i][1] + 1*F[i][2] + 4*F[i][3] + 4*F[i][4];
- FZ[i][3] = 1*F[i][1] + -1*F[i][2] + 8*F[i][3] + -8*F[i][4] + 1*F[i][5];
- }
-
- // Compute the output tile f = ZT F Z
- for (int j = 0; j < 4; j++)
- {
- f[0][j] = 1*FZ[0][j] + 1*FZ[1][j] + 1*FZ[2][j] + 1*FZ[3][j] + 1*FZ[4][j];
- f[1][j] = 1*FZ[1][j] + -1*FZ[2][j] + 2*FZ[3][j] + -2*FZ[4][j];
- f[2][j] = 1*FZ[1][j] + 1*FZ[2][j] + 4*FZ[3][j] + 4*FZ[4][j];
- f[3][j] = 1*FZ[1][j] + -1*FZ[2][j] + 8*FZ[3][j] + -8*FZ[4][j] + 1*FZ[5][j];
- }
-
- // Write out the output tile
- for (int i = 0; i < cells_i; i++)
- {
- for (int j = 0; j < cells_j; j++)
- {
- *(outptrs[i][j]++) = f[i][j];
- }
- }
- }
- }
-}
-
-} // namespace (anonymous)
-
-namespace winograd
-{
-using Tiles = OutputTransformImplTiles<3, 3, 6, 6, float>;
-
-template <>
-const Tiles::TileFn Tiles::tilefn_generic = winograd_output_transform_4x4_3x3_fp32_process_tile<false>;
-
-template <>
-const Tiles::TileFn Tiles::tilefn_unpadded = winograd_output_transform_4x4_3x3_fp32_process_tile<true>;
-
-template <>
-const Tiles::TileFn Tiles::tilefn_bottom_padded[n_pad_bottom] = {
- winograd_output_transform_4x4_3x3_fp32_process_tile<true, 1, 0>,
- winograd_output_transform_4x4_3x3_fp32_process_tile<true, 2, 0>,
- winograd_output_transform_4x4_3x3_fp32_process_tile<true, 3, 0>,
-};
-
-template <>
-const Tiles::TileFn Tiles::tilefn_right_padded[n_pad_right] = {
- winograd_output_transform_4x4_3x3_fp32_process_tile<true, 0, 1>,
- winograd_output_transform_4x4_3x3_fp32_process_tile<true, 0, 2>,
- winograd_output_transform_4x4_3x3_fp32_process_tile<true, 0, 3>,
-};
-
-template class OutputTransform<3, 3, 6, 6, float>;
-} // namespace winograd
diff --git a/src/core/NEON/kernels/convolution/winograd/transforms/output_6_3_fp32.cpp b/src/core/NEON/kernels/convolution/winograd/transforms/output_6_3_fp32.cpp
deleted file mode 100644
index 58bed71a47..0000000000
--- a/src/core/NEON/kernels/convolution/winograd/transforms/output_6_3_fp32.cpp
+++ /dev/null
@@ -1,179 +0,0 @@
-/*
- * Copyright (c) 2017 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include "arm_compute/core/NEON/kernels/convolution/winograd/transforms/output.hpp"
-#include "arm_compute/core/NEON/kernels/convolution/winograd/winograd_output_transform.hpp"
-#include "arm_compute/core/NEON/kernels/convolution/common/arm.hpp"
-
-namespace
-{
-
-template <bool Specialized, int PadRight=0>
-void winograd_output_transform_6_3_fp32_process_tile(
- const int n_channels,
- const float* const matrix_base,
- const int matrix_stride,
- const float* const biases,
- float* const output,
- const int output_row_stride,
- const int output_col_stride,
- const int _pad_bottom,
- const int _pad_right
-)
-{
- (void) output_row_stride;
- (void) _pad_bottom;
- constexpr int output_tile_cols = 6;
- constexpr int inner_tile_cols = 8;
-
- const int pad_right = Specialized ? PadRight : _pad_right;
- const int cells_j = output_tile_cols - pad_right;
-
- // Construct a map to the output cells
- float *outptrs[cells_j];
- for (int j = 0; j < cells_j; j++)
- {
- outptrs[j] = output + j*output_col_stride;
- }
- const float *inptr = matrix_base;
- const float *bptr = biases;
-
- // For each channel of the output
- int channels_remaining = n_channels;
-#ifdef __arm_any__
- for (; channels_remaining >= 4; channels_remaining -= 4)
- {
- // Matrices used and computed during this transform
- float32x4_t F[inner_tile_cols], f[output_tile_cols], b = vdupq_n_f32(0.0f);
-
- // Read a 1x8 tile in the Winograd domain
- for (int j = 0; j < inner_tile_cols; j++)
- {
- F[j] = vld1q_f32(inptr + j*matrix_stride);
- }
- inptr += 4;
-
- f[0] = vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmulq_n_f32(F[6], 1), F[5], 1), F[4], 1), F[3], 1), F[2], 1), F[1], 1), F[0], 1);
- f[1] = vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmulq_n_f32(F[2], 1), F[6], 3), F[4], 2), F[3], -2), F[5], -3), F[1], -1);
- f[2] = vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmulq_n_f32(F[2], 1), F[1], 1), F[6], 9), F[5], 9), F[4], 4), F[3], 4);
- f[3] = vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmulq_n_f32(F[2], 1), F[6], 27), F[4], 8), F[3], -8), F[5], -27), F[1], -1);
- f[4] = vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmulq_n_f32(F[2], 1), F[1], 1), F[6], 81), F[5], 81), F[4], 16), F[3], 16);
- f[5] = vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmulq_n_f32(F[7], 1), F[2], 1), F[6], 243), F[4], 32), F[3], -32), F[5], -243), F[1], -1);
-
- // Write out the output tile
- if (bptr != 0)
- {
- b = vld1q_f32(bptr);
- bptr += 4;
- }
- for (int j = 0; j < cells_j; j++)
- {
- vst1q_f32(outptrs[j], f[j] + b);
- outptrs[j] += 4;
- }
- }
- for (; channels_remaining >= 2; channels_remaining -= 2)
- {
- // Matrices used and computed during this transform
- float32x2_t F[inner_tile_cols], f[output_tile_cols], b = vdup_n_f32(0.0f);
-
- // Read a 1x8 tile in the Winograd domain
- for (int j = 0; j < inner_tile_cols; j++)
- {
- F[j] = vld1_f32(inptr + j*matrix_stride);
- }
- inptr += 2;
-
- f[0] = vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmul_n_f32(F[6], 1), F[5], 1), F[4], 1), F[3], 1), F[2], 1), F[1], 1), F[0], 1);
- f[1] = vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmul_n_f32(F[2], 1), F[6], 3), F[4], 2), F[3], -2), F[5], -3), F[1], -1);
- f[2] = vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmul_n_f32(F[2], 1), F[1], 1), F[6], 9), F[5], 9), F[4], 4), F[3], 4);
- f[3] = vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmul_n_f32(F[2], 1), F[6], 27), F[4], 8), F[3], -8), F[5], -27), F[1], -1);
- f[4] = vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmul_n_f32(F[2], 1), F[1], 1), F[6], 81), F[5], 81), F[4], 16), F[3], 16);
- f[5] = vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmul_n_f32(F[7], 1), F[2], 1), F[6], 243), F[4], 32), F[3], -32), F[5], -243), F[1], -1);
-
- // Write out the output tile
- if (bptr != 0)
- {
- b = vld1_f32(bptr);
- bptr += 2;
- }
- for (int j = 0; j < cells_j; j++)
- {
- vst1_f32(outptrs[j], f[j] + b);
- outptrs[j] += 2;
- }
- }
-#endif // __arm_any__
- for (; channels_remaining; channels_remaining--)
- {
- // Matrices used and computed during this transform
- float F[inner_tile_cols], f[output_tile_cols], b = 0.0f;
-
- // Read a 1x8 tile in the Winograd domain
- for (int j = 0; j < inner_tile_cols; j++)
- {
- F[j] = *(inptr + j*matrix_stride);
- }
- inptr++;
-
- f[0] = F[0]*1 + F[1]*1 + F[2]*1 + F[3]*1 + F[4]*1 + F[5]*1 + F[6]*1;
- f[1] = F[1]*-1 + F[5]*-3 + F[3]*-2 + F[4]*2 + F[6]*3 + F[2]*1;
- f[2] = F[3]*4 + F[4]*4 + F[5]*9 + F[6]*9 + F[1]*1 + F[2]*1;
- f[3] = F[1]*-1 + F[5]*-27 + F[3]*-8 + F[4]*8 + F[6]*27 + F[2]*1;
- f[4] = F[3]*16 + F[4]*16 + F[5]*81 + F[6]*81 + F[1]*1 + F[2]*1;
- f[5] = F[1]*-1 + F[5]*-243 + F[3]*-32 + F[4]*32 + F[6]*243 + F[2]*1 + F[7]*1;
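-
- // Written as a matrix, f = A F where the rows of the 6x8 matrix A are the
- // coefficient sets above: powers (up to degree 5) of the sample points
- // 0, +/-1, +/-2, +/-3, plus a final column for the point at infinity.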
-
- // Write out the output tile
- if (bptr != 0)
- {
- b = *(bptr++);
- }
- for (int j = 0; j < cells_j; j++)
- {
- *(outptrs[j]++) = f[j] + b;
- }
- }
-}
-
-} // namespace (anonymous)
-
-namespace winograd
-{
-using Tiles = OutputTransformImplTiles<1, 3, 1, 8, float>;
-
-template <>
-const Tiles::TileFn Tiles::tilefn_unpadded = winograd_output_transform_6_3_fp32_process_tile<true>;
-
-template <>
-const Tiles::TileFn Tiles::tilefn_right_padded[n_pad_right] = {
- winograd_output_transform_6_3_fp32_process_tile<true, 1>,
- winograd_output_transform_6_3_fp32_process_tile<true, 2>,
- winograd_output_transform_6_3_fp32_process_tile<true, 3>,
- winograd_output_transform_6_3_fp32_process_tile<true, 4>,
- winograd_output_transform_6_3_fp32_process_tile<true, 5>,
-};
-
-template class OutputTransform<1, 3, 1, 8, float>;
-template class OutputTransform<3, 1, 8, 1, float>;
-} // namespace winograd
diff --git a/src/core/NEON/kernels/convolution/winograd/transforms/weights_2_7_fp32.cpp b/src/core/NEON/kernels/convolution/winograd/transforms/weights_2_7_fp32.cpp
deleted file mode 100644
index 85cf418656..0000000000
--- a/src/core/NEON/kernels/convolution/winograd/transforms/weights_2_7_fp32.cpp
+++ /dev/null
@@ -1,124 +0,0 @@
-/*
- * Copyright (c) 2017 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include "arm_compute/core/NEON/kernels/convolution/common/arm.hpp"
-#include "arm_compute/core/NEON/kernels/convolution/winograd/winograd_gemm.hpp"
-#include "arm_compute/core/NEON/kernels/convolution/winograd/transforms/kernel.hpp"
-
-namespace winograd
-{
- template <>
- template <>
- void WinogradGEMM<1, 2, 1, 7>::WeightsTransform<float>::execute(
- const int n_output_channels,
- const int n_input_channels,
- const float* const input, // NOTE: Data in HWIO order
- float* const output,
- const int matrix_stride,
- const int matrix_row_stride
- )
- {
- // Get pointers to each cell of the weight tensor
- const auto weight_col_stride = n_input_channels * n_output_channels;
- const float *inptrs[kernel_cols];
- for (int j = 0; j < kernel_cols; j++)
- {
- inptrs[j] = input + j*weight_col_stride;
- }
-
- // For each input channel
- for (int ic = 0; ic < n_input_channels; ic++)
- {
- float *outptr = output + ic * matrix_row_stride;
-
- // For each output channel
- int channels_remaining = n_output_channels;
- for (; channels_remaining; channels_remaining--)
- {
- // Matrices used and computed in this kernel
- float w[kernel_cols], V[inner_tile_cols];
-
- // Read weights
- for (int j = 0; j < kernel_cols; j++)
- {
- w[j] = *(inptrs[j]++);
- }
-
- // Compute V = w WT
- V[0] = (w[0]*-1) / 36.0f;
- V[1] = (w[1]*-1 + w[3]*-1 + w[5]*-1 + w[0]*1 + w[2]*1 + w[4]*1 + w[6]*1) / 48.0f;
- V[2] = (w[0]*1 + w[1]*1 + w[2]*1 + w[3]*1 + w[4]*1 + w[5]*1 + w[6]*1) / 48.0f;
- V[3] = (w[0]*-1 + w[6]*-64 + w[4]*-16 + w[2]*-4 + w[1]*2 + w[3]*8 + w[5]*32) / 120.0f;
- V[4] = (w[0]*-1 + w[6]*-64 + w[5]*-32 + w[4]*-16 + w[3]*-8 + w[2]*-4 + w[1]*-2) / 120.0f;
- V[5] = (w[5]*-243 + w[3]*-27 + w[1]*-3 + w[2]*9 + w[4]*81 + w[6]*729 + w[0]*1) / 720.0f;
- V[6] = (w[1]*3 + w[2]*9 + w[3]*27 + w[4]*81 + w[5]*243 + w[6]*729 + w[0]*1) / 720.0f;
- V[7] = (w[6]*1) / 1.0f;
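-
- // In other words, each entry of V is a scaled (and in some rows negated)
- // evaluation of the seven kernel taps at one of the sample points
- // {0, +/-1, +/-2, +/-3, inf}, with row scale factors 1/36, 1/48, 1/48,
- // 1/120, 1/120, 1/720, 1/720 and 1, matching the 1x8 tile transforms.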
-
- // Store the transformed weights
- for (int j = 0; j < inner_tile_cols; j++)
- {
- *(outptr + j*matrix_stride) = V[j];
- }
- outptr++;
- }
- }
- }
-
- template <>
- template <>
- int WinogradGEMM<1, 2, 1, 7>::WeightsTransform<float>::ops_performed(const KernelShape &shape)
- {
- (void) shape;
- return 0; // TODO
- }
-
- template <>
- template <>
- void WinogradGEMM<2, 1, 7, 1>::WeightsTransform<float>::execute(
- const int n_output_channels,
- const int n_input_channels,
- const float* const input, // NOTE: Data in HWIO order
- float* const output,
- const int matrix_stride,
- const int matrix_row_stride
- )
- {
- // Redirect to the 1xN implementation
- WinogradGEMM<1, 2, 1, 7>::template WeightsTransform<float>::execute(
- n_output_channels, n_input_channels, input, output, matrix_stride,
- matrix_row_stride
- );
- }
-
- template <>
- template <>
- int WinogradGEMM<2, 1, 7, 1>::WeightsTransform<float>::ops_performed(const KernelShape &shape)
- {
- (void) shape;
- return 0; // TODO
- }
-
- template struct WinogradGEMM<1, 2, 1, 7>::WeightsTransform<float>;
- template struct WinogradGEMM<2, 1, 7, 1>::WeightsTransform<float>;
-} // namespace winograd
diff --git a/src/core/NEON/kernels/convolution/winograd/transforms/weights_2x2_3x3_fp32.cpp b/src/core/NEON/kernels/convolution/winograd/transforms/weights_2x2_3x3_fp32.cpp
deleted file mode 100644
index 6c71461f81..0000000000
--- a/src/core/NEON/kernels/convolution/winograd/transforms/weights_2x2_3x3_fp32.cpp
+++ /dev/null
@@ -1,228 +0,0 @@
-/*
- * Copyright (c) 2017 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include "arm_compute/core/NEON/kernels/convolution/common/arm.hpp"
-#include "arm_compute/core/NEON/kernels/convolution/winograd/winograd_gemm.hpp"
-#include "arm_compute/core/NEON/kernels/convolution/winograd/transforms/kernel.hpp"
-
-namespace winograd
-{
- template <>
- template <>
- void WinogradGEMM<2, 2, 3, 3>::WeightsTransform<float>::execute(
- const int n_output_channels,
- const int n_input_channels,
- const float* const input,
- float* const output,
- const int matrix_stride,
- const int matrix_row_stride
- )
- {
- constexpr int inner_tile_i = 4;
- constexpr int inner_tile_j = 4;
-
- // Get pointers to each cell of the weight tensor
- const auto weight_col_stride = n_input_channels * n_output_channels;
- const auto weight_row_stride = 3 * weight_col_stride;
- const float *inptrs[3][3];
- for (int i = 0; i < 3; i++)
- {
- for (int j = 0; j < 3; j++)
- {
- inptrs[i][j] = input + i*weight_row_stride + j*weight_col_stride;
- }
- }
-
- // For each input channel
- for (int ic = 0; ic < n_input_channels; ic++)
- {
- float *outptr = output + ic * matrix_row_stride;
-
- // For each output channel
- int channels_remaining = n_output_channels;
-#ifdef __aarch64__
- for (; channels_remaining >= 4; channels_remaining -= 4)
- {
- // Matrices used and computed in this kernel
- float32x4_t w[3][3], Ww[inner_tile_i][3], V[inner_tile_i][inner_tile_j];
-
- // Read weights
- for (int i = 0; i < 3; i++)
- {
- for (int j = 0; j < 3; j++)
- {
- w[i][j] = vld1q_f32(inptrs[i][j]);
- inptrs[i][j] += 4;
- }
- }
-
- // Compute the matrix W w
- for (int j = 0; j < 3; j++)
- {
- Ww[0][j] = w[0][j];
-
- // Ww[1][j] = 0.5*(w[0][j] + w[1][j] + w[2][j]);
- Ww[1][j] = vmulq_n_f32(vaddq_f32(vaddq_f32(w[0][j], w[1][j]), w[2][j]), 0.5f);
-
- // Ww[2][j] = 0.5*(w[0][j] - w[1][j] + w[2][j]);
- Ww[2][j] = vmulq_n_f32(vaddq_f32(vsubq_f32(w[0][j], w[1][j]), w[2][j]), 0.5f);
-
- Ww[3][j] = w[2][j];
- }
-
- // Compute V = W w WT
- for (int i = 0; i < inner_tile_i; i++)
- {
- V[i][0] = Ww[i][0];
-
- // V[i][1] = 0.5*(Ww[i][0] + Ww[i][1] + Ww[i][2]);
- V[i][1] = vmulq_n_f32(vaddq_f32(vaddq_f32(Ww[i][0], Ww[i][1]), Ww[i][2]), 0.5f);
-
- // V[i][2] = 0.5*(Ww[i][0] - Ww[i][1] + Ww[i][2]);
- V[i][2] = vmulq_n_f32(vaddq_f32(vsubq_f32(Ww[i][0], Ww[i][1]), Ww[i][2]), 0.5f);
-
- V[i][3] = Ww[i][2];
- }
-
- // Store the transformed weights
- for (int i = 0, m = 0; i < inner_tile_i; i++)
- {
- for (int j = 0; j < inner_tile_j; j++, m++)
- {
- vst1q_f32(outptr + m*matrix_stride, V[i][j]);
- }
- }
- outptr += 4;
- }
-#endif // __aarch64__
-#ifdef __arm_any__
- for (; channels_remaining >= 2; channels_remaining -= 2)
- {
- // Matrices used and computed in this kernel
- float32x2_t w[3][3], Ww[inner_tile_i][3], V[inner_tile_i][inner_tile_j];
-
- // Read weights
- for (int i = 0; i < 3; i++)
- {
- for (int j = 0; j < 3; j++)
- {
- w[i][j] = vld1_f32(inptrs[i][j]);
- inptrs[i][j] += 2;
- }
- }
-
- // Compute the matrix W w
- for (int j = 0; j < 3; j++)
- {
- Ww[0][j] = w[0][j];
-
- // Ww[1][j] = 0.5*(w[0][j] + w[1][j] + w[2][j]);
- Ww[1][j] = vmul_n_f32(vadd_f32(vadd_f32(w[0][j], w[1][j]), w[2][j]), 0.5f);
-
- // Ww[2][j] = 0.5*(w[0][j] - w[1][j] + w[2][j]);
- Ww[2][j] = vmul_n_f32(vadd_f32(vsub_f32(w[0][j], w[1][j]), w[2][j]), 0.5f);
-
- Ww[3][j] = w[2][j];
- }
-
- // Compute V = W w WT
- for (int i = 0; i < inner_tile_i; i++)
- {
- V[i][0] = Ww[i][0];
-
- // V[i][1] = 0.5*(Ww[i][0] + Ww[i][1] + Ww[i][2]);
- V[i][1] = vmul_n_f32(vadd_f32(vadd_f32(Ww[i][0], Ww[i][1]), Ww[i][2]), 0.5f);
-
- // V[i][2] = 0.5*(Ww[i][0] - Ww[i][1] + Ww[i][2]);
- V[i][2] = vmul_n_f32(vadd_f32(vsub_f32(Ww[i][0], Ww[i][1]), Ww[i][2]), 0.5f);
-
- V[i][3] = Ww[i][2];
- }
-
- // Store the transformed weights
- for (int i = 0, m = 0; i < inner_tile_i; i++)
- {
- for (int j = 0; j < inner_tile_j; j++, m++)
- {
- vst1_f32(outptr + m*matrix_stride, V[i][j]);
- }
- }
- outptr += 2;
- }
-#endif // __arm_any__
- for (; channels_remaining; channels_remaining--)
- {
- // Matrices used and computed in this kernel
- float w[3][3], Ww[inner_tile_i][3], V[inner_tile_i][inner_tile_j];
-
- // Read weights
- for (int i = 0; i < 3; i++)
- {
- for (int j = 0; j < 3; j++)
- {
- w[i][j] = *(inptrs[i][j]++);
- }
- }
-
- // Compute the matrix W w
- for (int j = 0; j < 3; j++)
- {
- Ww[0][j] = w[0][j];
- Ww[1][j] = 0.5*(w[0][j] + w[1][j] + w[2][j]);
- Ww[2][j] = 0.5*(w[0][j] - w[1][j] + w[2][j]);
- Ww[3][j] = w[2][j];
- }
-
- // Compute V = W w WT
- for (int i = 0; i < inner_tile_i; i++)
- {
- V[i][0] = Ww[i][0];
- V[i][1] = 0.5*(Ww[i][0] + Ww[i][1] + Ww[i][2]);
- V[i][2] = 0.5*(Ww[i][0] - Ww[i][1] + Ww[i][2]);
- V[i][3] = Ww[i][2];
- }
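-
- // In matrix form this is V = W w WT with the 4x3 kernel transform
- //   W = [ 1    0    0  ]
- //       [ 1/2  1/2  1/2]
- //       [ 1/2 -1/2  1/2]
- //       [ 0    0    1  ],
- // the standard F(2x2, 3x3) Winograd weight transform.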
-
- // Store the transformed weights
- for (int i = 0, m = 0; i < inner_tile_i; i++)
- {
- for (int j = 0; j < inner_tile_j; j++, m++)
- {
- *(outptr + m*matrix_stride) = V[i][j];
- }
- }
- outptr++;
- }
- }
- }
-
- template <>
- template <>
- int WinogradGEMM<2, 2, 3, 3>::WeightsTransform<float>::ops_performed(const KernelShape &shape)
- {
- const int channel_prod = shape.n_input_channels * shape.n_output_channels;
- return 2 * 18 * channel_prod;
- }
-
- template struct WinogradGEMM<2, 2, 3, 3>::WeightsTransform<float>;
-} // namespace winograd
diff --git a/src/core/NEON/kernels/convolution/winograd/transforms/weights_2x2_5x5_fp32.cpp b/src/core/NEON/kernels/convolution/winograd/transforms/weights_2x2_5x5_fp32.cpp
deleted file mode 100644
index 2f4f6e1ba2..0000000000
--- a/src/core/NEON/kernels/convolution/winograd/transforms/weights_2x2_5x5_fp32.cpp
+++ /dev/null
@@ -1,408 +0,0 @@
-/*
- * Copyright (c) 2017 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include "arm_compute/core/NEON/kernels/convolution/common/arm.hpp"
-#include "arm_compute/core/NEON/kernels/convolution/winograd/winograd_gemm.hpp"
-#include "arm_compute/core/NEON/kernels/convolution/winograd/transforms/kernel.hpp"
-
-namespace winograd
-{
- template <>
- template <>
- void WinogradGEMM<2, 2, 5, 5>::WeightsTransform<float>::execute(
- const int n_output_channels,
- const int n_input_channels,
- const float* const input,
- float* const output,
- const int matrix_stride,
- const int matrix_row_stride
- )
- {
- // Get pointers to each cell of the weight tensor
- const auto weight_col_stride = n_input_channels * n_output_channels;
- const auto weight_row_stride = 5 * weight_col_stride;
- const float *inptrs[5][5];
- for (int i = 0; i < 5; i++)
- {
- for (int j = 0; j < 5; j++)
- {
- inptrs[i][j] = input + i*weight_row_stride + j*weight_col_stride;
- }
- }
-
- // For each input channel
- for (int ic = 0; ic < n_input_channels; ic++)
- {
- float *outptr = output + ic * matrix_row_stride;
-
- // For each output channel
- int channels_remaining = n_output_channels;
-#ifdef __aarch64__
- for (; channels_remaining >= 4; channels_remaining -= 4)
- {
- // Matrices used and computed in this kernel
- float32x4_t w[5][5], Ww[6][5], V[6][6];
-
- // Read weights
- for (int i = 0; i < 5; i++)
- {
- for (int j = 0; j < 5; j++)
- {
- w[i][j] = vld1q_f32(inptrs[i][j]);
- inptrs[i][j] += 4;
- }
- }
-
- // Compute the matrix W w
- for (int j = 0; j < 5; j++)
- {
- // Ww[0][j] = w[0][j]/4.0f;
- Ww[0][j] = vmulq_n_f32(w[0][j], 1.0f/4.0f);
-
- // Ww[1][j] = -( w[0][j] + w[1][j] + w[2][j] + w[3][j] + w[4][j])/6.0f;
- Ww[1][j] = vmulq_n_f32(
- vaddq_f32(
- vaddq_f32(
- vaddq_f32(w[1][j], w[0][j]),
- vaddq_f32(w[3][j], w[2][j])
- ),
- w[4][j]
- ),
- -1.0f/6.0f
- );
-
- // Ww[2][j] = +(-w[0][j] + w[1][j] - w[2][j] + w[3][j] - w[4][j])/6.0f;
- // Ww[2][j] = ((w[1][j] - w[0][j]) + (w[3][j] - w[2][j]) - w[4][j])/6.0f;
- Ww[2][j] = vmulq_n_f32(
- vsubq_f32(
- vaddq_f32(
- vsubq_f32(w[1][j], w[0][j]),
- vsubq_f32(w[3][j], w[2][j])
- ),
- w[4][j]
- ),
- 1.0f/6.0f
- );
-
- // Ww[3][j] = (w[0][j]/8.0f + w[1][j]/4.0f + w[2][j]/2.0f + w[3][j] + 2*w[4][j])/3.0f;
- Ww[3][j] = vmulq_n_f32(
- vmlaq_n_f32(
- vaddq_f32(
- vaddq_f32(vmulq_n_f32(w[0][j], 1.0f/8.0f), vmulq_n_f32(w[1][j], 1.0f/4.0f)),
- vaddq_f32(vmulq_n_f32(w[2][j], 1.0f/2.0f), w[3][j])
- ),
- w[4][j], 2.0f
- ),
- 1.0f/3.0f
- );
-
- // Ww[4][j] = (w[0][j]/8.0f - w[1][j]/4.0f + w[2][j]/2.0f - w[3][j] + 2*w[4][j])/3.0f;
- Ww[4][j] = vmulq_n_f32(
- vmlaq_n_f32(
- vaddq_f32(
- vsubq_f32(vmulq_n_f32(w[0][j], 1.0f/8.0f), vmulq_n_f32(w[1][j], 1.0f/4.0f)),
- vsubq_f32(vmulq_n_f32(w[2][j], 1.0f/2.0f), w[3][j])
- ),
- w[4][j], 2.0f
- ),
- 1.0f/3.0f
- );
-
- // Ww[5][j] = w[4][j];
- Ww[5][j] = w[4][j];
- }
-
- // Compute V = W w WT
- for (int i = 0; i < 6; i++)
- {
- // V[i][0] = Ww[i][0]/4.0f;
- V[i][0] = vmulq_n_f32(Ww[i][0], 1.0f/4.0f);
-
- // V[i][1] = -( Ww[i][0] + Ww[i][1] + Ww[i][2] + Ww[i][3] + Ww[i][4])/6.0f;
- V[i][1] = vmulq_n_f32(
- vaddq_f32(
- vaddq_f32(
- vaddq_f32(Ww[i][1], Ww[i][0]),
- vaddq_f32(Ww[i][3], Ww[i][2])
- ),
- Ww[i][4]
- ),
- -1.0f/6.0f
- );
-
- // V[i][2] = +(-Ww[i][0] + Ww[i][1] - Ww[i][2] + Ww[i][3] - Ww[i][4])/6.0f;
- // V[i][2] = ((Ww[i][1] - Ww[i][0]) + (Ww[i][3] - Ww[i][2]) - Ww[i][4])/6.0f;
- V[i][2] = vmulq_n_f32(
- vsubq_f32(
- vaddq_f32(
- vsubq_f32(Ww[i][1], Ww[i][0]),
- vsubq_f32(Ww[i][3], Ww[i][2])
- ),
- Ww[i][4]
- ),
- 1.0f/6.0f
- );
-
- // V[i][3] = (Ww[i][0]/8.0f + Ww[i][1]/4.0f + Ww[i][2]/2.0f + Ww[i][3] + 2*Ww[i][4])/3.0f;
- V[i][3] = vmulq_n_f32(
- vmlaq_n_f32(
- vaddq_f32(
- vaddq_f32(vmulq_n_f32(Ww[i][0], 1.0f/8.0f), vmulq_n_f32(Ww[i][1], 1.0f/4.0f)),
- vaddq_f32(vmulq_n_f32(Ww[i][2], 1.0f/2.0f), Ww[i][3])
- ),
- Ww[i][4], 2.0f
- ),
- 1.0f/3.0f
- );
-
- // V[i][4] = (Ww[i][0]/8.0f - Ww[i][1]/4.0f + Ww[i][2]/2.0f - Ww[i][3] + 2*Ww[i][4])/3.0f;
- V[i][4] = vmulq_n_f32(
- vmlaq_n_f32(
- vaddq_f32(
- vsubq_f32(vmulq_n_f32(Ww[i][0], 1.0f/8.0f), vmulq_n_f32(Ww[i][1], 1.0f/4.0f)),
- vsubq_f32(vmulq_n_f32(Ww[i][2], 1.0f/2.0f), Ww[i][3])
- ),
- Ww[i][4], 2.0f
- ),
- 1.0f/3.0f
- );
-
- // V[i][5] = Ww[i][4];
- V[i][5] = Ww[i][4];
- }
-
- // Store the transformed weights
- for (int i = 0, m = 0; i < 6; i++)
- {
- for (int j = 0; j < 6; j++, m++)
- {
- vst1q_f32(outptr + m*matrix_stride, V[i][j]);
- }
- }
- outptr += 4;
- }
-#endif // __aarch64__
-#ifdef __arm_any__
- for (; channels_remaining >= 2; channels_remaining -= 2)
- {
- // Matrices used and computed in this kernel
- float32x2_t w[5][5], Ww[6][5], V[6][6];
-
- // Read weights
- for (int i = 0; i < 5; i++)
- {
- for (int j = 0; j < 5; j++)
- {
- w[i][j] = vld1_f32(inptrs[i][j]);
- inptrs[i][j] += 2;
- }
- }
-
- // Compute the matrix W w
- for (int j = 0; j < 5; j++)
- {
- // Ww[0][j] = w[0][j]/4.0f;
- Ww[0][j] = vmul_n_f32(w[0][j], 1.0f/4.0f);
-
- // Ww[1][j] = -( w[0][j] + w[1][j] + w[2][j] + w[3][j] + w[4][j])/6.0f;
- Ww[1][j] = vmul_n_f32(
- vadd_f32(
- vadd_f32(
- vadd_f32(w[1][j], w[0][j]),
- vadd_f32(w[3][j], w[2][j])
- ),
- w[4][j]
- ),
- -1.0f/6.0f
- );
-
- // Ww[2][j] = +(-w[0][j] + w[1][j] - w[2][j] + w[3][j] - w[4][j])/6.0f;
- // Ww[2][j] = ((w[1][j] - w[0][j]) + (w[3][j] - w[2][j]) - w[4][j])/6.0f;
- Ww[2][j] = vmul_n_f32(
- vsub_f32(
- vadd_f32(
- vsub_f32(w[1][j], w[0][j]),
- vsub_f32(w[3][j], w[2][j])
- ),
- w[4][j]
- ),
- 1.0f/6.0f
- );
-
- // Ww[3][j] = (w[0][j]/8.0f + w[1][j]/4.0f + w[2][j]/2.0f + w[3][j] + 2*w[4][j])/3.0f;
- Ww[3][j] = vmul_n_f32(
- vmla_n_f32(
- vadd_f32(
- vadd_f32(vmul_n_f32(w[0][j], 1.0f/8.0f), vmul_n_f32(w[1][j], 1.0f/4.0f)),
- vadd_f32(vmul_n_f32(w[2][j], 1.0f/2.0f), w[3][j])
- ),
- w[4][j], 2.0f
- ),
- 1.0f/3.0f
- );
-
- // Ww[4][j] = (w[0][j]/8.0f - w[1][j]/4.0f + w[2][j]/2.0f - w[3][j] + 2*w[4][j])/3.0f;
- Ww[4][j] = vmul_n_f32(
- vmla_n_f32(
- vadd_f32(
- vsub_f32(vmul_n_f32(w[0][j], 1.0f/8.0f), vmul_n_f32(w[1][j], 1.0f/4.0f)),
- vsub_f32(vmul_n_f32(w[2][j], 1.0f/2.0f), w[3][j])
- ),
- w[4][j], 2.0f
- ),
- 1.0f/3.0f
- );
-
- // Ww[5][j] = w[4][j];
- Ww[5][j] = w[4][j];
- }
-
- // Compute V = W w WT
- for (int i = 0; i < 6; i++)
- {
- // V[i][0] = Ww[i][0]/4.0f;
- V[i][0] = vmul_n_f32(Ww[i][0], 1.0f/4.0f);
-
- // V[i][1] = -( Ww[i][0] + Ww[i][1] + Ww[i][2] + Ww[i][3] + Ww[i][4])/6.0f;
- V[i][1] = vmul_n_f32(
- vadd_f32(
- vadd_f32(
- vadd_f32(Ww[i][1], Ww[i][0]),
- vadd_f32(Ww[i][3], Ww[i][2])
- ),
- Ww[i][4]
- ),
- -1.0f/6.0f
- );
-
- // V[i][2] = +(-Ww[i][0] + Ww[i][1] - Ww[i][2] + Ww[i][3] - Ww[i][4])/6.0f;
- // V[i][2] = ((Ww[i][1] - Ww[i][0]) + (Ww[i][3] - Ww[i][2]) - Ww[i][4])/6.0f;
- V[i][2] = vmul_n_f32(
- vsub_f32(
- vadd_f32(
- vsub_f32(Ww[i][1], Ww[i][0]),
- vsub_f32(Ww[i][3], Ww[i][2])
- ),
- Ww[i][4]
- ),
- 1.0f/6.0f
- );
-
- // V[i][3] = (Ww[i][0]/8.0f + Ww[i][1]/4.0f + Ww[i][2]/2.0f + Ww[i][3] + 2*Ww[i][4])/3.0f;
- V[i][3] = vmul_n_f32(
- vmla_n_f32(
- vadd_f32(
- vadd_f32(vmul_n_f32(Ww[i][0], 1.0f/8.0f), vmul_n_f32(Ww[i][1], 1.0f/4.0f)),
- vadd_f32(vmul_n_f32(Ww[i][2], 1.0f/2.0f), Ww[i][3])
- ),
- Ww[i][4], 2.0f
- ),
- 1.0f/3.0f
- );
-
- // V[i][4] = (Ww[i][0]/8.0f - Ww[i][1]/4.0f + Ww[i][2]/2.0f - Ww[i][3] + 2*Ww[i][4])/3.0f;
- V[i][4] = vmul_n_f32(
- vmla_n_f32(
- vadd_f32(
- vsub_f32(vmul_n_f32(Ww[i][0], 1.0f/8.0f), vmul_n_f32(Ww[i][1], 1.0f/4.0f)),
- vsub_f32(vmul_n_f32(Ww[i][2], 1.0f/2.0f), Ww[i][3])
- ),
- Ww[i][4], 2.0f
- ),
- 1.0f/3.0f
- );
-
- // V[i][5] = Ww[i][4];
- V[i][5] = Ww[i][4];
- }
-
- // Store the transformed weights
- for (int i = 0, m = 0; i < 6; i++)
- {
- for (int j = 0; j < 6; j++, m++)
- {
- vst1_f32(outptr + m*matrix_stride, V[i][j]);
- }
- }
- outptr += 2;
- }
-#endif // __arm_any__
- for (; channels_remaining; channels_remaining--)
- {
- // Matrices used and computed in this kernel
- float w[5][5], Ww[6][5], V[6][6];
-
- // Read weights
- for (int i = 0; i < 5; i++)
- {
- for (int j = 0; j < 5; j++)
- {
- w[i][j] = *(inptrs[i][j]++);
- }
- }
-
- // Compute the matrix W w
- for (int j = 0; j < 5; j++)
- {
- Ww[0][j] = w[0][j]/4.0f;
- Ww[1][j] = -( w[0][j] + w[1][j] + w[2][j] + w[3][j] + w[4][j])/6.0f;
- Ww[2][j] = +(-w[0][j] + w[1][j] - w[2][j] + w[3][j] - w[4][j])/6.0f;
- Ww[3][j] = (w[0][j]/8.0f + w[1][j]/4.0f + w[2][j]/2.0f + w[3][j] + 2*w[4][j])/3.0f;
- Ww[4][j] = (w[0][j]/8.0f - w[1][j]/4.0f + w[2][j]/2.0f - w[3][j] + 2*w[4][j])/3.0f;
- Ww[5][j] = w[4][j];
- }
-
- // Compute V = W w WT
- for (int i = 0; i < 6; i++)
- {
- V[i][0] = Ww[i][0]/4.0f;
- V[i][1] = -( Ww[i][0] + Ww[i][1] + Ww[i][2] + Ww[i][3] + Ww[i][4])/6.0f;
- V[i][2] = +(-Ww[i][0] + Ww[i][1] - Ww[i][2] + Ww[i][3] - Ww[i][4])/6.0f;
- V[i][3] = (Ww[i][0]/8.0f + Ww[i][1]/4.0f + Ww[i][2]/2.0f + Ww[i][3] + 2*Ww[i][4])/3.0f;
- V[i][4] = (Ww[i][0]/8.0f - Ww[i][1]/4.0f + Ww[i][2]/2.0f - Ww[i][3] + 2*Ww[i][4])/3.0f;
- V[i][5] = Ww[i][4];
- }
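-
- // As above, V = W w WT; the 6x5 matrix W is given row by row by the
- // coefficients just written out, and appears to use the same sample
- // points {0, +/-1, +/-2, inf} as the corresponding 6x6 output transform.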
-
- // Store the transformed weights
- for (int i = 0, m = 0; i < 6; i++)
- {
- for (int j = 0; j < 6; j++, m++)
- {
- *(outptr + m*matrix_stride) = V[i][j];
- }
- }
- outptr++;
- }
- }
- }
-
- template <>
- template <>
- int WinogradGEMM<2, 2, 5, 5>::WeightsTransform<float>::ops_performed(const KernelShape &shape)
- {
- (void) shape;
- return 0; // TODO
- }
-
- template class WinogradGEMM<2, 2, 5, 5>::WeightsTransform<float>;
-} // namespace winograd
diff --git a/src/core/NEON/kernels/convolution/winograd/transforms/weights_4_5_fp32.cpp b/src/core/NEON/kernels/convolution/winograd/transforms/weights_4_5_fp32.cpp
deleted file mode 100644
index 2f14e20142..0000000000
--- a/src/core/NEON/kernels/convolution/winograd/transforms/weights_4_5_fp32.cpp
+++ /dev/null
@@ -1,124 +0,0 @@
-/*
- * Copyright (c) 2017 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include "arm_compute/core/NEON/kernels/convolution/common/arm.hpp"
-#include "arm_compute/core/NEON/kernels/convolution/winograd/winograd_gemm.hpp"
-#include "arm_compute/core/NEON/kernels/convolution/winograd/transforms/kernel.hpp"
-
-namespace winograd
-{
- template <>
- template <>
- void WinogradGEMM<1, 4, 1, 5>::WeightsTransform<float>::execute(
- const int n_output_channels,
- const int n_input_channels,
- const float* const input, // NOTE: Data in HWIO order
- float* const output,
- const int matrix_stride,
- const int matrix_row_stride
- )
- {
- // Get pointers to each cell of the weight tensor
- const auto weight_col_stride = n_input_channels * n_output_channels;
- const float *inptrs[kernel_cols];
- for (int j = 0; j < kernel_cols; j++)
- {
- inptrs[j] = input + j*weight_col_stride;
- }
-
- // For each input channel
- for (int ic = 0; ic < n_input_channels; ic++)
- {
- float *outptr = output + ic * matrix_row_stride;
-
- // For each output channel
- int channels_remaining = n_output_channels;
- for (; channels_remaining; channels_remaining--)
- {
- // Matrices used and computed in this kernel
- float w[kernel_cols], V[inner_tile_cols];
-
- // Read weights
- for (int j = 0; j < kernel_cols; j++)
- {
- w[j] = *(inptrs[j]++);
- }
-
- // Compute V = w WT
- V[0] = (w[0]*-1) / 36;
- V[1] = (w[1]*-1 + w[3]*-1 + w[0]*1 + w[2]*1 + w[4]*1) / 48;
- V[2] = (w[0]*1 + w[1]*1 + w[2]*1 + w[3]*1 + w[4]*1) / 48;
- V[3] = (w[0]*-1 + w[4]*-16 + w[2]*-4 + w[1]*2 + w[3]*8) / 120;
- V[4] = (w[0]*-1 + w[4]*-16 + w[3]*-8 + w[2]*-4 + w[1]*-2) / 120;
- V[5] = (w[3]*-27 + w[1]*-3 + w[2]*9 + w[4]*81 + w[0]*1) / 720;
- V[6] = (w[1]*3 + w[2]*9 + w[3]*27 + w[4]*81 + w[0]*1) / 720;
- V[7] = (w[4]*1) / 1;
-
- // Store the transformed weights
- for (int j = 0; j < inner_tile_cols; j++)
- {
- *(outptr + j*matrix_stride) = V[j];
- }
- outptr++;
- }
- }
- }
-
- template <>
- template <>
- int WinogradGEMM<1, 4, 1, 5>::WeightsTransform<float>::ops_performed(const KernelShape &shape)
- {
- (void) shape;
- return 0; // TODO
- }
-
- template <>
- template <>
- void WinogradGEMM<4, 1, 5, 1>::WeightsTransform<float>::execute(
- const int n_output_channels,
- const int n_input_channels,
- const float* const input, // NOTE: Data in HWIO order
- float* const output,
- const int matrix_stride,
- const int matrix_row_stride
- )
- {
- // Redirect to the 1xN implementation
- WinogradGEMM<1, 4, 1, 5>::template WeightsTransform<float>::execute(
- n_output_channels, n_input_channels, input, output, matrix_stride,
- matrix_row_stride
- );
- }
-
- template <>
- template <>
- int WinogradGEMM<4, 1, 5, 1>::WeightsTransform<float>::ops_performed(const KernelShape &shape)
- {
- (void) shape;
- return 0; // TODO
- }
-
- template struct WinogradGEMM<1, 4, 1, 5>::WeightsTransform<float>;
- template struct WinogradGEMM<4, 1, 5, 1>::WeightsTransform<float>;
-}
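Similarly, the F(4, 5) row transform above amounts to V = G w, with the 8x5 matrix G (again, notation introduced only for exposition) factored as a row-wise scaling of a small integer matrix:

$$
G = \operatorname{diag}\!\left(-\tfrac{1}{36},\ \tfrac{1}{48},\ \tfrac{1}{48},\ -\tfrac{1}{120},\ -\tfrac{1}{120},\ \tfrac{1}{720},\ \tfrac{1}{720},\ 1\right)
\begin{bmatrix}
1 &  0 & 0 &   0 &  0 \\
1 & -1 & 1 &  -1 &  1 \\
1 &  1 & 1 &   1 &  1 \\
1 & -2 & 4 &  -8 & 16 \\
1 &  2 & 4 &   8 & 16 \\
1 & -3 & 9 & -27 & 81 \\
1 &  3 & 9 &  27 & 81 \\
0 &  0 & 0 &   0 &  1
\end{bmatrix}.
$$

The 5x1 (column) case reuses this implementation directly, as the redirect in the file shows.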
diff --git a/src/core/NEON/kernels/convolution/winograd/transforms/weights_4x4_3x3_fp32.cpp b/src/core/NEON/kernels/convolution/winograd/transforms/weights_4x4_3x3_fp32.cpp
deleted file mode 100644
index a56a475fc9..0000000000
--- a/src/core/NEON/kernels/convolution/winograd/transforms/weights_4x4_3x3_fp32.cpp
+++ /dev/null
@@ -1,266 +0,0 @@
-/*
- * Copyright (c) 2017 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include "arm_compute/core/NEON/kernels/convolution/common/arm.hpp"
-#include "arm_compute/core/NEON/kernels/convolution/winograd/winograd_gemm.hpp"
-#include "arm_compute/core/NEON/kernels/convolution/winograd/transforms/kernel.hpp"
-
-namespace winograd
-{
- /* Float implementation for kernel transform F(4x4, 3x3) */
- template <>
- template <>
- void WinogradGEMM<4, 4, 3, 3>::WeightsTransform<float>::execute(
- const int n_output_channels,
- const int n_input_channels,
- const float* const input, // NOTE: Data in HWIO order
- float* const output,
- const int matrix_stride,
- const int matrix_row_stride
- )
- {
- // Get pointers to each cell of the weight tensor
- const auto weight_col_stride = n_input_channels * n_output_channels;
- const auto weight_row_stride = 3 * weight_col_stride;
- const float *inptrs[3][3];
- for (int i = 0; i < 3; i++)
- {
- for (int j = 0; j < 3; j++)
- {
- inptrs[i][j] = input + i*weight_row_stride + j*weight_col_stride;
- }
- }
-
- // For each input channel
- for (int ic = 0; ic < n_input_channels; ic++)
- {
- float *outptr = output + ic * matrix_row_stride;
-
- // For each output channel
- int channels_remaining = n_output_channels;
-#ifdef __aarch64__
- for (; channels_remaining >= 4; channels_remaining -= 4)
- {
- // Matrices used and computed in this kernel
- float32x4_t w[3][3], Ww[6][3], V[6][6];
-
- // Read weights
- for (int i = 0; i < 3; i++)
- {
- for (int j = 0; j < 3; j++)
- {
- w[i][j] = vld1q_f32(inptrs[i][j]);
- inptrs[i][j] += 4;
- }
- }
-
- // Compute the matrix W w
- for (int j = 0; j < 3; j++)
- {
- // Ww[0][j] = 6*w[0][j];
- Ww[0][j] = vmulq_n_f32(w[0][j], 6.0);
-
- // Ww[1][j] = -4*w[0][j] + -4*w[1][j] + -4*w[2][j];
- Ww[1][j] = vmulq_n_f32(vaddq_f32(vaddq_f32(w[0][j], w[1][j]), w[2][j]), -4.0);
-
- // Ww[2][j] = -4*w[0][j] + 4*w[1][j] + -4*w[2][j];
- Ww[2][j] = vmulq_n_f32(vsubq_f32(vsubq_f32(w[1][j], w[0][j]), w[2][j]), 4.0);
-
- // Ww[3][j] = 1*w[0][j] + 2*w[1][j] + 4*w[2][j];
- Ww[3][j] = vmlaq_n_f32(vmlaq_n_f32(w[0][j], w[1][j], 2.0f), w[2][j], 4.0f);
-
- // Ww[4][j] = 1*w[0][j] + -2*w[1][j] + 4*w[2][j];
- Ww[4][j] = vmlaq_n_f32(vmlsq_n_f32(w[0][j], w[1][j], 2.0f), w[2][j], 4.0f);
-
- // Ww[5][j] = 24*w[2][j];
- Ww[5][j] = vmulq_n_f32(w[2][j], 24.0f);
- }
-
- // Compute V = W w WT
- for (int i = 0; i < 6; i++)
- {
- const float recip576 = 1.0f / 576.0f;
-
- // V[i][0] = 6*Ww[i][0];
- V[i][0] = vmulq_n_f32(vmulq_n_f32(Ww[i][0], 6.0), recip576);
-
- // V[i][1] = -4*Ww[i][0] + -4*Ww[i][1] + -4*Ww[i][2];
- V[i][1] = vmulq_n_f32(vmulq_n_f32(vaddq_f32(vaddq_f32(Ww[i][0], Ww[i][1]), Ww[i][2]), -4.0), recip576);
-
- // V[i][2] = -4*Ww[i][0] + 4*Ww[i][1] + -4*Ww[i][2];
- V[i][2] = vmulq_n_f32(vmulq_n_f32(vsubq_f32(vsubq_f32(Ww[i][1], Ww[i][0]), Ww[i][2]), 4.0), recip576);
-
- // V[i][3] = 1*Ww[i][0] + 2*Ww[i][1] + 4*Ww[i][2];
- V[i][3] = vmulq_n_f32(vmlaq_n_f32(vmlaq_n_f32(Ww[i][0], Ww[i][1], 2.0f), Ww[i][2], 4.0f), recip576);
-
- // V[i][4] = 1*Ww[i][0] + -2*Ww[i][1] + 4*Ww[i][2];
- V[i][4] = vmulq_n_f32(vmlaq_n_f32(vmlsq_n_f32(Ww[i][0], Ww[i][1], 2.0f), Ww[i][2], 4.0f), recip576);
-
- // V[i][5] = 24*Ww[i][2];
- V[i][5] = vmulq_n_f32(vmulq_n_f32(Ww[i][2], 24.0f), recip576);
- }
-
- // Store the transformed weights
- for (int i = 0, m = 0; i < 6; i++)
- {
- for (int j = 0; j < 6; j++, m++)
- {
- vst1q_f32(outptr + m*matrix_stride, V[i][j]);
- }
- }
- outptr += 4;
- }
-#endif // __aarch64__
-#ifdef __arm_any__
- for (; channels_remaining >= 2; channels_remaining -= 2)
- {
- // Matrices used and computed in this kernel
- float32x2_t w[3][3], Ww[6][3], V[6][6];
-
- // Read weights
- for (int i = 0; i < 3; i++)
- {
- for (int j = 0; j < 3; j++)
- {
- w[i][j] = vld1_f32(inptrs[i][j]);
- inptrs[i][j] += 2;
- }
- }
-
- // Compute the matrix W w
- for (int j = 0; j < 3; j++)
- {
- // Ww[0][j] = 6*w[0][j];
- Ww[0][j] = vmul_n_f32(w[0][j], 6.0);
-
- // Ww[1][j] = -4*w[0][j] + -4*w[1][j] + -4*w[2][j];
- Ww[1][j] = vmul_n_f32(vadd_f32(vadd_f32(w[0][j], w[1][j]), w[2][j]), -4.0);
-
- // Ww[2][j] = -4*w[0][j] + 4*w[1][j] + -4*w[2][j];
- Ww[2][j] = vmul_n_f32(vsub_f32(vsub_f32(w[1][j], w[0][j]), w[2][j]), 4.0);
-
- // Ww[3][j] = 1*w[0][j] + 2*w[1][j] + 4*w[2][j];
- Ww[3][j] = vmla_n_f32(vmla_n_f32(w[0][j], w[1][j], 2.0f), w[2][j], 4.0f);
-
- // Ww[4][j] = 1*w[0][j] + -2*w[1][j] + 4*w[2][j];
- Ww[4][j] = vmla_n_f32(vmls_n_f32(w[0][j], w[1][j], 2.0f), w[2][j], 4.0f);
-
- // Ww[5][j] = 24*w[2][j];
- Ww[5][j] = vmul_n_f32(w[2][j], 24.0f);
- }
-
- // Compute V = W w WT
- for (int i = 0; i < 6; i++)
- {
- const float recip576 = 1.0f / 576.0f;
-
- // V[i][0] = 6*Ww[i][0];
- V[i][0] = vmul_n_f32(vmul_n_f32(Ww[i][0], 6.0), recip576);
-
- // V[i][1] = -4*Ww[i][0] + -4*Ww[i][1] + -4*Ww[i][2];
- V[i][1] = vmul_n_f32(vmul_n_f32(vadd_f32(vadd_f32(Ww[i][0], Ww[i][1]), Ww[i][2]), -4.0), recip576);
-
- // V[i][2] = -4*Ww[i][0] + 4*Ww[i][1] + -4*Ww[i][2];
- V[i][2] = vmul_n_f32(vmul_n_f32(vsub_f32(vsub_f32(Ww[i][1], Ww[i][0]), Ww[i][2]), 4.0), recip576);
-
- // V[i][3] = 1*Ww[i][0] + 2*Ww[i][1] + 4*Ww[i][2];
- V[i][3] = vmul_n_f32(vmla_n_f32(vmla_n_f32(Ww[i][0], Ww[i][1], 2.0f), Ww[i][2], 4.0f), recip576);
-
- // V[i][4] = 1*Ww[i][0] + -2*Ww[i][1] + 4*Ww[i][2];
- V[i][4] = vmul_n_f32(vmla_n_f32(vmls_n_f32(Ww[i][0], Ww[i][1], 2.0f), Ww[i][2], 4.0f), recip576);
-
- // V[i][5] = 24*Ww[i][2];
- V[i][5] = vmul_n_f32(vmul_n_f32(Ww[i][2], 24.0f), recip576);
- }
-
- // Store the transformed weights
- for (int i = 0, m = 0; i < 6; i++)
- {
- for (int j = 0; j < 6; j++, m++)
- {
- vst1_f32(outptr + m*matrix_stride, V[i][j]);
- }
- }
- outptr += 2;
- }
-#endif // __arm_any__
- for (; channels_remaining; channels_remaining--)
- {
- // Matrices used and computed in this kernel
- float w[3][3], Ww[6][3], V[6][6];
-
- // Read weights
- for (int i = 0; i < 3; i++)
- {
- for (int j = 0; j < 3; j++)
- {
- w[i][j] = *(inptrs[i][j]++);
- }
- }
-
- // Compute the matrix W w
- for (int j = 0; j < 3; j++)
- {
- Ww[0][j] = 6*w[0][j];
- Ww[1][j] = -4*w[0][j] + -4*w[1][j] + -4*w[2][j];
- Ww[2][j] = -4*w[0][j] + 4*w[1][j] + -4*w[2][j];
- Ww[3][j] = 1*w[0][j] + 2*w[1][j] + 4*w[2][j];
- Ww[4][j] = 1*w[0][j] + -2*w[1][j] + 4*w[2][j];
- Ww[5][j] = 24*w[2][j];
- }
-
- // Compute V = W w WT
- for (int i = 0; i < 6; i++)
- {
- V[i][0] = ( 6*Ww[i][0]) / 576.0;
- V[i][1] = (-4*Ww[i][0] + -4*Ww[i][1] + -4*Ww[i][2]) / 576.0;
- V[i][2] = (-4*Ww[i][0] + 4*Ww[i][1] + -4*Ww[i][2]) / 576.0;
- V[i][3] = ( 1*Ww[i][0] + 2*Ww[i][1] + 4*Ww[i][2]) / 576.0;
- V[i][4] = ( 1*Ww[i][0] + -2*Ww[i][1] + 4*Ww[i][2]) / 576.0;
- V[i][5] = (24*Ww[i][2]) / 576.0;
- }
-
- // Store the transformed weights
- for (int i = 0, m = 0; i < 6; i++)
- {
- for (int j = 0; j < 6; j++, m++)
- {
- *(outptr + m*matrix_stride) = V[i][j];
- }
- }
- outptr++;
- }
- }
- }
-
- template <>
- template <>
- int WinogradGEMM<4, 4, 3, 3>::WeightsTransform<float>::ops_performed(const KernelShape &shape)
- {
- const int channel_prod = shape.n_input_channels * shape.n_output_channels;
- return 9 * 16 * channel_prod;
- }
-
- template struct WinogradGEMM<4, 4, 3, 3>::WeightsTransform<float>;
-}
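The F(4x4, 3x3) kernel above evaluates V = (G' w G'ᵀ) / 576, where G' is 24 times the usual transform matrix and 576 = 24 * 24 restores the scale. The following is a minimal single-channel scalar sketch of the same arithmetic (the helper name winograd_f4x4_3x3_weight_ref and the small demo are illustrative only, not part of these sources):

#include <cstdio>

// Single-channel scalar reference: V = (G' w G'^T) / 576, with G' = 24 * G.
static void winograd_f4x4_3x3_weight_ref(const float w[3][3], float V[6][6])
{
  // Rows of the unscaled transform, exactly as written out in the kernel above.
  const float G[6][3] = {
    { 6.f,  0.f,  0.f},
    {-4.f, -4.f, -4.f},
    {-4.f,  4.f, -4.f},
    { 1.f,  2.f,  4.f},
    { 1.f, -2.f,  4.f},
    { 0.f,  0.f, 24.f},
  };

  // Ww = G' w
  float Ww[6][3];
  for (int i = 0; i < 6; i++)
  {
    for (int j = 0; j < 3; j++)
    {
      Ww[i][j] = G[i][0]*w[0][j] + G[i][1]*w[1][j] + G[i][2]*w[2][j];
    }
  }

  // V = (G' w) G'^T, scaled by 1/576
  for (int i = 0; i < 6; i++)
  {
    for (int j = 0; j < 6; j++)
    {
      V[i][j] = (Ww[i][0]*G[j][0] + Ww[i][1]*G[j][1] + Ww[i][2]*G[j][2]) / 576.0f;
    }
  }
}

int main()
{
  // Toy kernel: a single 1 at the centre tap.
  const float w[3][3] = {{0.f, 0.f, 0.f}, {0.f, 1.f, 0.f}, {0.f, 0.f, 0.f}};
  float V[6][6];
  winograd_f4x4_3x3_weight_ref(w, V);
  printf("V[1][1] = %f\n", V[1][1]);  // (-4 * -4) / 576 = 1/36
  return 0;
}

In the kernel itself this corresponds to the scalar remainder loop; the vector paths apply the same arithmetic to four output channels at a time (float32x4_t, AArch64 only) or two at a time (float32x2_t).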
diff --git a/src/core/NEON/kernels/convolution/winograd/transforms/weights_6_3_fp32.cpp b/src/core/NEON/kernels/convolution/winograd/transforms/weights_6_3_fp32.cpp
deleted file mode 100644
index c560aa8c8f..0000000000
--- a/src/core/NEON/kernels/convolution/winograd/transforms/weights_6_3_fp32.cpp
+++ /dev/null
@@ -1,125 +0,0 @@
-/*
- * Copyright (c) 2017 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include "arm_compute/core/NEON/kernels/convolution/common/arm.hpp"
-#include "arm_compute/core/NEON/kernels/convolution/winograd/winograd_gemm.hpp"
-#include "arm_compute/core/NEON/kernels/convolution/winograd/transforms/kernel.hpp"
-
-
-namespace winograd
-{
- template <>
- template <>
- void WinogradGEMM<1, 6, 1, 3>::WeightsTransform<float>::execute(
- const int n_output_channels,
- const int n_input_channels,
- const float* const input, // NOTE: Data in HWIO order
- float* const output,
- const int matrix_stride,
- const int matrix_row_stride
- )
- {
- // Get pointers to each cell of the weight tensor
- const auto weight_col_stride = n_input_channels * n_output_channels;
- const float *inptrs[3];
- for (int j = 0; j < 3; j++)
- {
- inptrs[j] = input + j*weight_col_stride;
- }
-
- // For each input channel
- for (int ic = 0; ic < n_input_channels; ic++)
- {
- float *outptr = output + ic * matrix_row_stride;
-
- // For each output channel
- int channels_remaining = n_output_channels;
- for (; channels_remaining; channels_remaining--)
- {
- // Matrices used and computed in this kernel
- float w[3], V[inner_tile_cols];
-
- // Read weights
- for (int j = 0; j < 3; j++)
- {
- w[j] = *(inptrs[j]++);
- }
-
- // Compute V = w WT
- V[0] = (w[0]*-1) / 36.0f;
- V[1] = (w[1]*-1 + w[0]*1 + w[2]*1) / 48.0f;
- V[2] = (w[0]*1 + w[1]*1 + w[2]*1) / 48.0f;
- V[3] = (w[0]*-1 + w[2]*-4 + w[1]*2) / 120.0f;
- V[4] = (w[0]*-1 + w[2]*-4 + w[1]*-2) / 120.0f;
- V[5] = (w[1]*-3 + w[2]*9 + w[0]*1) / 720.0f;
- V[6] = (w[1]*3 + w[2]*9 + w[0]*1) / 720.0f;
- V[7] = (w[2]*1) / 1;
-
- // Store the transformed weights
- for (int j = 0; j < inner_tile_cols; j++)
- {
- *(outptr + j*matrix_stride) = V[j];
- }
- outptr++;
- }
- }
- }
-
- template <>
- template <>
- int WinogradGEMM<1, 6, 1, 3>::WeightsTransform<float>::ops_performed(const KernelShape &shape)
- {
- (void) shape;
- return 0; // TODO
- }
-
- template <>
- template <>
- void WinogradGEMM<6, 1, 3, 1>::WeightsTransform<float>::execute(
- const int n_output_channels,
- const int n_input_channels,
- const float* const input, // NOTE: Data in HWIO order
- float* const output,
- const int matrix_stride,
- const int matrix_row_stride
- )
- {
- // Redirect to the 1xN implementation
- WinogradGEMM<1, 6, 1, 3>::template WeightsTransform<float>::execute(
- n_output_channels, n_input_channels, input, output, matrix_stride,
- matrix_row_stride
- );
- }
-
- template <>
- template <>
- int WinogradGEMM<6, 1, 3, 1>::WeightsTransform<float>::ops_performed(const KernelShape &shape)
- {
- (void) shape;
- return 0; // TODO
- }
-
- template struct WinogradGEMM<1, 6, 1, 3>::WeightsTransform<float>;
- template struct WinogradGEMM<6, 1, 3, 1>::WeightsTransform<float>;
-}
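Finally, the F(6, 3) row transform above is V = G w with the 8x3 matrix G (notation introduced only for exposition), using the same row scalings as the F(4, 5) case:

$$
G = \operatorname{diag}\!\left(-\tfrac{1}{36},\ \tfrac{1}{48},\ \tfrac{1}{48},\ -\tfrac{1}{120},\ -\tfrac{1}{120},\ \tfrac{1}{720},\ \tfrac{1}{720},\ 1\right)
\begin{bmatrix}
1 &  0 & 0 \\
1 & -1 & 1 \\
1 &  1 & 1 \\
1 & -2 & 4 \\
1 &  2 & 4 \\
1 & -3 & 9 \\
1 &  3 & 9 \\
0 &  0 & 1
\end{bmatrix}.
$$

The 3x1 (column) variant again redirects to this row implementation.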