Diffstat (limited to 'src/core/NEON/kernels/convolution/winograd/winograd_transforms')
-rw-r--r--  src/core/NEON/kernels/convolution/winograd/winograd_transforms/input.hpp | 268
-rw-r--r--  src/core/NEON/kernels/convolution/winograd/winograd_transforms/input_1x8_fp32_fp32_integers.cpp | 158
-rw-r--r--  src/core/NEON/kernels/convolution/winograd/winograd_transforms/input_4x4_fp16_fp16_integers.cpp | 257
-rw-r--r--  src/core/NEON/kernels/convolution/winograd/winograd_transforms/input_4x4_fp32_fp32_integers.cpp | 255
-rw-r--r--  src/core/NEON/kernels/convolution/winograd/winograd_transforms/input_6x6_fp16_fp16_integers.cpp | 277
-rw-r--r--  src/core/NEON/kernels/convolution/winograd/winograd_transforms/input_6x6_fp32_fp32_integers.cpp | 1308
-rw-r--r--  src/core/NEON/kernels/convolution/winograd/winograd_transforms/kernel.hpp | 78
-rw-r--r--  src/core/NEON/kernels/convolution/winograd/winograd_transforms/output.hpp | 252
-rw-r--r--  src/core/NEON/kernels/convolution/winograd/winograd_transforms/output_2_7_fp32_fp32_integers.cpp | 143
-rw-r--r--  src/core/NEON/kernels/convolution/winograd/winograd_transforms/output_2x2_3x3_fp32_fp32_integers.cpp | 231
-rw-r--r--  src/core/NEON/kernels/convolution/winograd/winograd_transforms/output_2x2_5x5_fp32_fp32_integers.cpp | 225
-rw-r--r--  src/core/NEON/kernels/convolution/winograd/winograd_transforms/output_4_5_fp32_fp32_integers.cpp | 152
-rw-r--r--  src/core/NEON/kernels/convolution/winograd/winograd_transforms/output_4x4_3x3_fp16_fp16_integers.cpp | 255
-rw-r--r--  src/core/NEON/kernels/convolution/winograd/winograd_transforms/output_4x4_3x3_fp32_fp32_integers.cpp | 254
-rw-r--r--  src/core/NEON/kernels/convolution/winograd/winograd_transforms/output_6_3_fp32_fp32_integers.cpp | 155
-rw-r--r--  src/core/NEON/kernels/convolution/winograd/winograd_transforms/weights_2_7_fp32_fp32_integers.cpp | 90
-rw-r--r--  src/core/NEON/kernels/convolution/winograd/winograd_transforms/weights_2x2_3x3_fp32_fp32_integers.cpp | 220
-rw-r--r--  src/core/NEON/kernels/convolution/winograd/winograd_transforms/weights_2x2_5x5_fp32_fp32_integers.cpp | 401
-rw-r--r--  src/core/NEON/kernels/convolution/winograd/winograd_transforms/weights_4_5_fp32_fp32_integers.cpp | 90
-rw-r--r--  src/core/NEON/kernels/convolution/winograd/winograd_transforms/weights_4x4_3x3_fp16_fp16_integers.cpp | 259
-rw-r--r--  src/core/NEON/kernels/convolution/winograd/winograd_transforms/weights_4x4_3x3_fp32_fp32_integers.cpp | 257
-rw-r--r--  src/core/NEON/kernels/convolution/winograd/winograd_transforms/weights_6_3_fp32_fp32_integers.cpp | 90
22 files changed, 0 insertions, 5675 deletions
diff --git a/src/core/NEON/kernels/convolution/winograd/winograd_transforms/input.hpp b/src/core/NEON/kernels/convolution/winograd/winograd_transforms/input.hpp
deleted file mode 100644
index 8e4bebcd20..0000000000
--- a/src/core/NEON/kernels/convolution/winograd/winograd_transforms/input.hpp
+++ /dev/null
@@ -1,268 +0,0 @@
-/*
- * Copyright (c) 2017-2019 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#pragma once
-
-#include <algorithm>
-
-#include "padding.hpp"
-#include "utils.hpp"
-#include "winograd.hpp"
-
-#define MEMBERFN(RTYPE) template <\
- int InnerTileRows, int InnerTileCols,\
- typename TIn, typename TOut, WinogradRoots Roots\
-> RTYPE InputTransform<InnerTileRows, InnerTileCols, TIn, TOut, Roots>
-
-
-#define Nx1MEMBERFN(RTYPE) template <\
- int InnerTileRows, typename TIn, typename TOut, WinogradRoots Roots\
-> RTYPE InputTransform<InnerTileRows, 1, TIn, TOut, Roots>
-
-namespace winograd
-{
-
-MEMBERFN()::InputTransform(
- const int kernel_rows,
- const int kernel_cols,
- const int n_batches,
- const int n_rows,
- const int n_cols,
- const int n_channels,
- const int padding_top,
- const int padding_left,
- const int padding_bottom,
- const int padding_right
-) : _n_batches(n_batches), _n_rows(n_rows), _n_cols(n_cols), _n_channels(n_channels),
- _inptr(nullptr), _outptr(nullptr),
- _overlap_rows(kernel_rows - 1), _overlap_cols(kernel_cols - 1),
- _padding_top(padding_top), _padding_left(padding_left), _padding_bottom(padding_bottom), _padding_right(padding_right),
- _tiles_M(iceildiv(padding_top + n_rows + padding_bottom - kernel_rows + 1, InnerTileRows - kernel_rows + 1)),
- _tiles_N(iceildiv(padding_left + n_cols + padding_right - kernel_cols + 1, InnerTileCols - kernel_cols + 1)),
- _matrix_stride(0), _matrix_row_stride(0), _matrix_batch_stride(0),
- _in_col_stride(0), _in_row_stride(0), _in_batch_stride(0),
- _working_space_col_stride(n_channels),
- _working_space_row_stride(InnerTileCols * _working_space_col_stride),
- _working_space(nullptr)
-{
-}
-
-MEMBERFN(void)::set_input_tensor(const void* const inptr)
-{
- set_input_tensor(inptr, _n_channels);
-}
-
-MEMBERFN(void)::set_input_tensor(const void* const inptr, const int ldcol)
-{
- set_input_tensor(inptr, _n_cols * ldcol, ldcol);
-}
-
-MEMBERFN(void)::set_input_tensor(const void* const inptr, const int ldrow, const int ldcol)
-{
- set_input_tensor(inptr, _n_rows * ldrow, ldrow, ldcol);
-}
-
-MEMBERFN(void)::set_input_tensor(const void* const inptr, const int ldbatch, const int ldrow, const int ldcol)
-{
- _inptr = static_cast<const TIn *>(inptr);
- _in_batch_stride = ldbatch;
- _in_row_stride = ldrow;
- _in_col_stride = ldcol;
-}
-
-MEMBERFN(void)::set_output_matrices(void * const mptr, const int ldmatrix, const int ldrow)
-{
- _outptr = static_cast<TOut *>(mptr);
- _matrix_stride = ldmatrix;
- _matrix_row_stride = ldrow;
- _matrix_batch_stride = _tiles_M * _tiles_N * ldrow;
-}
-
-Nx1MEMBERFN()::InputTransform(
- const int kernel_rows,
- const int kernel_cols,
- const int n_batches,
- const int n_rows,
- const int n_cols,
- const int n_channels,
- const int padding_top,
- const int padding_left,
- const int padding_bottom,
- const int padding_right
-) : InputTransform<1, InnerTileRows, TIn, TOut, Roots>::InputTransform(
- /* Transpose rows and columns */
- kernel_cols, kernel_rows, n_batches, n_cols, n_rows, n_channels,
- padding_left, padding_top, padding_right, padding_bottom
- )
-{
-}
-
-Nx1MEMBERFN(void)::set_input_tensor(const void* const inptr)
-{
- set_input_tensor(inptr, this->_n_channels);
-}
-
-Nx1MEMBERFN(void)::set_input_tensor(const void* const inptr, const int ldcol)
-{
- set_input_tensor(inptr, this->_n_cols * ldcol, ldcol);
-}
-
-Nx1MEMBERFN(void)::set_input_tensor(const void* const inptr, const int ldrow, const int ldcol)
-{
- set_input_tensor(inptr, this->_n_rows * ldrow, ldrow, ldcol);
-}
-
-Nx1MEMBERFN(void)::set_input_tensor(const void* const inptr, const int ldbatch, const int ldrow, const int ldcol)
-{
- // Transpose row and column strides
- Base::set_input_tensor(inptr, ldbatch, ldcol, ldrow);
-}
-
-MEMBERFN(size_t)::get_working_space_size(const unsigned int nthreads) const
-{
- return sizeof(TIn) * InnerTileRows * _working_space_row_stride * nthreads;
-}
-
-MEMBERFN(void)::set_working_space(void * const buffer)
-{
- _working_space = static_cast<TIn *>(buffer);
-}
-
-MEMBERFN(unsigned int)::get_window(void) const
-{
- return iceildiv(_n_channels, WINDOW_BLOCK);
-}
-
-MEMBERFN(void)::run(
- const unsigned int start,
- const unsigned int stop,
- const unsigned int threadid
-)
-{
- // Determine the channels on which to work
- if (start >= get_window())
- {
- return; // No work to do beyond the end of the window
- }
- const unsigned int start_channel = start * WINDOW_BLOCK;
- const unsigned int stop_channel = std::min<unsigned int>(_n_channels , stop * WINDOW_BLOCK);
- const unsigned int n_channels = stop_channel - start_channel;
-
- // Loop over batches
- for (int batch = 0; batch < _n_batches; batch++)
- {
- const TIn* const inptr_batch = _inptr + start_channel + batch*_in_batch_stride;
- TOut* const outptr_batch = _outptr + start_channel + batch*_matrix_batch_stride;
-
- // Loop over rows of tiles
- for (int tile_i = 0; tile_i < _tiles_M; tile_i++)
- {
- // Compute the starting and ending row of pixels within the row of tiles,
- // hence compute the padding to apply to the top and bottom of each tile.
- const int row_top = tile_i * (InnerTileRows - _overlap_rows) - _padding_top;
- const int row_bottom = row_top + InnerTileRows;
- const int row_pad_top = std::max(0, _padding_top - tile_i * (InnerTileRows - _overlap_rows));
- const int row_pad_bottom = std::max(0, row_bottom - _n_rows);
-
- // Get a pointer to the start of the row.
- const int row_offset = std::min(0, row_pad_top - _padding_top);
- const TIn* const inptr_row = inptr_batch + _in_row_stride*(row_offset + tile_i*(InnerTileRows - _overlap_rows));
- TOut* const outptr_row = outptr_batch + tile_i*_tiles_N*_matrix_row_stride;
-
- // Loop over tiles within the row
- for (int tile_j = 0; tile_j < _tiles_N; tile_j++)
- {
- // Compute the starting and ending column of pixels within the tile,
- // hence compute the padding to apply to the left and right of the
- // tile.
- const int tile_left = tile_j * (InnerTileCols - _overlap_cols) - _padding_left;
- const int tile_right = tile_left + InnerTileCols;
- const int tile_pad_left = std::max(0, _padding_left - tile_j * (InnerTileCols - _overlap_cols));
- const int tile_pad_right = std::max(0, tile_right - _n_cols);
-
- // Get a pointer to the start of the tile.
- const int col_offset = std::min(0, tile_pad_left - _padding_left);
- const TIn* const inptr_tile = inptr_row + _in_col_stride*(col_offset + tile_j*(InnerTileCols - _overlap_cols));
- TOut* const outptr_tile = outptr_row + tile_j * _matrix_row_stride;
-
- // Transform the tile, applying padding if necessary.
- if (row_pad_top || tile_pad_left || row_pad_bottom || tile_pad_right)
- {
- transform_padded_tile(
- threadid, n_channels, outptr_tile, inptr_tile,
- row_pad_top, tile_pad_left, row_pad_bottom, tile_pad_right
- );
- }
- else
- {
- transform_unpadded_tile(threadid, n_channels, outptr_tile, inptr_tile);
- }
- }
- }
- }
-}
-
-MEMBERFN(void)::transform_unpadded_tile(
- const unsigned int /* threadid unused */,
- const int n_channels,
- TOut * const outptr,
- const TIn * const inptr
-)
-{
- transform_tile(
- n_channels, inptr, _in_row_stride, _in_col_stride, outptr, _matrix_stride
- );
-}
-
-MEMBERFN(void)::transform_padded_tile(
- const unsigned int threadid,
- const int n_channels,
- TOut * const outptr,
- const TIn * const inptr,
- const int padding_top,
- const int padding_left,
- const int padding_bottom,
- const int padding_right
-)
-{
- padding::copy_and_pad_tile(
- InnerTileRows, InnerTileCols, n_channels,
- inptr, _in_row_stride, _in_col_stride,
- static_cast<TIn *>(get_working_space(threadid)), _working_space_row_stride, _working_space_col_stride,
- padding_top, padding_left, padding_bottom, padding_right
- );
-
- transform_tile(
- n_channels, static_cast<const TIn *>(get_working_space(threadid)),
- _working_space_row_stride, _working_space_col_stride,
- outptr, _matrix_stride
- );
-}
-
-MEMBERFN(void *)::get_working_space(const unsigned int threadid) const
-{
- return _working_space + InnerTileRows * _working_space_row_stride * threadid;
-}
-
-} // namespace winograd
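
Note: the tiling arithmetic in run() above (tile counts via iceildiv, per-tile top/bottom padding) can be exercised on its own. Below is a minimal standalone C++ sketch, not library code: iceildiv is assumed to be rounded-up integer division, and TilePadding/row_padding are illustrative names introduced here.

// Standalone sketch of the per-tile padding arithmetic used by
// InputTransform::run() above. Illustrative only; not part of the library.
#include <algorithm>
#include <cstdio>

static int iceildiv(int a, int b) { return (a + b - 1) / b; }  // rounded-up integer division

struct TilePadding { int pad_top; int pad_bottom; };

// Mirror the row_pad_top / row_pad_bottom expressions from run() for one row of tiles.
static TilePadding row_padding(int tile_i, int inner_tile_rows, int overlap_rows,
                               int padding_top, int n_rows)
{
    const int row_top    = tile_i * (inner_tile_rows - overlap_rows) - padding_top;
    const int row_bottom = row_top + inner_tile_rows;
    return { std::max(0, padding_top - tile_i * (inner_tile_rows - overlap_rows)),
             std::max(0, row_bottom - n_rows) };
}

int main()
{
    // Example: 3x3 kernel, 6x6 inner tile, 10 input rows, 1 row of padding top and bottom.
    const int kernel_rows = 3, inner_tile_rows = 6, n_rows = 10;
    const int padding_top = 1, padding_bottom = 1;
    const int overlap_rows = kernel_rows - 1;
    const int tiles_M = iceildiv(padding_top + n_rows + padding_bottom - kernel_rows + 1,
                                 inner_tile_rows - kernel_rows + 1);
    for (int tile_i = 0; tile_i < tiles_M; tile_i++)
    {
        const TilePadding p = row_padding(tile_i, inner_tile_rows, overlap_rows, padding_top, n_rows);
        std::printf("tile row %d: pad_top=%d pad_bottom=%d\n", tile_i, p.pad_top, p.pad_bottom);
    }
    return 0;
}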
diff --git a/src/core/NEON/kernels/convolution/winograd/winograd_transforms/input_1x8_fp32_fp32_integers.cpp b/src/core/NEON/kernels/convolution/winograd/winograd_transforms/input_1x8_fp32_fp32_integers.cpp
deleted file mode 100644
index 5040ec1bd4..0000000000
--- a/src/core/NEON/kernels/convolution/winograd/winograd_transforms/input_1x8_fp32_fp32_integers.cpp
+++ /dev/null
@@ -1,158 +0,0 @@
-/*
- * Copyright (c) 2019 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include "arm.hpp"
-#include "input.hpp"
-
-namespace winograd
-{
-
-template <>
-void InputTransform<1, 8, float, float, WinogradRoots::Integers>::transform_tile(
- const int n_channels,
- const float* const input_base,
- const int, // We don't need to stride over rows
- const int input_col_stride,
- float* outptr,
- const int matrix_stride
-)
-{
- constexpr int inner_tile_cols = 8;
-
- // Get pointers into the input tile
- const float *x_ptrs[inner_tile_cols];
- for (int j = 0, xj = 0; j < inner_tile_cols; j++, xj++)
- {
- x_ptrs[j] = input_base + xj*input_col_stride;
- }
-
- // Vectors used/computed in this kernel.
- float x[inner_tile_cols];
- float U[inner_tile_cols];
-
- for (int j = 0; j < inner_tile_cols; j++)
- {
- x[j] = 0.0f;
- }
-
- // Perform the Winograd input transformation for each channel in the input
- // tensor.
- int channels_remaining = n_channels;
-#ifdef _arm_any_
- for (; channels_remaining >= 4; channels_remaining -= 4)
- {
- float32x4_t x[inner_tile_cols], U[inner_tile_cols];
- for (int j = 0; j < inner_tile_cols; j++)
- {
- x[j] = vdupq_n_f32(0.0f);
- }
-
- // Load x
- for (int j = 0; j < inner_tile_cols; j++)
- {
- x[j] = vld1q_f32(x_ptrs[j]);
- x_ptrs[j] += 4;
- }
-
- // Compute U = x . X
- U[0] = vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmulq_n_f32(x[6], 1), x[2], 49), x[4], -14), x[0], -36);
- U[1] = vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmulq_n_f32(x[6], 1), x[2], 36), x[3], 13), x[4], -13), x[1], -36), x[5], -1);
- U[2] = vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmulq_n_f32(x[6], 1), x[5], 1), x[2], 36), x[1], 36), x[4], -13), x[3], -13);
- U[3] = vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmulq_n_f32(x[6], 1), x[3], 20), x[2], 9), x[5], -2), x[4], -10), x[1], -18);
- U[4] = vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmulq_n_f32(x[6], 1), x[1], 18), x[2], 9), x[5], 2), x[4], -10), x[3], -20);
- U[5] = vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmulq_n_f32(x[6], 1), x[3], 15), x[2], 4), x[5], -3), x[4], -5), x[1], -12);
- U[6] = vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmulq_n_f32(x[6], 1), x[1], 12), x[2], 4), x[5], 3), x[4], -5), x[3], -15);
- U[7] = vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmulq_n_f32(x[7], 1), x[3], 49), x[5], -14), x[1], -36);
-
- // Store the transformed vector
- for (int j = 0; j < inner_tile_cols; j++)
- {
- vst1q_f32(outptr + j*matrix_stride, U[j]);
- }
- outptr += 4;
- }
- for (; channels_remaining >= 2; channels_remaining -= 2)
- {
- float32x2_t x[inner_tile_cols], U[inner_tile_cols];
- for (int j = 0; j < inner_tile_cols; j++)
- {
- x[j] = vdup_n_f32(0.0f);
- }
-
- // Load x
- for (int j = 0; j < inner_tile_cols; j++)
- {
- x[j] = vld1_f32(x_ptrs[j]);
- x_ptrs[j] += 2;
- }
-
- // Compute U = x . X
- U[0] = vmla_n_f32(vmla_n_f32(vmla_n_f32(vmul_n_f32(x[6], 1), x[2], 49), x[4], -14), x[0], -36);
- U[1] = vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmul_n_f32(x[6], 1), x[2], 36), x[3], 13), x[4], -13), x[1], -36), x[5], -1);
- U[2] = vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmul_n_f32(x[6], 1), x[5], 1), x[2], 36), x[1], 36), x[4], -13), x[3], -13);
- U[3] = vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmul_n_f32(x[6], 1), x[3], 20), x[2], 9), x[5], -2), x[4], -10), x[1], -18);
- U[4] = vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmul_n_f32(x[6], 1), x[1], 18), x[2], 9), x[5], 2), x[4], -10), x[3], -20);
- U[5] = vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmul_n_f32(x[6], 1), x[3], 15), x[2], 4), x[5], -3), x[4], -5), x[1], -12);
- U[6] = vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmul_n_f32(x[6], 1), x[1], 12), x[2], 4), x[5], 3), x[4], -5), x[3], -15);
- U[7] = vmla_n_f32(vmla_n_f32(vmla_n_f32(vmul_n_f32(x[7], 1), x[3], 49), x[5], -14), x[1], -36);
-
- // Store the transformed vector
- for (int j = 0; j < inner_tile_cols; j++)
- {
- vst1_f32(outptr + j*matrix_stride, U[j]);
- }
- outptr += 2;
- }
-#endif // _arm_any_
- for (; channels_remaining; channels_remaining--)
- {
- // Load x
- for (int j = 0; j < inner_tile_cols; j++)
- {
- x[j] = *(x_ptrs[j]++);
- }
-
- // Compute U = x . X
- U[0] = x[0]*-36 + x[4]*-14 + x[2]*49 + x[6]*1;
- U[1] = x[5]*-1 + x[1]*-36 + x[4]*-13 + x[3]*13 + x[2]*36 + x[6]*1;
- U[2] = x[3]*-13 + x[4]*-13 + x[1]*36 + x[2]*36 + x[5]*1 + x[6]*1;
- U[3] = x[1]*-18 + x[4]*-10 + x[5]*-2 + x[2]*9 + x[3]*20 + x[6]*1;
- U[4] = x[3]*-20 + x[4]*-10 + x[5]*2 + x[2]*9 + x[1]*18 + x[6]*1;
- U[5] = x[1]*-12 + x[4]*-5 + x[5]*-3 + x[2]*4 + x[3]*15 + x[6]*1;
- U[6] = x[3]*-15 + x[4]*-5 + x[5]*3 + x[2]*4 + x[1]*12 + x[6]*1;
- U[7] = x[1]*-36 + x[5]*-14 + x[3]*49 + x[7]*1;
-
- // Store the transformed vector
- for (int j = 0; j < inner_tile_cols; j++)
- {
- *(outptr + j*matrix_stride) = U[j];
- }
- outptr++;
- }
-}
-
-template class InputTransform<1, 8, float, float, WinogradRoots::Integers>;
-template class InputTransform<8, 1, float, float, WinogradRoots::Integers>;
-
-} // namespace winograd
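
Note: the unrolled U[0..7] expressions in the scalar tail above are a fixed matrix-vector product U = B^T x; the table below is simply read off those expressions (the coefficients are consistent with small-integer evaluation points, matching WinogradRoots::Integers). A standalone sketch, not library code:

// Illustrative only: recompute U = BT . x for one channel using the BT read
// off the scalar expressions above, instead of the unrolled arithmetic.
#include <cstdio>

int main()
{
    static const float BT[8][8] = {
        { -36,   0,  49,   0, -14,   0, 1, 0 },
        {   0, -36,  36,  13, -13,  -1, 1, 0 },
        {   0,  36,  36, -13, -13,   1, 1, 0 },
        {   0, -18,   9,  20, -10,  -2, 1, 0 },
        {   0,  18,   9, -20, -10,   2, 1, 0 },
        {   0, -12,   4,  15,  -5,  -3, 1, 0 },
        {   0,  12,   4, -15,  -5,   3, 1, 0 },
        {   0, -36,   0,  49,   0, -14, 0, 1 },
    };

    const float x[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };  // one channel of an 8-wide input tile
    for (int i = 0; i < 8; i++)
    {
        float u = 0.0f;
        for (int j = 0; j < 8; j++)
        {
            u += BT[i][j] * x[j];
        }
        std::printf("U[%d] = %.1f\n", i, u);
    }
    return 0;
}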
diff --git a/src/core/NEON/kernels/convolution/winograd/winograd_transforms/input_4x4_fp16_fp16_integers.cpp b/src/core/NEON/kernels/convolution/winograd/winograd_transforms/input_4x4_fp16_fp16_integers.cpp
deleted file mode 100644
index 1ea68b5938..0000000000
--- a/src/core/NEON/kernels/convolution/winograd/winograd_transforms/input_4x4_fp16_fp16_integers.cpp
+++ /dev/null
@@ -1,257 +0,0 @@
-/*
- * Copyright (c) 2020 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
-
-#include "input.hpp"
-#include "arm.hpp"
-
-namespace winograd
-{
-
-template <>
-void InputTransform<4, 4, __fp16, __fp16, WinogradRoots::Integers>::transform_tile(
- const int n_channels,
- const __fp16* const input_base,
- const int input_row_stride,
- const int input_col_stride,
- __fp16* outptr,
- const int matrix_stride
-)
-{
- constexpr int inner_tile_rows = 4, inner_tile_cols = 4;
-
- // Get pointers into the input tile
- const __fp16 *x_ptrs[inner_tile_rows][inner_tile_cols];
- for (int i = 0, xi = 0; i < inner_tile_rows; i++, xi++)
- {
- // Get a pointer into the row
- const __fp16* const row_ptr = input_base + xi*input_row_stride;
-
- for (int j = 0, xj = 0; j < inner_tile_cols; j++, xj++)
- {
- x_ptrs[i][j] = row_ptr + xj*input_col_stride;
- }
- }
-
- // Matrices used/computed in this kernel.
- __fp16 x[inner_tile_rows][inner_tile_cols];
- __fp16 XTx[inner_tile_rows][inner_tile_cols];
- __fp16 U[inner_tile_rows][inner_tile_cols];
-
- for (int i = 0; i < inner_tile_rows; i++)
- {
- for (int j = 0; j < inner_tile_cols; j++)
- {
- x[i][j] = XTx[i][j] = 0.0f;
- }
- }
-
- // Perform the Winograd input transformation for each channel in the input
- // tensor.
- int channels_remaining = n_channels;
-#ifdef __aarch64__
- for (; channels_remaining >= 8; channels_remaining -= 8)
- {
- // Matrices used/computed in this kernel.
- float16x8_t x[inner_tile_rows][inner_tile_cols];
- float16x8_t XTx[inner_tile_rows][inner_tile_cols];
- float16x8_t U[inner_tile_rows][inner_tile_cols];
-
- for (int i = 0; i < inner_tile_rows; i++)
- {
- for (int j = 0; j < inner_tile_cols; j++)
- {
- x[i][j] = vdupq_n_f16(0.0f);
- XTx[i][j] = vdupq_n_f16(0.0f);
- }
- }
-
- // Load x
- for (int i = 0; i < inner_tile_rows; i++)
- {
- for (int j = 0; j < inner_tile_cols; j++)
- {
- x[i][j] = vld1q_f16(x_ptrs[i][j]);
- x_ptrs[i][j] += 8;
- }
- }
-
- // Compute XT . x
- for (int j = 0; j < inner_tile_cols; j++)
- {
- // XTx[0][j] = x[0][j] - x[2][j];
- XTx[0][j] = vsubq_f16(x[0][j], x[2][j]);
-
- // XTx[1][j] = x[1][j] + x[2][j];
- XTx[1][j] = vaddq_f16(x[1][j], x[2][j]);
-
- // XTx[2][j] = x[2][j] - x[1][j];
- XTx[2][j] = vsubq_f16(x[2][j], x[1][j]);
-
- // XTx[3][j] = x[1][j] - x[3][j];
- XTx[3][j] = vsubq_f16(x[1][j], x[3][j]);
- }
-
- // Compute U = XT . x . X
- for (int i = 0; i < inner_tile_rows; i++)
- {
- // U[i][0] = XTx[i][0] - XTx[i][2];
- U[i][0] = vsubq_f16(XTx[i][0], XTx[i][2]);
-
- // U[i][1] = XTx[i][1] + XTx[i][2];
- U[i][1] = vaddq_f16(XTx[i][1], XTx[i][2]);
-
- // U[i][2] = XTx[i][2] - XTx[i][1];
- U[i][2] = vsubq_f16(XTx[i][2], XTx[i][1]);
-
- // U[i][3] = XTx[i][1] - XTx[i][3];
- U[i][3] = vsubq_f16(XTx[i][1], XTx[i][3]);
- }
-
- // Store the transformed matrix
- for (int i = 0, m = 0; i < inner_tile_rows; i++)
- {
- for (int j = 0; j < inner_tile_cols; j++, m++)
- {
- vst1q_f16(outptr + m*matrix_stride, U[i][j]);
- }
- }
- outptr += 8;
- }
-#endif // __aarch64__
-#ifdef __arm_any__
- for (; channels_remaining >= 4; channels_remaining -= 4)
- {
- // Matrices used/computed in this kernel.
- float16x4_t x[inner_tile_rows][inner_tile_cols];
- float16x4_t XTx[inner_tile_rows][inner_tile_cols];
- float16x4_t U[inner_tile_rows][inner_tile_cols];
-
- for (int i = 0; i < inner_tile_rows; i++)
- {
- for (int j = 0; j < inner_tile_cols; j++)
- {
- x[i][j] = vdup_n_f16(0.0f);
- XTx[i][j] = vdup_n_f16(0.0f);
- }
- }
-
- // Load x
- for (int i = 0; i < inner_tile_rows; i++)
- {
- for (int j = 0; j < inner_tile_cols; j++)
- {
- x[i][j] = vld1_f16(x_ptrs[i][j]);
- x_ptrs[i][j] += 4;
- }
- }
-
- // Compute XT . x
- for (int j = 0; j < inner_tile_cols; j++)
- {
- // XTx[0][j] = x[0][j] - x[2][j];
- XTx[0][j] = vsub_f16(x[0][j], x[2][j]);
-
- // XTx[1][j] = x[1][j] + x[2][j];
- XTx[1][j] = vadd_f16(x[1][j], x[2][j]);
-
- // XTx[2][j] = x[2][j] - x[1][j];
- XTx[2][j] = vsub_f16(x[2][j], x[1][j]);
-
- // XTx[3][j] = x[1][j] - x[3][j];
- XTx[3][j] = vsub_f16(x[1][j], x[3][j]);
- }
-
- // Compute U = XT . x . X
- for (int i = 0; i < inner_tile_rows; i++)
- {
- // U[i][0] = XTx[i][0] - XTx[i][2];
- U[i][0] = vsub_f16(XTx[i][0], XTx[i][2]);
-
- // U[i][1] = XTx[i][1] + XTx[i][2];
- U[i][1] = vadd_f16(XTx[i][1], XTx[i][2]);
-
- // U[i][2] = XTx[i][2] - XTx[i][1];
- U[i][2] = vsub_f16(XTx[i][2], XTx[i][1]);
-
- // U[i][3] = XTx[i][1] - XTx[i][3];
- U[i][3] = vsub_f16(XTx[i][1], XTx[i][3]);
- }
-
- // Store the transformed matrix
- for (int i = 0, m = 0; i < inner_tile_rows; i++)
- {
- for (int j = 0; j < inner_tile_cols; j++, m++)
- {
- vst1_f16(outptr + m*matrix_stride, U[i][j]);
- }
- }
- outptr += 4;
- }
-#endif // __arm_any__
- for (; channels_remaining; channels_remaining--)
- {
- // Load x
- for (int i = 0; i < inner_tile_rows; i++)
- {
- for (int j = 0; j < inner_tile_cols; j++)
- {
- x[i][j] = *(x_ptrs[i][j]++);
- }
- }
-
- // Compute XT . x
- for (int j = 0; j < inner_tile_cols; j++)
- {
- XTx[0][j] = x[0][j] - x[2][j];
- XTx[1][j] = x[1][j] + x[2][j];
- XTx[2][j] = x[2][j] - x[1][j];
- XTx[3][j] = x[1][j] - x[3][j];
- }
-
- // Compute U = XT . x . X
- for (int i = 0; i < inner_tile_rows; i++)
- {
- U[i][0] = XTx[i][0] - XTx[i][2];
- U[i][1] = XTx[i][1] + XTx[i][2];
- U[i][2] = XTx[i][2] - XTx[i][1];
- U[i][3] = XTx[i][1] - XTx[i][3];
- }
-
- // Store the transformed matrix
- for (int i = 0, m = 0; i < inner_tile_rows; i++)
- {
- for (int j = 0; j < inner_tile_cols; j++, m++)
- {
- *(outptr + m*matrix_stride) = U[i][j];
- }
- }
- outptr++;
- }
-}
-
-template class InputTransform<4, 4, __fp16, __fp16, WinogradRoots::Integers>;
-
-} // namespace
-#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
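
Note: the fp16 kernel above and the fp32 kernel that follows unroll the same 4x4 transform. Reading the coefficients off the scalar tail, it is the two-stage product U = B^T X B. A standalone scalar sketch (the names here are illustrative, not library code):

// Scalar re-expression of the 4x4 input transform above: first BT . X
// ("Compute XT . x"), then multiply by B = BT^T ("Compute U = XT . x . X").
#include <cstdio>

int main()
{
    static const float BT[4][4] = {
        { 1,  0, -1,  0 },
        { 0,  1,  1,  0 },
        { 0, -1,  1,  0 },
        { 0,  1,  0, -1 },
    };

    float X[4][4];  // one channel of a 4x4 input tile, filled with dummy data
    for (int i = 0; i < 4; i++)
        for (int j = 0; j < 4; j++)
            X[i][j] = static_cast<float>(i * 4 + j);

    // XTx = BT . X
    float XTx[4][4] = {};
    for (int i = 0; i < 4; i++)
        for (int j = 0; j < 4; j++)
            for (int k = 0; k < 4; k++)
                XTx[i][j] += BT[i][k] * X[k][j];

    // U = XTx . B, where B[k][j] = BT[j][k]
    float U[4][4] = {};
    for (int i = 0; i < 4; i++)
        for (int j = 0; j < 4; j++)
            for (int k = 0; k < 4; k++)
                U[i][j] += XTx[i][k] * BT[j][k];

    for (int i = 0; i < 4; i++)
        std::printf("%8.1f %8.1f %8.1f %8.1f\n", U[i][0], U[i][1], U[i][2], U[i][3]);
    return 0;
}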
diff --git a/src/core/NEON/kernels/convolution/winograd/winograd_transforms/input_4x4_fp32_fp32_integers.cpp b/src/core/NEON/kernels/convolution/winograd/winograd_transforms/input_4x4_fp32_fp32_integers.cpp
deleted file mode 100644
index 9393785dfc..0000000000
--- a/src/core/NEON/kernels/convolution/winograd/winograd_transforms/input_4x4_fp32_fp32_integers.cpp
+++ /dev/null
@@ -1,255 +0,0 @@
-/*
- * Copyright (c) 2019 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include "input.hpp"
-#include "arm.hpp"
-
-namespace winograd
-{
-
-template <>
-void InputTransform<4, 4, float, float, WinogradRoots::Integers>::transform_tile(
- const int n_channels,
- const float* const input_base,
- const int input_row_stride,
- const int input_col_stride,
- float* outptr,
- const int matrix_stride
-)
-{
- constexpr int inner_tile_rows = 4, inner_tile_cols = 4;
-
- // Get pointers into the input tile
- const float *x_ptrs[inner_tile_rows][inner_tile_cols];
- for (int i = 0, xi = 0; i < inner_tile_rows; i++, xi++)
- {
- // Get a pointer into the row
- const float* const row_ptr = input_base + xi*input_row_stride;
-
- for (int j = 0, xj = 0; j < inner_tile_cols; j++, xj++)
- {
- x_ptrs[i][j] = row_ptr + xj*input_col_stride;
- }
- }
-
- // Matrices used/computed in this kernel.
- float x[inner_tile_rows][inner_tile_cols];
- float XTx[inner_tile_rows][inner_tile_cols];
- float U[inner_tile_rows][inner_tile_cols];
-
- for (int i = 0; i < inner_tile_rows; i++)
- {
- for (int j = 0; j < inner_tile_cols; j++)
- {
- x[i][j] = XTx[i][j] = 0.0f;
- }
- }
-
- // Perform the Winograd input transformation for each channel in the input
- // tensor.
- int channels_remaining = n_channels;
-#ifdef __aarch64__
- for (; channels_remaining >= 4; channels_remaining -= 4)
- {
- // Matrices used/computed in this kernel.
- float32x4_t x[inner_tile_rows][inner_tile_cols];
- float32x4_t XTx[inner_tile_rows][inner_tile_cols];
- float32x4_t U[inner_tile_rows][inner_tile_cols];
-
- for (int i = 0; i < inner_tile_rows; i++)
- {
- for (int j = 0; j < inner_tile_cols; j++)
- {
- x[i][j] = vdupq_n_f32(0.0f);
- XTx[i][j] = vdupq_n_f32(0.0f);
- }
- }
-
- // Load x
- for (int i = 0; i < inner_tile_rows; i++)
- {
- for (int j = 0; j < inner_tile_cols; j++)
- {
- x[i][j] = vld1q_f32(x_ptrs[i][j]);
- x_ptrs[i][j] += 4;
- }
- }
-
- // Compute XT . x
- for (int j = 0; j < inner_tile_cols; j++)
- {
- // XTx[0][j] = x[0][j] - x[2][j];
- XTx[0][j] = vsubq_f32(x[0][j], x[2][j]);
-
- // XTx[1][j] = x[1][j] + x[2][j];
- XTx[1][j] = vaddq_f32(x[1][j], x[2][j]);
-
- // XTx[2][j] = x[2][j] - x[1][j];
- XTx[2][j] = vsubq_f32(x[2][j], x[1][j]);
-
- // XTx[3][j] = x[1][j] - x[3][j];
- XTx[3][j] = vsubq_f32(x[1][j], x[3][j]);
- }
-
- // Compute U = XT . x . X
- for (int i = 0; i < inner_tile_rows; i++)
- {
- // U[i][0] = XTx[i][0] - XTx[i][2];
- U[i][0] = vsubq_f32(XTx[i][0], XTx[i][2]);
-
- // U[i][1] = XTx[i][1] + XTx[i][2];
- U[i][1] = vaddq_f32(XTx[i][1], XTx[i][2]);
-
- // U[i][2] = XTx[i][2] - XTx[i][1];
- U[i][2] = vsubq_f32(XTx[i][2], XTx[i][1]);
-
- // U[i][3] = XTx[i][1] - XTx[i][3];
- U[i][3] = vsubq_f32(XTx[i][1], XTx[i][3]);
- }
-
- // Store the transformed matrix
- for (int i = 0, m = 0; i < inner_tile_rows; i++)
- {
- for (int j = 0; j < inner_tile_cols; j++, m++)
- {
- vst1q_f32(outptr + m*matrix_stride, U[i][j]);
- }
- }
- outptr += 4;
- }
-#endif // __aarch64__
-#ifdef __arm_any__
- for (; channels_remaining >= 2; channels_remaining -= 2)
- {
- // Matrices used/computed in this kernel.
- float32x2_t x[inner_tile_rows][inner_tile_cols];
- float32x2_t XTx[inner_tile_rows][inner_tile_cols];
- float32x2_t U[inner_tile_rows][inner_tile_cols];
-
- for (int i = 0; i < inner_tile_rows; i++)
- {
- for (int j = 0; j < inner_tile_cols; j++)
- {
- x[i][j] = vdup_n_f32(0.0f);
- XTx[i][j] = vdup_n_f32(0.0f);
- }
- }
-
- // Load x
- for (int i = 0; i < inner_tile_rows; i++)
- {
- for (int j = 0; j < inner_tile_cols; j++)
- {
- x[i][j] = vld1_f32(x_ptrs[i][j]);
- x_ptrs[i][j] += 2;
- }
- }
-
- // Compute XT . x
- for (int j = 0; j < inner_tile_cols; j++)
- {
- // XTx[0][j] = x[0][j] - x[2][j];
- XTx[0][j] = vsub_f32(x[0][j], x[2][j]);
-
- // XTx[1][j] = x[1][j] + x[2][j];
- XTx[1][j] = vadd_f32(x[1][j], x[2][j]);
-
- // XTx[2][j] = x[2][j] - x[1][j];
- XTx[2][j] = vsub_f32(x[2][j], x[1][j]);
-
- // XTx[3][j] = x[1][j] - x[3][j];
- XTx[3][j] = vsub_f32(x[1][j], x[3][j]);
- }
-
- // Compute U = XT . x . X
- for (int i = 0; i < inner_tile_rows; i++)
- {
- // U[i][0] = XTx[i][0] - XTx[i][2];
- U[i][0] = vsub_f32(XTx[i][0], XTx[i][2]);
-
- // U[i][1] = XTx[i][1] + XTx[i][2];
- U[i][1] = vadd_f32(XTx[i][1], XTx[i][2]);
-
- // U[i][2] = XTx[i][2] - XTx[i][1];
- U[i][2] = vsub_f32(XTx[i][2], XTx[i][1]);
-
- // U[i][3] = XTx[i][1] - XTx[i][3];
- U[i][3] = vsub_f32(XTx[i][1], XTx[i][3]);
- }
-
- // Store the transformed matrix
- for (int i = 0, m = 0; i < inner_tile_rows; i++)
- {
- for (int j = 0; j < inner_tile_cols; j++, m++)
- {
- vst1_f32(outptr + m*matrix_stride, U[i][j]);
- }
- }
- outptr += 2;
- }
-#endif // __arm_any__
- for (; channels_remaining; channels_remaining--)
- {
- // Load x
- for (int i = 0; i < inner_tile_rows; i++)
- {
- for (int j = 0; j < inner_tile_cols; j++)
- {
- x[i][j] = *(x_ptrs[i][j]++);
- }
- }
-
- // Compute XT . x
- for (int j = 0; j < inner_tile_cols; j++)
- {
- XTx[0][j] = x[0][j] - x[2][j];
- XTx[1][j] = x[1][j] + x[2][j];
- XTx[2][j] = x[2][j] - x[1][j];
- XTx[3][j] = x[1][j] - x[3][j];
- }
-
- // Compute U = XT . x . X
- for (int i = 0; i < inner_tile_rows; i++)
- {
- U[i][0] = XTx[i][0] - XTx[i][2];
- U[i][1] = XTx[i][1] + XTx[i][2];
- U[i][2] = XTx[i][2] - XTx[i][1];
- U[i][3] = XTx[i][1] - XTx[i][3];
- }
-
- // Store the transformed matrix
- for (int i = 0, m = 0; i < inner_tile_rows; i++)
- {
- for (int j = 0; j < inner_tile_cols; j++, m++)
- {
- *(outptr + m*matrix_stride) = U[i][j];
- }
- }
- outptr++;
- }
-}
-
-template class InputTransform<4, 4, float, float, WinogradRoots::Integers>;
-
-} // namespace
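
Note: the intrinsics-based transform_tile kernels above all follow the same channel-blocking pattern: the widest available vector block first (float32x4_t or float16x8_t), then narrower blocks, then a scalar tail over the remaining channels. A plain-C++ stand-in for that control flow (process_block is a placeholder introduced here, not a library function):

// Sketch of the channel-blocking loop structure used by the kernels above.
#include <cstdio>

static void process_block(int first_channel, int width)
{
    std::printf("channels [%d, %d): block width %d\n", first_channel, first_channel + width, width);
}

int main()
{
    const int n_channels = 11;
    int channel = 0, remaining = n_channels;

    for (; remaining >= 4; remaining -= 4, channel += 4)  // e.g. the float32x4_t path
    {
        process_block(channel, 4);
    }
    for (; remaining >= 2; remaining -= 2, channel += 2)  // e.g. the float32x2_t path
    {
        process_block(channel, 2);
    }
    for (; remaining; remaining--, channel++)             // scalar tail
    {
        process_block(channel, 1);
    }
    return 0;
}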
diff --git a/src/core/NEON/kernels/convolution/winograd/winograd_transforms/input_6x6_fp16_fp16_integers.cpp b/src/core/NEON/kernels/convolution/winograd/winograd_transforms/input_6x6_fp16_fp16_integers.cpp
deleted file mode 100644
index 3eaf977826..0000000000
--- a/src/core/NEON/kernels/convolution/winograd/winograd_transforms/input_6x6_fp16_fp16_integers.cpp
+++ /dev/null
@@ -1,277 +0,0 @@
-/*
- * Copyright (c) 2020 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
-#include "arm.hpp"
-#include "input.hpp"
-
-namespace winograd
-{
-template <>
-void InputTransform<6, 6, __fp16, __fp16, WinogradRoots::Integers>::transform_tile(
- const int n_channels,
- const __fp16* const input_base,
- const int input_row_stride,
- const int input_col_stride,
- __fp16* outptr,
- const int matrix_stride
-)
-{
- constexpr int inner_tile_rows = 6;
- constexpr int inner_tile_cols = 6;
-
- // Get pointers into the input tile
- const __fp16 *x_ptrs[inner_tile_rows][inner_tile_cols];
- for (int i = 0, xi = 0; i < inner_tile_rows; i++, xi++)
- {
- // Get a pointer into the row
- const __fp16* const row_ptr = input_base + xi*input_row_stride;
-
- for (int j = 0, xj = 0; j < inner_tile_cols; j++, xj++)
- {
- x_ptrs[i][j] = row_ptr + xj*input_col_stride;
- }
- }
-
- // Matrices used/computed in this kernel.
- __fp16 x[inner_tile_rows][inner_tile_cols];
- __fp16 XTx[inner_tile_rows][inner_tile_cols];
- __fp16 U[inner_tile_rows][inner_tile_cols];
- for (int i = 0; i < inner_tile_rows; i++)
- {
- for (int j = 0; j < inner_tile_cols; j++)
- {
- x[i][j] = XTx[i][j] = 0.0f;
- }
- }
-
- // Perform the Winograd input transformation for each channel in the input
- // tensor.
- int channels_remaining = n_channels;
- for (; channels_remaining >= 8; channels_remaining -= 8)
- {
- // Matrices used/computed in this kernel
- float16x8_t x[inner_tile_rows][inner_tile_cols];
- float16x8_t XTx[inner_tile_rows][inner_tile_cols];
- float16x8_t U[inner_tile_rows][inner_tile_cols];
- for (int i = 0; i < inner_tile_rows; i++)
- {
- for (int j = 0; j < inner_tile_cols; j++)
- {
- x[i][j] = vdupq_n_f16(0.0f);
- XTx[i][j] = vdupq_n_f16(0.0f);
- }
- }
-
- // Read a 6x6 tile in the Winograd domain
- for (int i = 0; i < inner_tile_rows; i++)
- {
- for (int j = 0; j < inner_tile_cols; j++)
- {
- x[i][j] = vld1q_f16(x_ptrs[i][j]);
- x_ptrs[i][j] += 8;
- }
- }
-
- // Compute XT . x
- for (int j = 0; j < inner_tile_cols; j++)
- {
- // XTx[0][j] = 4*x[0][j] + -5*x[2][j] + 1*x[4][j];
- XTx[0][j] = vsubq_f16(vaddq_f16(x[4][j], vmulq_f16(x[0][j], vdupq_n_f16(4.0f))), vmulq_f16(x[2][j], vdupq_n_f16(5.0f)));
-
- // XTx[1][j] = -4*x[1][j] + -4*x[2][j] + 1*x[3][j] + 1*x[4][j];
- XTx[1][j] = vsubq_f16(vaddq_f16(x[3][j], x[4][j]), vmulq_f16(vaddq_f16(x[1][j], x[2][j]), vdupq_n_f16(4.0f)));
-
- // XTx[2][j] = 4*x[1][j] + -4*x[2][j] + -1*x[3][j] + 1*x[4][j];
- XTx[2][j] = vaddq_f16(vsubq_f16(x[4][j], x[3][j]), vmulq_f16(vsubq_f16(x[1][j], x[2][j]), vdupq_n_f16(4.0f)));
-
- // XTx[3][j] = -2*x[1][j] + -1*x[2][j] + 2*x[3][j] + 1*x[4][j];
- XTx[3][j] = vaddq_f16(vsubq_f16(x[4][j], x[2][j]), vmulq_f16(vsubq_f16(x[3][j], x[1][j]), vdupq_n_f16(2.0f)));
-
- // XTx[4][j] = 2*x[1][j] + -1*x[2][j] + -2*x[3][j] + 1*x[4][j];
- XTx[4][j] = vaddq_f16(vsubq_f16(x[4][j], x[2][j]), vmulq_f16(vsubq_f16(x[1][j], x[3][j]), vdupq_n_f16(2.0f)));
-
- // XTx[5][j] = 4*x[1][j] + -5*x[3][j] + 1*x[5][j];
- XTx[5][j] = vsubq_f16(vaddq_f16(x[5][j], vmulq_f16(x[1][j], vdupq_n_f16(4.0f))), vmulq_f16(x[3][j], vdupq_n_f16(5.0f)));
- }
-
- // Compute U = XT . x . X
- for (int i = 0; i < inner_tile_rows; i++)
- {
- // U[i][0] = 4*XTx[i][0] + -5*XTx[i][2] + 1*XTx[i][4];
- U[i][0] = vsubq_f16(vaddq_f16(XTx[i][4], vmulq_f16(XTx[i][0], vdupq_n_f16(4.0f))), vmulq_f16(XTx[i][2], vdupq_n_f16(5.0f)));
-
- // U[i][1] = -4*XTx[i][1] + -4*XTx[i][2] + 1*XTx[i][3] + 1*XTx[i][4];
- U[i][1] = vsubq_f16(vaddq_f16(XTx[i][3], XTx[i][4]), vmulq_f16(vaddq_f16(XTx[i][1], XTx[i][2]), vdupq_n_f16(4.0f)));
-
- // U[i][2] = 4*XTx[i][1] + -4*XTx[i][2] + -1*XTx[i][3] + 1*XTx[i][4];
- U[i][2] = vaddq_f16(vsubq_f16(XTx[i][4], XTx[i][3]), vmulq_f16(vsubq_f16(XTx[i][1], XTx[i][2]), vdupq_n_f16(4.0f)));
-
- // U[i][3] = -2*XTx[i][1] + -1*XTx[i][2] + 2*XTx[i][3] + 1*XTx[i][4];
- U[i][3] = vaddq_f16(vsubq_f16(XTx[i][4], XTx[i][2]), vmulq_f16(vsubq_f16(XTx[i][3], XTx[i][1]), vdupq_n_f16(2.0f)));
-
- // U[i][4] = 2*XTx[i][1] + -1*XTx[i][2] + -2*XTx[i][3] + 1*XTx[i][4];
- U[i][4] = vaddq_f16(vsubq_f16(XTx[i][4], XTx[i][2]), vmulq_f16(vsubq_f16(XTx[i][1], XTx[i][3]), vdupq_n_f16(2.0f)));
-
- // U[i][5] = 4*XTx[i][1] + -5*XTx[i][3] + 1*XTx[i][5];
- U[i][5] = vsubq_f16(vaddq_f16(XTx[i][5], vmulq_f16(XTx[i][1], vdupq_n_f16(4.0f))), vmulq_f16(XTx[i][3], vdupq_n_f16(5.0f)));
- }
-
- // Store the transformed matrix
- for (int i = 0, m = 0; i < inner_tile_rows; i++)
- {
- for (int j = 0; j < inner_tile_cols; j++, m++)
- {
- vst1q_f16(outptr + m*matrix_stride, U[i][j]);
- }
- }
- outptr += 8;
- }
- for (; channels_remaining >= 4; channels_remaining -= 4)
- {
- // Matrices used/computed in this kernel
- float16x4_t x[inner_tile_rows][inner_tile_cols];
- float16x4_t XTx[inner_tile_rows][inner_tile_cols];
- float16x4_t U[inner_tile_rows][inner_tile_cols];
- for (int i = 0; i < inner_tile_rows; i++)
- {
- for (int j = 0; j < inner_tile_cols; j++)
- {
- x[i][j] = vdup_n_f16(0.0f);
- XTx[i][j] = vdup_n_f16(0.0f);
- }
- }
-
- // Read a 6x6 tile in the Winograd domain
- for (int i = 0; i < inner_tile_rows; i++)
- {
- for (int j = 0; j < inner_tile_cols; j++)
- {
- x[i][j] = vld1_f16(x_ptrs[i][j]);
- x_ptrs[i][j] += 4;
- }
- }
-
- // Compute XT . x
- for (int j = 0; j < inner_tile_cols; j++)
- {
- // XTx[0][j] = 4*x[0][j] + -5*x[2][j] + 1*x[4][j];
- XTx[0][j] = vsub_f16(vadd_f16(x[4][j], vmul_f16(x[0][j], vdup_n_f16(4.0f))), vmul_f16(x[2][j], vdup_n_f16(5.0f)));
-
- // XTx[1][j] = -4*x[1][j] + -4*x[2][j] + 1*x[3][j] + 1*x[4][j];
- XTx[1][j] = vsub_f16(vadd_f16(x[3][j], x[4][j]), vmul_f16(vadd_f16(x[1][j], x[2][j]), vdup_n_f16(4.0f)));
-
- // XTx[2][j] = 4*x[1][j] + -4*x[2][j] + -1*x[3][j] + 1*x[4][j];
- XTx[2][j] = vadd_f16(vsub_f16(x[4][j], x[3][j]), vmul_f16(vsub_f16(x[1][j], x[2][j]), vdup_n_f16(4.0f)));
-
- // XTx[3][j] = -2*x[1][j] + -1*x[2][j] + 2*x[3][j] + 1*x[4][j];
- XTx[3][j] = vadd_f16(vsub_f16(x[4][j], x[2][j]), vmul_f16(vsub_f16(x[3][j], x[1][j]), vdup_n_f16(2.0f)));
-
- // XTx[4][j] = 2*x[1][j] + -1*x[2][j] + -2*x[3][j] + 1*x[4][j];
- XTx[4][j] = vadd_f16(vsub_f16(x[4][j], x[2][j]), vmul_f16(vsub_f16(x[1][j], x[3][j]), vdup_n_f16(2.0f)));
-
- // XTx[5][j] = 4*x[1][j] + -5*x[3][j] + 1*x[5][j];
- XTx[5][j] = vsub_f16(vadd_f16(x[5][j], vmul_f16(x[1][j], vdup_n_f16(4.0f))), vmul_f16(x[3][j], vdup_n_f16(5.0f)));
- }
-
- // Compute U = XT . x . X
- for (int i = 0; i < inner_tile_rows; i++)
- {
- // U[i][0] = 4*XTx[i][0] + -5*XTx[i][2] + 1*XTx[i][4];
- U[i][0] = vsub_f16(vadd_f16(XTx[i][4], vmul_f16(XTx[i][0], vdup_n_f16(4.0f))), vmul_f16(XTx[i][2], vdup_n_f16(5.0f)));
-
- // U[i][1] = -4*XTx[i][1] + -4*XTx[i][2] + 1*XTx[i][3] + 1*XTx[i][4];
- U[i][1] = vsub_f16(vadd_f16(XTx[i][3], XTx[i][4]), vmul_f16(vadd_f16(XTx[i][1], XTx[i][2]), vdup_n_f16(4.0f)));
-
- // U[i][2] = 4*XTx[i][1] + -4*XTx[i][2] + -1*XTx[i][3] + 1*XTx[i][4];
- U[i][2] = vadd_f16(vsub_f16(XTx[i][4], XTx[i][3]), vmul_f16(vsub_f16(XTx[i][1], XTx[i][2]), vdup_n_f16(4.0f)));
-
- // U[i][3] = -2*XTx[i][1] + -1*XTx[i][2] + 2*XTx[i][3] + 1*XTx[i][4];
- U[i][3] = vadd_f16(vsub_f16(XTx[i][4], XTx[i][2]), vmul_f16(vsub_f16(XTx[i][3], XTx[i][1]), vdup_n_f16(2.0f)));
-
- // U[i][4] = 2*XTx[i][1] + -1*XTx[i][2] + -2*XTx[i][3] + 1*XTx[i][4];
- U[i][4] = vadd_f16(vsub_f16(XTx[i][4], XTx[i][2]), vmul_f16(vsub_f16(XTx[i][1], XTx[i][3]), vdup_n_f16(2.0f)));
-
- // U[i][5] = 4*XTx[i][1] + -5*XTx[i][3] + 1*XTx[i][5];
- U[i][5] = vsub_f16(vadd_f16(XTx[i][5], vmul_f16(XTx[i][1], vdup_n_f16(4.0f))), vmul_f16(XTx[i][3], vdup_n_f16(5.0f)));
- }
-
- // Store the transformed matrix
- for (int i = 0, m = 0; i < inner_tile_rows; i++)
- {
- for (int j = 0; j < inner_tile_cols; j++, m++)
- {
- vst1_f16(outptr + m*matrix_stride, U[i][j]);
- }
- }
- outptr += 4;
- }
- for (; channels_remaining; channels_remaining--)
- {
- // Load x
- for (int i = 0; i < inner_tile_rows; i++)
- {
- for (int j = 0; j < inner_tile_cols; j++)
- {
- x[i][j] = *(x_ptrs[i][j]++);
- }
- }
-
- // Compute XT . x
- for (int j = 0; j < inner_tile_cols; j++)
- {
- XTx[0][j] = 4*x[0][j] + -5*x[2][j] + 1*x[4][j];
- XTx[1][j] = -4*x[1][j] + -4*x[2][j] + 1*x[3][j] + 1*x[4][j];
- XTx[2][j] = 4*x[1][j] + -4*x[2][j] + -1*x[3][j] + 1*x[4][j];
- XTx[3][j] = -2*x[1][j] + -1*x[2][j] + 2*x[3][j] + 1*x[4][j];
- XTx[4][j] = 2*x[1][j] + -1*x[2][j] + -2*x[3][j] + 1*x[4][j];
- XTx[5][j] = 4*x[1][j] + -5*x[3][j] + 1*x[5][j];
- }
-
- // Compute U = XT . x . X
- for (int i = 0; i < inner_tile_rows; i++)
- {
- U[i][0] = 4*XTx[i][0] + -5*XTx[i][2] + 1*XTx[i][4];
- U[i][1] = -4*XTx[i][1] + -4*XTx[i][2] + 1*XTx[i][3] + 1*XTx[i][4];
- U[i][2] = 4*XTx[i][1] + -4*XTx[i][2] + -1*XTx[i][3] + 1*XTx[i][4];
- U[i][3] = -2*XTx[i][1] + -1*XTx[i][2] + 2*XTx[i][3] + 1*XTx[i][4];
- U[i][4] = 2*XTx[i][1] + -1*XTx[i][2] + -2*XTx[i][3] + 1*XTx[i][4];
- U[i][5] = 4*XTx[i][1] + -5*XTx[i][3] + 1*XTx[i][5];
- }
-
- // Store the transformed matrix
- for (int i = 0, m = 0; i < inner_tile_rows; i++)
- {
- for (int j = 0; j < inner_tile_cols; j++, m++)
- {
- *(outptr + m*matrix_stride) = U[i][j];
- }
- }
- outptr++;
- }
-}
-
-template class InputTransform<6, 6, __fp16, __fp16, WinogradRoots::Integers>;
-
-} // namespace winograd
-#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
\ No newline at end of file
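
Note: reading the coefficients off the scalar tail of the 6x6 kernels (this fp16 file and the fp32/assembly file that follows), the transform is again U = B^T X B; the rows below are exactly the XTx[0..5] coefficients above, and the same matrix is applied along rows and columns:

B^\mathsf{T} =
\begin{pmatrix}
 4 &  0 & -5 &  0 & 1 & 0 \\
 0 & -4 & -4 &  1 & 1 & 0 \\
 0 &  4 & -4 & -1 & 1 & 0 \\
 0 & -2 & -1 &  2 & 1 & 0 \\
 0 &  2 & -1 & -2 & 1 & 0 \\
 0 &  4 &  0 & -5 & 0 & 1
\end{pmatrix},
\qquad
U = B^\mathsf{T} X B .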
diff --git a/src/core/NEON/kernels/convolution/winograd/winograd_transforms/input_6x6_fp32_fp32_integers.cpp b/src/core/NEON/kernels/convolution/winograd/winograd_transforms/input_6x6_fp32_fp32_integers.cpp
deleted file mode 100644
index e4aad76d97..0000000000
--- a/src/core/NEON/kernels/convolution/winograd/winograd_transforms/input_6x6_fp32_fp32_integers.cpp
+++ /dev/null
@@ -1,1308 +0,0 @@
-/*
- * Copyright (c) 2019 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include "arm.hpp"
-#include "input.hpp"
-
-namespace winograd
-{
-
-#ifdef __aarch64__
-
-template <>
-void InputTransform<6, 6, float, float, WinogradRoots::Integers>::transform_tile(
- int n_channels,
- const float* input_base,
- const int input_row_stride,
- const int input_col_stride,
- float* matrix_base,
- const int matrix_stride
-)
-{
- const float pcoeffs[4] = {1.0f, 2.0f, 4.0f, 5.0f};
- __asm__ __volatile__(
- "ldr q0, [%[pcoeffs]]\n"
- "add x25, %[inptr0], %[input_row_stride]\n"
- "add x9, %[input_col_stride1], %[input_col_stride1]\n"
- "add x16, x25, %[input_row_stride]\n"
- "add x19, x9, %[input_col_stride1]\n"
- "add x26, x16, %[input_row_stride]\n"
- "add x20, x19, %[input_col_stride1]\n"
- "add x17, x26, %[input_row_stride]\n"
- "add x21, x20, %[input_col_stride1]\n"
- "add x27, x17, %[input_row_stride]\n"
- "add x28, %[outptr0], %[output_row_stride]\n"
- "add x11, %[output_col_stride1], %[output_col_stride1]\n"
- "add x22, x28, %[output_row_stride]\n"
- "add x13, x11, %[output_col_stride1]\n"
- "add x12, x22, %[output_row_stride]\n"
- "add x23, x13, %[output_col_stride1]\n"
- "add x14, x12, %[output_row_stride]\n"
- "add x15, x23, %[output_col_stride1]\n"
- "add x24, x14, %[output_row_stride]\n"
- "cmp %w[n_channels], #4\n"
- "blt 2f\n"
- "1:\n"
- "ldr q8, [%[inptr0], x20]\n"
- "ldr q2, [%[inptr0], x9]\n"
- "mov v14.16b, v8.16b\n"
- "ldr q9, [%[inptr0]]\n"
- "mov v10.16b, v8.16b\n"
- "ldr q1, [%[inptr0], x21]\n"
- "fmla v14.4s, v9.4s, v0.s[2]\n"
- "ldr q4, [%[inptr0], x19]\n"
- "mov v9.16b, v8.16b\n"
- "ldr q12, [%[inptr0], %[input_col_stride1]]\n"
- "fmls v10.4s, v12.4s, v0.s[2]\n"
- "ldr q5, [x16, x20]\n"
- "fmls v14.4s, v2.4s, v0.s[3]\n"
- "ldr q20, [x16, x9]\n"
- "fmla v9.4s, v12.4s, v0.s[2]\n"
- "ldr q3, [x16]\n"
- "fmls v10.4s, v2.4s, v0.s[2]\n"
- "ldr q6, [x16, x21]\n"
- "mov v7.16b, v8.16b\n"
- "ldr q16, [x16, x19]\n"
- "fmls v9.4s, v2.4s, v0.s[2]\n"
- "ldr q22, [x16, %[input_col_stride1]]\n"
- "fadd v10.4s, v10.4s, v4.4s\n"
- "ldr q17, [x17, x20]\n"
- "fmls v7.4s, v12.4s, v0.s[1]\n"
- "ldr q15, [x17, x9]\n"
- "fsub v9.4s, v9.4s, v4.4s\n"
- "ldr q19, [x17]\n"
- "mov v8.16b, v8.16b\n"
- "ldr q18, [x17, x21]\n"
- "fsub v7.4s, v7.4s, v2.4s\n"
- "ldr q13, [x17, x19]\n"
- "fmla v7.4s, v4.4s, v0.s[1]\n"
- "ldr q21, [x17, %[input_col_stride1]]\n"
- "fmla v8.4s, v12.4s, v0.s[1]\n"
- "add %[inptr0], %[inptr0], #16\n"
- "mov v11.16b, v1.16b\n"
- "add x16, x16, #16\n"
- "mov v1.16b, v5.16b\n"
- "add x17, x17, #16\n"
- "fsub v8.4s, v8.4s, v2.4s\n"
- "fmla v11.4s, v12.4s, v0.s[2]\n"
- "fmls v8.4s, v4.4s, v0.s[1]\n"
- "fmla v1.4s, v3.4s, v0.s[2]\n"
- "mov v2.16b, v5.16b\n"
- "mov v3.16b, v5.16b\n"
- "fmls v11.4s, v4.4s, v0.s[3]\n"
- "mov v4.16b, v5.16b\n"
- "fmls v1.4s, v20.4s, v0.s[3]\n"
- "fmls v2.4s, v22.4s, v0.s[2]\n"
- "fmla v3.4s, v22.4s, v0.s[2]\n"
- "fmls v4.4s, v22.4s, v0.s[1]\n"
- "mov v5.16b, v5.16b\n"
- "mov v6.16b, v6.16b\n"
- "fmls v2.4s, v20.4s, v0.s[2]\n"
- "mov v12.16b, v17.16b\n"
- "fmls v3.4s, v20.4s, v0.s[2]\n"
- "fsub v4.4s, v4.4s, v20.4s\n"
- "fmla v4.4s, v16.4s, v0.s[1]\n"
- "fmla v5.4s, v22.4s, v0.s[1]\n"
- "fadd v2.4s, v2.4s, v16.4s\n"
- "fmla v6.4s, v22.4s, v0.s[2]\n"
- "fsub v3.4s, v3.4s, v16.4s\n"
- "fmla v12.4s, v19.4s, v0.s[2]\n"
- "fsub v5.4s, v5.4s, v20.4s\n"
- "mov v19.16b, v17.16b\n"
- "fmls v5.4s, v16.4s, v0.s[1]\n"
- "fmls v6.4s, v16.4s, v0.s[3]\n"
- "fmls v12.4s, v15.4s, v0.s[3]\n"
- "fmls v19.4s, v21.4s, v0.s[2]\n"
- "mov v20.16b, v17.16b\n"
- "mov v16.16b, v17.16b\n"
- "mov v17.16b, v17.16b\n"
- "mov v18.16b, v18.16b\n"
- "fmls v19.4s, v15.4s, v0.s[2]\n"
- "fmla v20.4s, v21.4s, v0.s[2]\n"
- "fmls v16.4s, v21.4s, v0.s[1]\n"
- "fmla v17.4s, v21.4s, v0.s[1]\n"
- "fmla v18.4s, v21.4s, v0.s[2]\n"
- "mov v23.16b, v12.16b\n"
- "fadd v19.4s, v19.4s, v13.4s\n"
- "fmls v20.4s, v15.4s, v0.s[2]\n"
- "fsub v16.4s, v16.4s, v15.4s\n"
- "fsub v17.4s, v17.4s, v15.4s\n"
- "fmla v16.4s, v13.4s, v0.s[1]\n"
- "fmls v17.4s, v13.4s, v0.s[1]\n"
- "fsub v20.4s, v20.4s, v13.4s\n"
- "fmls v18.4s, v13.4s, v0.s[3]\n"
- "fmla v23.4s, v14.4s, v0.s[2]\n"
- "mov v15.16b, v19.16b\n"
- "mov v14.16b, v20.16b\n"
- "mov v24.16b, v16.16b\n"
- "fmla v15.4s, v10.4s, v0.s[2]\n"
- "mov v10.16b, v17.16b\n"
- "fmls v23.4s, v1.4s, v0.s[3]\n"
- "fmla v14.4s, v9.4s, v0.s[2]\n"
- "fmla v24.4s, v7.4s, v0.s[2]\n"
- "fmla v10.4s, v8.4s, v0.s[2]\n"
- "fmls v15.4s, v2.4s, v0.s[3]\n"
- "mov v7.16b, v18.16b\n"
- "str q23, [%[outptr0]]\n"
- "fmls v14.4s, v3.4s, v0.s[3]\n"
- "fmls v24.4s, v4.4s, v0.s[3]\n"
- "fmls v10.4s, v5.4s, v0.s[3]\n"
- "str q15, [%[outptr0], %[output_col_stride1]]\n"
- "fmla v7.4s, v11.4s, v0.s[2]\n"
- "str q14, [%[outptr0], x11]\n"
- "str q24, [%[outptr0], x13]\n"
- "str q10, [%[outptr0], x23]\n"
- "fmls v7.4s, v6.4s, v0.s[3]\n"
- "str q7, [%[outptr0], x15]\n"
- "add %[outptr0], %[outptr0], #16\n"
- "mov v26.16b, v12.16b\n"
- "mov v25.16b, v19.16b\n"
- "ldr q11, [x25, x20]\n"
- "mov v10.16b, v11.16b\n"
- "ldr q23, [x25, x9]\n"
- "mov v9.16b, v11.16b\n"
- "ldr q7, [x25]\n"
- "fmla v10.4s, v7.4s, v0.s[2]\n"
- "ldr q13, [x25, x21]\n"
- "mov v7.16b, v11.16b\n"
- "ldr q31, [x25, x19]\n"
- "mov v8.16b, v11.16b\n"
- "ldr q21, [x25, %[input_col_stride1]]\n"
- "fmls v10.4s, v23.4s, v0.s[3]\n"
- "ldr q30, [x26, x20]\n"
- "fmls v9.4s, v21.4s, v0.s[2]\n"
- "ldr q29, [x26, x9]\n"
- "fmla v7.4s, v21.4s, v0.s[2]\n"
- "ldr q22, [x26]\n"
- "fmls v8.4s, v21.4s, v0.s[1]\n"
- "ldr q24, [x26, x21]\n"
- "fmls v9.4s, v23.4s, v0.s[2]\n"
- "ldr q27, [x26, x19]\n"
- "fmls v7.4s, v23.4s, v0.s[2]\n"
- "ldr q28, [x26, %[input_col_stride1]]\n"
- "fsub v8.4s, v8.4s, v23.4s\n"
- "add x25, x25, #16\n"
- "fadd v9.4s, v9.4s, v31.4s\n"
- "add x26, x26, #16\n"
- "fsub v7.4s, v7.4s, v31.4s\n"
- "fmla v8.4s, v31.4s, v0.s[1]\n"
- "mov v11.16b, v11.16b\n"
- "mov v15.16b, v13.16b\n"
- "mov v14.16b, v30.16b\n"
- "mov v13.16b, v30.16b\n"
- "fmla v11.4s, v21.4s, v0.s[1]\n"
- "fmla v15.4s, v21.4s, v0.s[2]\n"
- "fmla v14.4s, v22.4s, v0.s[2]\n"
- "fmls v13.4s, v28.4s, v0.s[2]\n"
- "mov v21.16b, v30.16b\n"
- "mov v22.16b, v30.16b\n"
- "fsub v11.4s, v11.4s, v23.4s\n"
- "fmls v15.4s, v31.4s, v0.s[3]\n"
- "fmls v11.4s, v31.4s, v0.s[1]\n"
- "fmls v14.4s, v29.4s, v0.s[3]\n"
- "fmls v13.4s, v29.4s, v0.s[2]\n"
- "fmla v21.4s, v28.4s, v0.s[2]\n"
- "fmls v22.4s, v28.4s, v0.s[1]\n"
- "mov v23.16b, v30.16b\n"
- "mov v24.16b, v24.16b\n"
- "fmls v26.4s, v10.4s, v0.s[2]\n"
- "fadd v13.4s, v13.4s, v27.4s\n"
- "fmls v21.4s, v29.4s, v0.s[2]\n"
- "fsub v22.4s, v22.4s, v29.4s\n"
- "fmla v23.4s, v28.4s, v0.s[1]\n"
- "fmla v22.4s, v27.4s, v0.s[1]\n"
- "fmla v24.4s, v28.4s, v0.s[2]\n"
- "fsub v21.4s, v21.4s, v27.4s\n"
- "fmls v26.4s, v1.4s, v0.s[2]\n"
- "fsub v23.4s, v23.4s, v29.4s\n"
- "fmls v25.4s, v9.4s, v0.s[2]\n"
- "fmls v23.4s, v27.4s, v0.s[1]\n"
- "fmls v24.4s, v27.4s, v0.s[3]\n"
- "fadd v26.4s, v26.4s, v14.4s\n"
- "mov v27.16b, v20.16b\n"
- "str q26, [x28]\n"
- "fmls v25.4s, v2.4s, v0.s[2]\n"
- "fmls v27.4s, v7.4s, v0.s[2]\n"
- "mov v31.16b, v16.16b\n"
- "mov v30.16b, v17.16b\n"
- "mov v29.16b, v18.16b\n"
- "fadd v25.4s, v25.4s, v13.4s\n"
- "fmls v31.4s, v8.4s, v0.s[2]\n"
- "str q25, [x28, %[output_col_stride1]]\n"
- "fmls v27.4s, v3.4s, v0.s[2]\n"
- "fmls v30.4s, v11.4s, v0.s[2]\n"
- "fmls v29.4s, v15.4s, v0.s[2]\n"
- "fmls v31.4s, v4.4s, v0.s[2]\n"
- "mov v26.16b, v12.16b\n"
- "fadd v27.4s, v27.4s, v21.4s\n"
- "mov v25.16b, v19.16b\n"
- "str q27, [x28, x11]\n"
- "fmls v30.4s, v5.4s, v0.s[2]\n"
- "fadd v31.4s, v31.4s, v22.4s\n"
- "fmls v29.4s, v6.4s, v0.s[2]\n"
- "str q31, [x28, x13]\n"
- "fmla v26.4s, v10.4s, v0.s[2]\n"
- "fadd v30.4s, v30.4s, v23.4s\n"
- "fmla v25.4s, v9.4s, v0.s[2]\n"
- "str q30, [x28, x23]\n"
- "fadd v29.4s, v29.4s, v24.4s\n"
- "str q29, [x28, x15]\n"
- "fmls v26.4s, v1.4s, v0.s[2]\n"
- "fmls v25.4s, v2.4s, v0.s[2]\n"
- "add x28, x28, #16\n"
- "mov v30.16b, v20.16b\n"
- "mov v29.16b, v16.16b\n"
- "fsub v26.4s, v26.4s, v14.4s\n"
- "mov v28.16b, v17.16b\n"
- "str q26, [x22]\n"
- "fsub v25.4s, v25.4s, v13.4s\n"
- "str q25, [x22, %[output_col_stride1]]\n"
- "fmla v30.4s, v7.4s, v0.s[2]\n"
- "fmla v29.4s, v8.4s, v0.s[2]\n"
- "fmla v28.4s, v11.4s, v0.s[2]\n"
- "mov v26.16b, v18.16b\n"
- "mov v25.16b, v12.16b\n"
- "fmls v30.4s, v3.4s, v0.s[2]\n"
- "mov v31.16b, v19.16b\n"
- "fmls v29.4s, v4.4s, v0.s[2]\n"
- "fmls v28.4s, v5.4s, v0.s[2]\n"
- "fmla v26.4s, v15.4s, v0.s[2]\n"
- "fmls v25.4s, v10.4s, v0.s[1]\n"
- "fsub v30.4s, v30.4s, v21.4s\n"
- "fmls v31.4s, v9.4s, v0.s[1]\n"
- "str q30, [x22, x11]\n"
- "fsub v29.4s, v29.4s, v22.4s\n"
- "str q29, [x22, x13]\n"
- "fsub v28.4s, v28.4s, v23.4s\n"
- "str q28, [x22, x23]\n"
- "fmls v26.4s, v6.4s, v0.s[2]\n"
- "fsub v25.4s, v25.4s, v1.4s\n"
- "fsub v31.4s, v31.4s, v2.4s\n"
- "fmla v25.4s, v14.4s, v0.s[1]\n"
- "fmla v31.4s, v13.4s, v0.s[1]\n"
- "fsub v26.4s, v26.4s, v24.4s\n"
- "mov v27.16b, v20.16b\n"
- "str q26, [x22, x15]\n"
- "mov v26.16b, v16.16b\n"
- "str q25, [x12]\n"
- "fmls v27.4s, v7.4s, v0.s[1]\n"
- "str q31, [x12, %[output_col_stride1]]\n"
- "fmls v26.4s, v8.4s, v0.s[1]\n"
- "mov v25.16b, v17.16b\n"
- "add x22, x22, #16\n"
- "fsub v27.4s, v27.4s, v3.4s\n"
- "mov v28.16b, v18.16b\n"
- "fmla v27.4s, v21.4s, v0.s[1]\n"
- "fsub v26.4s, v26.4s, v4.4s\n"
- "fmla v26.4s, v22.4s, v0.s[1]\n"
- "fmls v25.4s, v11.4s, v0.s[1]\n"
- "fmls v28.4s, v15.4s, v0.s[1]\n"
- "mov v12.16b, v12.16b\n"
- "str q27, [x12, x11]\n"
- "mov v19.16b, v19.16b\n"
- "str q26, [x12, x13]\n"
- "fsub v25.4s, v25.4s, v5.4s\n"
- "fmla v25.4s, v23.4s, v0.s[1]\n"
- "fsub v28.4s, v28.4s, v6.4s\n"
- "fmla v28.4s, v24.4s, v0.s[1]\n"
- "fmla v12.4s, v10.4s, v0.s[1]\n"
- "fmla v19.4s, v9.4s, v0.s[1]\n"
- "mov v20.16b, v20.16b\n"
- "str q25, [x12, x23]\n"
- "mov v16.16b, v16.16b\n"
- "str q28, [x12, x15]\n"
- "fsub v12.4s, v12.4s, v1.4s\n"
- "fmls v12.4s, v14.4s, v0.s[1]\n"
- "add x12, x12, #16\n"
- "fsub v19.4s, v19.4s, v2.4s\n"
- "fmla v20.4s, v7.4s, v0.s[1]\n"
- "fmls v19.4s, v13.4s, v0.s[1]\n"
- "fmla v16.4s, v8.4s, v0.s[1]\n"
- "str q12, [x14]\n"
- "mov v1.16b, v17.16b\n"
- "fsub v20.4s, v20.4s, v3.4s\n"
- "mov v17.16b, v18.16b\n"
- "str q19, [x14, %[output_col_stride1]]\n"
- "fmls v20.4s, v21.4s, v0.s[1]\n"
- "fsub v16.4s, v16.4s, v4.4s\n"
- "fmla v1.4s, v11.4s, v0.s[1]\n"
- "fmls v16.4s, v22.4s, v0.s[1]\n"
- "fmla v17.4s, v15.4s, v0.s[1]\n"
- "str q20, [x14, x11]\n"
- "fsub v1.4s, v1.4s, v5.4s\n"
- "str q16, [x14, x13]\n"
- "fmls v1.4s, v23.4s, v0.s[1]\n"
- "fsub v17.4s, v17.4s, v6.4s\n"
- "fmls v17.4s, v24.4s, v0.s[1]\n"
- "str q1, [x14, x23]\n"
- "str q17, [x14, x15]\n"
- "add x14, x14, #16\n"
- "ldr q2, [x27, x20]\n"
- "mov v4.16b, v2.16b\n"
- "ldr q17, [x27, x9]\n"
- "mov v12.16b, v2.16b\n"
- "ldr q18, [x27]\n"
- "fmla v4.4s, v18.4s, v0.s[2]\n"
- "ldr q3, [x27, x21]\n"
- "mov v6.16b, v2.16b\n"
- "ldr q5, [x27, x19]\n"
- "mov v1.16b, v2.16b\n"
- "ldr q18, [x27, %[input_col_stride1]]\n"
- "fmls v4.4s, v17.4s, v0.s[3]\n"
- "add x27, x27, #16\n"
- "fmls v12.4s, v18.4s, v0.s[2]\n"
- "sub %w[n_channels], %w[n_channels], #4\n"
- "fmla v6.4s, v18.4s, v0.s[2]\n"
- "cmp %w[n_channels], #4\n"
- "fmls v1.4s, v18.4s, v0.s[1]\n"
- "mov v2.16b, v2.16b\n"
- "fmls v12.4s, v17.4s, v0.s[2]\n"
- "mov v3.16b, v3.16b\n"
- "fmls v6.4s, v17.4s, v0.s[2]\n"
- "fmla v2.4s, v18.4s, v0.s[1]\n"
- "fsub v1.4s, v1.4s, v17.4s\n"
- "fmla v3.4s, v18.4s, v0.s[2]\n"
- "fadd v12.4s, v12.4s, v5.4s\n"
- "fmla v1.4s, v5.4s, v0.s[1]\n"
- "fsub v6.4s, v6.4s, v5.4s\n"
- "fsub v2.4s, v2.4s, v17.4s\n"
- "fmls v2.4s, v5.4s, v0.s[1]\n"
- "fmls v3.4s, v5.4s, v0.s[3]\n"
- "mov v4.16b, v4.16b\n"
- "mov v16.16b, v12.16b\n"
- "mov v5.16b, v6.16b\n"
- "mov v6.16b, v1.16b\n"
- "fmla v4.4s, v10.4s, v0.s[2]\n"
- "fmla v16.4s, v9.4s, v0.s[2]\n"
- "fmla v5.4s, v7.4s, v0.s[2]\n"
- "fmla v6.4s, v8.4s, v0.s[2]\n"
- "mov v9.16b, v2.16b\n"
- "mov v10.16b, v3.16b\n"
- "fmls v4.4s, v14.4s, v0.s[3]\n"
- "fmls v16.4s, v13.4s, v0.s[3]\n"
- "fmls v5.4s, v21.4s, v0.s[3]\n"
- "fmls v6.4s, v22.4s, v0.s[3]\n"
- "fmla v9.4s, v11.4s, v0.s[2]\n"
- "fmla v10.4s, v15.4s, v0.s[2]\n"
- "str q4, [x24]\n"
- "str q16, [x24, %[output_col_stride1]]\n"
- "str q5, [x24, x11]\n"
- "str q6, [x24, x13]\n"
- "fmls v9.4s, v23.4s, v0.s[3]\n"
- "fmls v10.4s, v24.4s, v0.s[3]\n"
- "str q9, [x24, x23]\n"
- "str q10, [x24, x15]\n"
- "add x24, x24, #16\n"
- "bge 1b\n"
- "2:\n"
- "cmp %w[n_channels], #2\n"
- "blt 3f\n"
- "ldr d8, [%[inptr0], x20]\n"
- "mov v14.16b, v8.16b\n"
- "ldr d2, [%[inptr0], x9]\n"
- "mov v10.16b, v8.16b\n"
- "ldr d9, [%[inptr0]]\n"
- "fmla v14.4s, v9.4s, v0.s[2]\n"
- "ldr d1, [%[inptr0], x21]\n"
- "mov v9.16b, v8.16b\n"
- "ldr d4, [%[inptr0], x19]\n"
- "mov v7.16b, v8.16b\n"
- "ldr d12, [%[inptr0], %[input_col_stride1]]\n"
- "fmls v14.4s, v2.4s, v0.s[3]\n"
- "ldr d5, [x16, x20]\n"
- "fmls v10.4s, v12.4s, v0.s[2]\n"
- "ldr d20, [x16, x9]\n"
- "fmla v9.4s, v12.4s, v0.s[2]\n"
- "ldr d3, [x16]\n"
- "fmls v7.4s, v12.4s, v0.s[1]\n"
- "ldr d6, [x16, x21]\n"
- "fmls v10.4s, v2.4s, v0.s[2]\n"
- "ldr d16, [x16, x19]\n"
- "fmls v9.4s, v2.4s, v0.s[2]\n"
- "ldr d22, [x16, %[input_col_stride1]]\n"
- "fsub v7.4s, v7.4s, v2.4s\n"
- "ldr d17, [x17, x20]\n"
- "fadd v10.4s, v10.4s, v4.4s\n"
- "ldr d15, [x17, x9]\n"
- "fsub v9.4s, v9.4s, v4.4s\n"
- "ldr d19, [x17]\n"
- "fmla v7.4s, v4.4s, v0.s[1]\n"
- "ldr d18, [x17, x21]\n"
- "mov v8.16b, v8.16b\n"
- "ldr d13, [x17, x19]\n"
- "mov v11.16b, v1.16b\n"
- "ldr d21, [x17, %[input_col_stride1]]\n"
- "fmla v8.4s, v12.4s, v0.s[1]\n"
- "add %[inptr0], %[inptr0], #8\n"
- "fmla v11.4s, v12.4s, v0.s[2]\n"
- "add x16, x16, #8\n"
- "mov v1.16b, v5.16b\n"
- "add x17, x17, #8\n"
- "fsub v8.4s, v8.4s, v2.4s\n"
- "mov v2.16b, v5.16b\n"
- "fmls v8.4s, v4.4s, v0.s[1]\n"
- "fmls v11.4s, v4.4s, v0.s[3]\n"
- "fmla v1.4s, v3.4s, v0.s[2]\n"
- "fmls v2.4s, v22.4s, v0.s[2]\n"
- "mov v3.16b, v5.16b\n"
- "mov v4.16b, v5.16b\n"
- "mov v5.16b, v5.16b\n"
- "mov v6.16b, v6.16b\n"
- "fmls v1.4s, v20.4s, v0.s[3]\n"
- "fmls v2.4s, v20.4s, v0.s[2]\n"
- "fmla v3.4s, v22.4s, v0.s[2]\n"
- "fmls v4.4s, v22.4s, v0.s[1]\n"
- "fmla v5.4s, v22.4s, v0.s[1]\n"
- "fmla v6.4s, v22.4s, v0.s[2]\n"
- "fadd v2.4s, v2.4s, v16.4s\n"
- "mov v12.16b, v17.16b\n"
- "fmls v3.4s, v20.4s, v0.s[2]\n"
- "fsub v4.4s, v4.4s, v20.4s\n"
- "fmla v4.4s, v16.4s, v0.s[1]\n"
- "fsub v5.4s, v5.4s, v20.4s\n"
- "fmls v5.4s, v16.4s, v0.s[1]\n"
- "fmls v6.4s, v16.4s, v0.s[3]\n"
- "fsub v3.4s, v3.4s, v16.4s\n"
- "fmla v12.4s, v19.4s, v0.s[2]\n"
- "mov v19.16b, v17.16b\n"
- "mov v20.16b, v17.16b\n"
- "mov v16.16b, v17.16b\n"
- "mov v17.16b, v17.16b\n"
- "fmls v12.4s, v15.4s, v0.s[3]\n"
- "fmls v19.4s, v21.4s, v0.s[2]\n"
- "fmla v20.4s, v21.4s, v0.s[2]\n"
- "fmls v16.4s, v21.4s, v0.s[1]\n"
- "fmla v17.4s, v21.4s, v0.s[1]\n"
- "mov v18.16b, v18.16b\n"
- "fmls v19.4s, v15.4s, v0.s[2]\n"
- "mov v23.16b, v12.16b\n"
- "fmls v20.4s, v15.4s, v0.s[2]\n"
- "fsub v16.4s, v16.4s, v15.4s\n"
- "fmla v16.4s, v13.4s, v0.s[1]\n"
- "fsub v17.4s, v17.4s, v15.4s\n"
- "fadd v19.4s, v19.4s, v13.4s\n"
- "fmls v17.4s, v13.4s, v0.s[1]\n"
- "fsub v20.4s, v20.4s, v13.4s\n"
- "fmla v18.4s, v21.4s, v0.s[2]\n"
- "fmla v23.4s, v14.4s, v0.s[2]\n"
- "mov v15.16b, v19.16b\n"
- "mov v14.16b, v20.16b\n"
- "mov v24.16b, v16.16b\n"
- "fmls v18.4s, v13.4s, v0.s[3]\n"
- "fmla v15.4s, v10.4s, v0.s[2]\n"
- "fmls v23.4s, v1.4s, v0.s[3]\n"
- "fmla v14.4s, v9.4s, v0.s[2]\n"
- "fmla v24.4s, v7.4s, v0.s[2]\n"
- "mov v10.16b, v17.16b\n"
- "fmls v15.4s, v2.4s, v0.s[3]\n"
- "mov v7.16b, v18.16b\n"
- "str d23, [%[outptr0]]\n"
- "fmls v14.4s, v3.4s, v0.s[3]\n"
- "fmls v24.4s, v4.4s, v0.s[3]\n"
- "fmla v10.4s, v8.4s, v0.s[2]\n"
- "str d15, [%[outptr0], %[output_col_stride1]]\n"
- "fmla v7.4s, v11.4s, v0.s[2]\n"
- "str d14, [%[outptr0], x11]\n"
- "fmls v10.4s, v5.4s, v0.s[3]\n"
- "str d24, [%[outptr0], x13]\n"
- "fmls v7.4s, v6.4s, v0.s[3]\n"
- "str d10, [%[outptr0], x23]\n"
- "str d7, [%[outptr0], x15]\n"
- "add %[outptr0], %[outptr0], #8\n"
- "mov v26.16b, v12.16b\n"
- "mov v25.16b, v19.16b\n"
- "ldr d11, [x25, x20]\n"
- "mov v10.16b, v11.16b\n"
- "ldr d23, [x25, x9]\n"
- "mov v9.16b, v11.16b\n"
- "ldr d7, [x25]\n"
- "fmla v10.4s, v7.4s, v0.s[2]\n"
- "ldr d13, [x25, x21]\n"
- "mov v7.16b, v11.16b\n"
- "ldr d31, [x25, x19]\n"
- "mov v8.16b, v11.16b\n"
- "ldr d21, [x25, %[input_col_stride1]]\n"
- "fmls v10.4s, v23.4s, v0.s[3]\n"
- "ldr d30, [x26, x20]\n"
- "fmls v9.4s, v21.4s, v0.s[2]\n"
- "ldr d29, [x26, x9]\n"
- "fmla v7.4s, v21.4s, v0.s[2]\n"
- "ldr d22, [x26]\n"
- "fmls v8.4s, v21.4s, v0.s[1]\n"
- "ldr d24, [x26, x21]\n"
- "fmls v9.4s, v23.4s, v0.s[2]\n"
- "ldr d27, [x26, x19]\n"
- "fmls v7.4s, v23.4s, v0.s[2]\n"
- "ldr d28, [x26, %[input_col_stride1]]\n"
- "fsub v8.4s, v8.4s, v23.4s\n"
- "add x25, x25, #8\n"
- "fadd v9.4s, v9.4s, v31.4s\n"
- "add x26, x26, #8\n"
- "fsub v7.4s, v7.4s, v31.4s\n"
- "fmla v8.4s, v31.4s, v0.s[1]\n"
- "mov v11.16b, v11.16b\n"
- "mov v15.16b, v13.16b\n"
- "mov v14.16b, v30.16b\n"
- "mov v13.16b, v30.16b\n"
- "fmla v11.4s, v21.4s, v0.s[1]\n"
- "fmla v15.4s, v21.4s, v0.s[2]\n"
- "fmla v14.4s, v22.4s, v0.s[2]\n"
- "fmls v13.4s, v28.4s, v0.s[2]\n"
- "mov v21.16b, v30.16b\n"
- "mov v22.16b, v30.16b\n"
- "fsub v11.4s, v11.4s, v23.4s\n"
- "fmls v15.4s, v31.4s, v0.s[3]\n"
- "fmls v11.4s, v31.4s, v0.s[1]\n"
- "fmls v14.4s, v29.4s, v0.s[3]\n"
- "fmls v13.4s, v29.4s, v0.s[2]\n"
- "fmla v21.4s, v28.4s, v0.s[2]\n"
- "fmls v22.4s, v28.4s, v0.s[1]\n"
- "mov v23.16b, v30.16b\n"
- "mov v24.16b, v24.16b\n"
- "fmls v26.4s, v10.4s, v0.s[2]\n"
- "fadd v13.4s, v13.4s, v27.4s\n"
- "fmls v21.4s, v29.4s, v0.s[2]\n"
- "fsub v22.4s, v22.4s, v29.4s\n"
- "fmla v23.4s, v28.4s, v0.s[1]\n"
- "fmla v22.4s, v27.4s, v0.s[1]\n"
- "fmla v24.4s, v28.4s, v0.s[2]\n"
- "fsub v21.4s, v21.4s, v27.4s\n"
- "fmls v26.4s, v1.4s, v0.s[2]\n"
- "fsub v23.4s, v23.4s, v29.4s\n"
- "fmls v25.4s, v9.4s, v0.s[2]\n"
- "fmls v23.4s, v27.4s, v0.s[1]\n"
- "fmls v24.4s, v27.4s, v0.s[3]\n"
- "fadd v26.4s, v26.4s, v14.4s\n"
- "mov v27.16b, v20.16b\n"
- "str d26, [x28]\n"
- "fmls v25.4s, v2.4s, v0.s[2]\n"
- "fmls v27.4s, v7.4s, v0.s[2]\n"
- "mov v31.16b, v16.16b\n"
- "mov v30.16b, v17.16b\n"
- "mov v29.16b, v18.16b\n"
- "fadd v25.4s, v25.4s, v13.4s\n"
- "fmls v31.4s, v8.4s, v0.s[2]\n"
- "str d25, [x28, %[output_col_stride1]]\n"
- "fmls v27.4s, v3.4s, v0.s[2]\n"
- "fmls v30.4s, v11.4s, v0.s[2]\n"
- "fmls v29.4s, v15.4s, v0.s[2]\n"
- "fmls v31.4s, v4.4s, v0.s[2]\n"
- "mov v26.16b, v12.16b\n"
- "fadd v27.4s, v27.4s, v21.4s\n"
- "mov v25.16b, v19.16b\n"
- "str d27, [x28, x11]\n"
- "fmls v30.4s, v5.4s, v0.s[2]\n"
- "fadd v31.4s, v31.4s, v22.4s\n"
- "fmls v29.4s, v6.4s, v0.s[2]\n"
- "str d31, [x28, x13]\n"
- "fmla v26.4s, v10.4s, v0.s[2]\n"
- "fadd v30.4s, v30.4s, v23.4s\n"
- "fmla v25.4s, v9.4s, v0.s[2]\n"
- "str d30, [x28, x23]\n"
- "fadd v29.4s, v29.4s, v24.4s\n"
- "str d29, [x28, x15]\n"
- "fmls v26.4s, v1.4s, v0.s[2]\n"
- "fmls v25.4s, v2.4s, v0.s[2]\n"
- "add x28, x28, #8\n"
- "mov v30.16b, v20.16b\n"
- "mov v29.16b, v16.16b\n"
- "fsub v26.4s, v26.4s, v14.4s\n"
- "mov v28.16b, v17.16b\n"
- "str d26, [x22]\n"
- "fsub v25.4s, v25.4s, v13.4s\n"
- "str d25, [x22, %[output_col_stride1]]\n"
- "fmla v30.4s, v7.4s, v0.s[2]\n"
- "fmla v29.4s, v8.4s, v0.s[2]\n"
- "fmla v28.4s, v11.4s, v0.s[2]\n"
- "mov v26.16b, v18.16b\n"
- "mov v25.16b, v12.16b\n"
- "fmls v30.4s, v3.4s, v0.s[2]\n"
- "mov v31.16b, v19.16b\n"
- "fmls v29.4s, v4.4s, v0.s[2]\n"
- "fmls v28.4s, v5.4s, v0.s[2]\n"
- "fmla v26.4s, v15.4s, v0.s[2]\n"
- "fmls v25.4s, v10.4s, v0.s[1]\n"
- "fsub v30.4s, v30.4s, v21.4s\n"
- "fmls v31.4s, v9.4s, v0.s[1]\n"
- "str d30, [x22, x11]\n"
- "fsub v29.4s, v29.4s, v22.4s\n"
- "str d29, [x22, x13]\n"
- "fsub v28.4s, v28.4s, v23.4s\n"
- "str d28, [x22, x23]\n"
- "fmls v26.4s, v6.4s, v0.s[2]\n"
- "fsub v25.4s, v25.4s, v1.4s\n"
- "fsub v31.4s, v31.4s, v2.4s\n"
- "fmla v25.4s, v14.4s, v0.s[1]\n"
- "fmla v31.4s, v13.4s, v0.s[1]\n"
- "fsub v26.4s, v26.4s, v24.4s\n"
- "mov v27.16b, v20.16b\n"
- "str d26, [x22, x15]\n"
- "mov v26.16b, v16.16b\n"
- "str d25, [x12]\n"
- "fmls v27.4s, v7.4s, v0.s[1]\n"
- "str d31, [x12, %[output_col_stride1]]\n"
- "fmls v26.4s, v8.4s, v0.s[1]\n"
- "mov v25.16b, v17.16b\n"
- "add x22, x22, #8\n"
- "fsub v27.4s, v27.4s, v3.4s\n"
- "mov v28.16b, v18.16b\n"
- "fmla v27.4s, v21.4s, v0.s[1]\n"
- "fsub v26.4s, v26.4s, v4.4s\n"
- "fmla v26.4s, v22.4s, v0.s[1]\n"
- "fmls v25.4s, v11.4s, v0.s[1]\n"
- "fmls v28.4s, v15.4s, v0.s[1]\n"
- "mov v12.16b, v12.16b\n"
- "str d27, [x12, x11]\n"
- "mov v19.16b, v19.16b\n"
- "str d26, [x12, x13]\n"
- "fsub v25.4s, v25.4s, v5.4s\n"
- "fmla v25.4s, v23.4s, v0.s[1]\n"
- "fsub v28.4s, v28.4s, v6.4s\n"
- "fmla v28.4s, v24.4s, v0.s[1]\n"
- "fmla v12.4s, v10.4s, v0.s[1]\n"
- "fmla v19.4s, v9.4s, v0.s[1]\n"
- "mov v20.16b, v20.16b\n"
- "str d25, [x12, x23]\n"
- "mov v16.16b, v16.16b\n"
- "str d28, [x12, x15]\n"
- "fsub v12.4s, v12.4s, v1.4s\n"
- "fmls v12.4s, v14.4s, v0.s[1]\n"
- "add x12, x12, #8\n"
- "fsub v19.4s, v19.4s, v2.4s\n"
- "fmla v20.4s, v7.4s, v0.s[1]\n"
- "fmls v19.4s, v13.4s, v0.s[1]\n"
- "fmla v16.4s, v8.4s, v0.s[1]\n"
- "str d12, [x14]\n"
- "mov v1.16b, v17.16b\n"
- "fsub v20.4s, v20.4s, v3.4s\n"
- "mov v17.16b, v18.16b\n"
- "str d19, [x14, %[output_col_stride1]]\n"
- "fmls v20.4s, v21.4s, v0.s[1]\n"
- "fsub v16.4s, v16.4s, v4.4s\n"
- "fmla v1.4s, v11.4s, v0.s[1]\n"
- "fmls v16.4s, v22.4s, v0.s[1]\n"
- "fmla v17.4s, v15.4s, v0.s[1]\n"
- "str d20, [x14, x11]\n"
- "fsub v1.4s, v1.4s, v5.4s\n"
- "str d16, [x14, x13]\n"
- "fmls v1.4s, v23.4s, v0.s[1]\n"
- "fsub v17.4s, v17.4s, v6.4s\n"
- "fmls v17.4s, v24.4s, v0.s[1]\n"
- "str d1, [x14, x23]\n"
- "str d17, [x14, x15]\n"
- "add x14, x14, #8\n"
- "ldr d2, [x27, x20]\n"
- "mov v4.16b, v2.16b\n"
- "ldr d17, [x27, x9]\n"
- "mov v12.16b, v2.16b\n"
- "ldr d18, [x27]\n"
- "fmla v4.4s, v18.4s, v0.s[2]\n"
- "ldr d3, [x27, x21]\n"
- "mov v6.16b, v2.16b\n"
- "ldr d5, [x27, x19]\n"
- "mov v1.16b, v2.16b\n"
- "ldr d18, [x27, %[input_col_stride1]]\n"
- "fmls v4.4s, v17.4s, v0.s[3]\n"
- "add x27, x27, #8\n"
- "fmls v12.4s, v18.4s, v0.s[2]\n"
- "sub %w[n_channels], %w[n_channels], #2\n"
- "fmla v6.4s, v18.4s, v0.s[2]\n"
- "fmls v1.4s, v18.4s, v0.s[1]\n"
- "mov v2.16b, v2.16b\n"
- "mov v3.16b, v3.16b\n"
- "fmls v12.4s, v17.4s, v0.s[2]\n"
- "mov v4.16b, v4.16b\n"
- "fmls v6.4s, v17.4s, v0.s[2]\n"
- "fsub v1.4s, v1.4s, v17.4s\n"
- "fmla v1.4s, v5.4s, v0.s[1]\n"
- "fmla v2.4s, v18.4s, v0.s[1]\n"
- "fadd v12.4s, v12.4s, v5.4s\n"
- "fmla v3.4s, v18.4s, v0.s[2]\n"
- "fsub v6.4s, v6.4s, v5.4s\n"
- "fmla v4.4s, v10.4s, v0.s[2]\n"
- "fsub v2.4s, v2.4s, v17.4s\n"
- "mov v16.16b, v12.16b\n"
- "fmls v2.4s, v5.4s, v0.s[1]\n"
- "fmls v3.4s, v5.4s, v0.s[3]\n"
- "fmls v4.4s, v14.4s, v0.s[3]\n"
- "fmla v16.4s, v9.4s, v0.s[2]\n"
- "mov v5.16b, v6.16b\n"
- "mov v6.16b, v1.16b\n"
- "mov v9.16b, v2.16b\n"
- "mov v10.16b, v3.16b\n"
- "str d4, [x24]\n"
- "fmls v16.4s, v13.4s, v0.s[3]\n"
- "fmla v5.4s, v7.4s, v0.s[2]\n"
- "fmla v6.4s, v8.4s, v0.s[2]\n"
- "fmla v9.4s, v11.4s, v0.s[2]\n"
- "fmla v10.4s, v15.4s, v0.s[2]\n"
- "str d16, [x24, %[output_col_stride1]]\n"
- "fmls v5.4s, v21.4s, v0.s[3]\n"
- "fmls v6.4s, v22.4s, v0.s[3]\n"
- "fmls v9.4s, v23.4s, v0.s[3]\n"
- "fmls v10.4s, v24.4s, v0.s[3]\n"
- "str d5, [x24, x11]\n"
- "str d6, [x24, x13]\n"
- "str d9, [x24, x23]\n"
- "str d10, [x24, x15]\n"
- "add x24, x24, #8\n"
- "3:\n"
- "cbz %w[n_channels], 4f\n"
- "ldr s8, [%[inptr0], x20]\n"
- "mov v14.16b, v8.16b\n"
- "ldr s2, [%[inptr0], x9]\n"
- "mov v10.16b, v8.16b\n"
- "ldr s9, [%[inptr0]]\n"
- "fmla v14.4s, v9.4s, v0.s[2]\n"
- "ldr s1, [%[inptr0], x21]\n"
- "mov v9.16b, v8.16b\n"
- "ldr s4, [%[inptr0], x19]\n"
- "mov v7.16b, v8.16b\n"
- "ldr s12, [%[inptr0], %[input_col_stride1]]\n"
- "fmls v14.4s, v2.4s, v0.s[3]\n"
- "ldr s5, [x16, x20]\n"
- "fmls v10.4s, v12.4s, v0.s[2]\n"
- "ldr s20, [x16, x9]\n"
- "fmla v9.4s, v12.4s, v0.s[2]\n"
- "ldr s3, [x16]\n"
- "fmls v7.4s, v12.4s, v0.s[1]\n"
- "ldr s6, [x16, x21]\n"
- "fmls v10.4s, v2.4s, v0.s[2]\n"
- "ldr s16, [x16, x19]\n"
- "fmls v9.4s, v2.4s, v0.s[2]\n"
- "ldr s22, [x16, %[input_col_stride1]]\n"
- "fsub v7.4s, v7.4s, v2.4s\n"
- "ldr s17, [x17, x20]\n"
- "fadd v10.4s, v10.4s, v4.4s\n"
- "ldr s15, [x17, x9]\n"
- "fsub v9.4s, v9.4s, v4.4s\n"
- "ldr s19, [x17]\n"
- "fmla v7.4s, v4.4s, v0.s[1]\n"
- "ldr s18, [x17, x21]\n"
- "mov v8.16b, v8.16b\n"
- "ldr s13, [x17, x19]\n"
- "mov v11.16b, v1.16b\n"
- "ldr s21, [x17, %[input_col_stride1]]\n"
- "fmla v8.4s, v12.4s, v0.s[1]\n"
- "add %[inptr0], %[inptr0], #4\n"
- "fmla v11.4s, v12.4s, v0.s[2]\n"
- "add x16, x16, #4\n"
- "mov v1.16b, v5.16b\n"
- "add x17, x17, #4\n"
- "fsub v8.4s, v8.4s, v2.4s\n"
- "mov v2.16b, v5.16b\n"
- "fmls v8.4s, v4.4s, v0.s[1]\n"
- "fmls v11.4s, v4.4s, v0.s[3]\n"
- "fmla v1.4s, v3.4s, v0.s[2]\n"
- "fmls v2.4s, v22.4s, v0.s[2]\n"
- "mov v3.16b, v5.16b\n"
- "mov v4.16b, v5.16b\n"
- "mov v5.16b, v5.16b\n"
- "mov v6.16b, v6.16b\n"
- "fmls v1.4s, v20.4s, v0.s[3]\n"
- "fmls v2.4s, v20.4s, v0.s[2]\n"
- "fmla v3.4s, v22.4s, v0.s[2]\n"
- "fmls v4.4s, v22.4s, v0.s[1]\n"
- "fmla v5.4s, v22.4s, v0.s[1]\n"
- "fmla v6.4s, v22.4s, v0.s[2]\n"
- "fadd v2.4s, v2.4s, v16.4s\n"
- "mov v12.16b, v17.16b\n"
- "fmls v3.4s, v20.4s, v0.s[2]\n"
- "fsub v4.4s, v4.4s, v20.4s\n"
- "fmla v4.4s, v16.4s, v0.s[1]\n"
- "fsub v5.4s, v5.4s, v20.4s\n"
- "fmls v5.4s, v16.4s, v0.s[1]\n"
- "fmls v6.4s, v16.4s, v0.s[3]\n"
- "fsub v3.4s, v3.4s, v16.4s\n"
- "fmla v12.4s, v19.4s, v0.s[2]\n"
- "mov v19.16b, v17.16b\n"
- "mov v20.16b, v17.16b\n"
- "mov v16.16b, v17.16b\n"
- "mov v17.16b, v17.16b\n"
- "fmls v12.4s, v15.4s, v0.s[3]\n"
- "fmls v19.4s, v21.4s, v0.s[2]\n"
- "fmla v20.4s, v21.4s, v0.s[2]\n"
- "fmls v16.4s, v21.4s, v0.s[1]\n"
- "fmla v17.4s, v21.4s, v0.s[1]\n"
- "mov v18.16b, v18.16b\n"
- "fmls v19.4s, v15.4s, v0.s[2]\n"
- "mov v23.16b, v12.16b\n"
- "fmls v20.4s, v15.4s, v0.s[2]\n"
- "fsub v16.4s, v16.4s, v15.4s\n"
- "fmla v16.4s, v13.4s, v0.s[1]\n"
- "fsub v17.4s, v17.4s, v15.4s\n"
- "fadd v19.4s, v19.4s, v13.4s\n"
- "fmls v17.4s, v13.4s, v0.s[1]\n"
- "fsub v20.4s, v20.4s, v13.4s\n"
- "fmla v18.4s, v21.4s, v0.s[2]\n"
- "fmla v23.4s, v14.4s, v0.s[2]\n"
- "mov v15.16b, v19.16b\n"
- "mov v14.16b, v20.16b\n"
- "mov v24.16b, v16.16b\n"
- "fmls v18.4s, v13.4s, v0.s[3]\n"
- "fmla v15.4s, v10.4s, v0.s[2]\n"
- "fmls v23.4s, v1.4s, v0.s[3]\n"
- "fmla v14.4s, v9.4s, v0.s[2]\n"
- "fmla v24.4s, v7.4s, v0.s[2]\n"
- "mov v10.16b, v17.16b\n"
- "fmls v15.4s, v2.4s, v0.s[3]\n"
- "mov v7.16b, v18.16b\n"
- "str s23, [%[outptr0]]\n"
- "fmls v14.4s, v3.4s, v0.s[3]\n"
- "fmls v24.4s, v4.4s, v0.s[3]\n"
- "fmla v10.4s, v8.4s, v0.s[2]\n"
- "str s15, [%[outptr0], %[output_col_stride1]]\n"
- "fmla v7.4s, v11.4s, v0.s[2]\n"
- "str s14, [%[outptr0], x11]\n"
- "fmls v10.4s, v5.4s, v0.s[3]\n"
- "str s24, [%[outptr0], x13]\n"
- "fmls v7.4s, v6.4s, v0.s[3]\n"
- "str s10, [%[outptr0], x23]\n"
- "str s7, [%[outptr0], x15]\n"
- "add %[outptr0], %[outptr0], #4\n"
- "mov v26.16b, v12.16b\n"
- "mov v25.16b, v19.16b\n"
- "ldr s11, [x25, x20]\n"
- "mov v10.16b, v11.16b\n"
- "ldr s23, [x25, x9]\n"
- "mov v9.16b, v11.16b\n"
- "ldr s7, [x25]\n"
- "fmla v10.4s, v7.4s, v0.s[2]\n"
- "ldr s13, [x25, x21]\n"
- "mov v7.16b, v11.16b\n"
- "ldr s31, [x25, x19]\n"
- "mov v8.16b, v11.16b\n"
- "ldr s21, [x25, %[input_col_stride1]]\n"
- "fmls v10.4s, v23.4s, v0.s[3]\n"
- "ldr s30, [x26, x20]\n"
- "fmls v9.4s, v21.4s, v0.s[2]\n"
- "ldr s29, [x26, x9]\n"
- "fmla v7.4s, v21.4s, v0.s[2]\n"
- "ldr s22, [x26]\n"
- "fmls v8.4s, v21.4s, v0.s[1]\n"
- "ldr s24, [x26, x21]\n"
- "fmls v9.4s, v23.4s, v0.s[2]\n"
- "ldr s27, [x26, x19]\n"
- "fmls v7.4s, v23.4s, v0.s[2]\n"
- "ldr s28, [x26, %[input_col_stride1]]\n"
- "fsub v8.4s, v8.4s, v23.4s\n"
- "add x25, x25, #4\n"
- "fadd v9.4s, v9.4s, v31.4s\n"
- "add x26, x26, #4\n"
- "fsub v7.4s, v7.4s, v31.4s\n"
- "fmla v8.4s, v31.4s, v0.s[1]\n"
- "mov v11.16b, v11.16b\n"
- "mov v15.16b, v13.16b\n"
- "mov v14.16b, v30.16b\n"
- "mov v13.16b, v30.16b\n"
- "fmla v11.4s, v21.4s, v0.s[1]\n"
- "fmla v15.4s, v21.4s, v0.s[2]\n"
- "fmla v14.4s, v22.4s, v0.s[2]\n"
- "fmls v13.4s, v28.4s, v0.s[2]\n"
- "mov v21.16b, v30.16b\n"
- "mov v22.16b, v30.16b\n"
- "fsub v11.4s, v11.4s, v23.4s\n"
- "fmls v15.4s, v31.4s, v0.s[3]\n"
- "fmls v11.4s, v31.4s, v0.s[1]\n"
- "fmls v14.4s, v29.4s, v0.s[3]\n"
- "fmls v13.4s, v29.4s, v0.s[2]\n"
- "fmla v21.4s, v28.4s, v0.s[2]\n"
- "fmls v22.4s, v28.4s, v0.s[1]\n"
- "mov v23.16b, v30.16b\n"
- "mov v24.16b, v24.16b\n"
- "fmls v26.4s, v10.4s, v0.s[2]\n"
- "fadd v13.4s, v13.4s, v27.4s\n"
- "fmls v21.4s, v29.4s, v0.s[2]\n"
- "fsub v22.4s, v22.4s, v29.4s\n"
- "fmla v23.4s, v28.4s, v0.s[1]\n"
- "fmla v22.4s, v27.4s, v0.s[1]\n"
- "fmla v24.4s, v28.4s, v0.s[2]\n"
- "fsub v21.4s, v21.4s, v27.4s\n"
- "fmls v26.4s, v1.4s, v0.s[2]\n"
- "fsub v23.4s, v23.4s, v29.4s\n"
- "fmls v25.4s, v9.4s, v0.s[2]\n"
- "fmls v23.4s, v27.4s, v0.s[1]\n"
- "fmls v24.4s, v27.4s, v0.s[3]\n"
- "fadd v26.4s, v26.4s, v14.4s\n"
- "mov v27.16b, v20.16b\n"
- "str s26, [x28]\n"
- "fmls v25.4s, v2.4s, v0.s[2]\n"
- "fmls v27.4s, v7.4s, v0.s[2]\n"
- "mov v31.16b, v16.16b\n"
- "mov v30.16b, v17.16b\n"
- "mov v29.16b, v18.16b\n"
- "fadd v25.4s, v25.4s, v13.4s\n"
- "fmls v31.4s, v8.4s, v0.s[2]\n"
- "str s25, [x28, %[output_col_stride1]]\n"
- "fmls v27.4s, v3.4s, v0.s[2]\n"
- "fmls v30.4s, v11.4s, v0.s[2]\n"
- "fmls v29.4s, v15.4s, v0.s[2]\n"
- "fmls v31.4s, v4.4s, v0.s[2]\n"
- "mov v26.16b, v12.16b\n"
- "fadd v27.4s, v27.4s, v21.4s\n"
- "mov v25.16b, v19.16b\n"
- "str s27, [x28, x11]\n"
- "fmls v30.4s, v5.4s, v0.s[2]\n"
- "fadd v31.4s, v31.4s, v22.4s\n"
- "fmls v29.4s, v6.4s, v0.s[2]\n"
- "str s31, [x28, x13]\n"
- "fmla v26.4s, v10.4s, v0.s[2]\n"
- "fadd v30.4s, v30.4s, v23.4s\n"
- "fmla v25.4s, v9.4s, v0.s[2]\n"
- "str s30, [x28, x23]\n"
- "fadd v29.4s, v29.4s, v24.4s\n"
- "str s29, [x28, x15]\n"
- "fmls v26.4s, v1.4s, v0.s[2]\n"
- "fmls v25.4s, v2.4s, v0.s[2]\n"
- "add x28, x28, #4\n"
- "mov v30.16b, v20.16b\n"
- "mov v29.16b, v16.16b\n"
- "fsub v26.4s, v26.4s, v14.4s\n"
- "mov v28.16b, v17.16b\n"
- "str s26, [x22]\n"
- "fsub v25.4s, v25.4s, v13.4s\n"
- "str s25, [x22, %[output_col_stride1]]\n"
- "fmla v30.4s, v7.4s, v0.s[2]\n"
- "fmla v29.4s, v8.4s, v0.s[2]\n"
- "fmla v28.4s, v11.4s, v0.s[2]\n"
- "mov v26.16b, v18.16b\n"
- "mov v25.16b, v12.16b\n"
- "fmls v30.4s, v3.4s, v0.s[2]\n"
- "mov v31.16b, v19.16b\n"
- "fmls v29.4s, v4.4s, v0.s[2]\n"
- "fmls v28.4s, v5.4s, v0.s[2]\n"
- "fmla v26.4s, v15.4s, v0.s[2]\n"
- "fmls v25.4s, v10.4s, v0.s[1]\n"
- "fsub v30.4s, v30.4s, v21.4s\n"
- "fmls v31.4s, v9.4s, v0.s[1]\n"
- "str s30, [x22, x11]\n"
- "fsub v29.4s, v29.4s, v22.4s\n"
- "str s29, [x22, x13]\n"
- "fsub v28.4s, v28.4s, v23.4s\n"
- "str s28, [x22, x23]\n"
- "fmls v26.4s, v6.4s, v0.s[2]\n"
- "fsub v25.4s, v25.4s, v1.4s\n"
- "fsub v31.4s, v31.4s, v2.4s\n"
- "fmla v25.4s, v14.4s, v0.s[1]\n"
- "fmla v31.4s, v13.4s, v0.s[1]\n"
- "fsub v26.4s, v26.4s, v24.4s\n"
- "mov v27.16b, v20.16b\n"
- "str s26, [x22, x15]\n"
- "mov v26.16b, v16.16b\n"
- "str s25, [x12]\n"
- "fmls v27.4s, v7.4s, v0.s[1]\n"
- "str s31, [x12, %[output_col_stride1]]\n"
- "fmls v26.4s, v8.4s, v0.s[1]\n"
- "mov v25.16b, v17.16b\n"
- "add x22, x22, #4\n"
- "fsub v27.4s, v27.4s, v3.4s\n"
- "mov v28.16b, v18.16b\n"
- "fmla v27.4s, v21.4s, v0.s[1]\n"
- "fsub v26.4s, v26.4s, v4.4s\n"
- "fmla v26.4s, v22.4s, v0.s[1]\n"
- "fmls v25.4s, v11.4s, v0.s[1]\n"
- "fmls v28.4s, v15.4s, v0.s[1]\n"
- "mov v12.16b, v12.16b\n"
- "str s27, [x12, x11]\n"
- "mov v19.16b, v19.16b\n"
- "str s26, [x12, x13]\n"
- "fsub v25.4s, v25.4s, v5.4s\n"
- "fmla v25.4s, v23.4s, v0.s[1]\n"
- "fsub v28.4s, v28.4s, v6.4s\n"
- "fmla v28.4s, v24.4s, v0.s[1]\n"
- "fmla v12.4s, v10.4s, v0.s[1]\n"
- "fmla v19.4s, v9.4s, v0.s[1]\n"
- "mov v20.16b, v20.16b\n"
- "str s25, [x12, x23]\n"
- "mov v16.16b, v16.16b\n"
- "str s28, [x12, x15]\n"
- "fsub v12.4s, v12.4s, v1.4s\n"
- "fmls v12.4s, v14.4s, v0.s[1]\n"
- "add x12, x12, #4\n"
- "fsub v19.4s, v19.4s, v2.4s\n"
- "fmla v20.4s, v7.4s, v0.s[1]\n"
- "fmls v19.4s, v13.4s, v0.s[1]\n"
- "fmla v16.4s, v8.4s, v0.s[1]\n"
- "str s12, [x14]\n"
- "mov v1.16b, v17.16b\n"
- "fsub v20.4s, v20.4s, v3.4s\n"
- "mov v17.16b, v18.16b\n"
- "str s19, [x14, %[output_col_stride1]]\n"
- "fmls v20.4s, v21.4s, v0.s[1]\n"
- "fsub v16.4s, v16.4s, v4.4s\n"
- "fmla v1.4s, v11.4s, v0.s[1]\n"
- "fmls v16.4s, v22.4s, v0.s[1]\n"
- "fmla v17.4s, v15.4s, v0.s[1]\n"
- "str s20, [x14, x11]\n"
- "fsub v1.4s, v1.4s, v5.4s\n"
- "str s16, [x14, x13]\n"
- "fmls v1.4s, v23.4s, v0.s[1]\n"
- "fsub v17.4s, v17.4s, v6.4s\n"
- "fmls v17.4s, v24.4s, v0.s[1]\n"
- "str s1, [x14, x23]\n"
- "str s17, [x14, x15]\n"
- "add x14, x14, #4\n"
- "ldr s2, [x27, x20]\n"
- "mov v4.16b, v2.16b\n"
- "ldr s17, [x27, x9]\n"
- "mov v12.16b, v2.16b\n"
- "ldr s18, [x27]\n"
- "fmla v4.4s, v18.4s, v0.s[2]\n"
- "ldr s3, [x27, x21]\n"
- "mov v6.16b, v2.16b\n"
- "ldr s5, [x27, x19]\n"
- "mov v1.16b, v2.16b\n"
- "ldr s18, [x27, %[input_col_stride1]]\n"
- "fmls v4.4s, v17.4s, v0.s[3]\n"
- "add x27, x27, #4\n"
- "fmls v12.4s, v18.4s, v0.s[2]\n"
- "fmla v6.4s, v18.4s, v0.s[2]\n"
- "fmls v1.4s, v18.4s, v0.s[1]\n"
- "mov v2.16b, v2.16b\n"
- "mov v3.16b, v3.16b\n"
- "mov v4.16b, v4.16b\n"
- "fmls v12.4s, v17.4s, v0.s[2]\n"
- "fmls v6.4s, v17.4s, v0.s[2]\n"
- "fsub v1.4s, v1.4s, v17.4s\n"
- "fmla v2.4s, v18.4s, v0.s[1]\n"
- "fmla v1.4s, v5.4s, v0.s[1]\n"
- "fmla v3.4s, v18.4s, v0.s[2]\n"
- "fadd v12.4s, v12.4s, v5.4s\n"
- "fsub v6.4s, v6.4s, v5.4s\n"
- "fsub v2.4s, v2.4s, v17.4s\n"
- "fmla v4.4s, v10.4s, v0.s[2]\n"
- "fmls v2.4s, v5.4s, v0.s[1]\n"
- "fmls v3.4s, v5.4s, v0.s[3]\n"
- "mov v16.16b, v12.16b\n"
- "mov v5.16b, v6.16b\n"
- "fmls v4.4s, v14.4s, v0.s[3]\n"
- "mov v6.16b, v1.16b\n"
- "fmla v16.4s, v9.4s, v0.s[2]\n"
- "fmla v5.4s, v7.4s, v0.s[2]\n"
- "fmla v6.4s, v8.4s, v0.s[2]\n"
- "mov v9.16b, v2.16b\n"
- "str s4, [x24]\n"
- "mov v10.16b, v3.16b\n"
- "fmls v16.4s, v13.4s, v0.s[3]\n"
- "fmls v5.4s, v21.4s, v0.s[3]\n"
- "fmls v6.4s, v22.4s, v0.s[3]\n"
- "fmla v9.4s, v11.4s, v0.s[2]\n"
- "fmla v10.4s, v15.4s, v0.s[2]\n"
- "str s16, [x24, %[output_col_stride1]]\n"
- "str s5, [x24, x11]\n"
- "fmls v9.4s, v23.4s, v0.s[3]\n"
- "str s6, [x24, x13]\n"
- "fmls v10.4s, v24.4s, v0.s[3]\n"
- "str s9, [x24, x23]\n"
- "str s10, [x24, x15]\n"
- "add x24, x24, #4\n"
- "4:\n"
- : [outptr0] "+r" (matrix_base),
- [n_channels] "+r" (n_channels),
- [inptr0] "+r" (input_base)
- : [pcoeffs] "r" (pcoeffs),
- [output_row_stride] "r" (6 * matrix_stride * sizeof(float)),
- [output_col_stride1] "r" (matrix_stride * sizeof(float)),
- [input_row_stride] "r" (input_row_stride * sizeof(float)),
- [input_col_stride1] "r" (input_col_stride * sizeof(float))
- : "cc", "v0", "v1", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17",
- "v18", "v19", "v2", "v20", "v21", "v22", "v23", "v24", "v25", "v26",
- "v27", "v28", "v29", "v3", "v30", "v31", "v4", "v5", "v6", "v7", "v8",
- "v9", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x9", "x19",
- "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "memory"
- );
-}
-
-#else // __arm__ not __aarch64__
-
-template <>
-void InputTransform<6, 6, float, float, WinogradRoots::Integers>::transform_tile(
- const int n_channels,
- const float* const input_base,
- const int input_row_stride,
- const int input_col_stride,
- float* outptr,
- const int matrix_stride
-)
-{
- constexpr int inner_tile_rows = 6;
- constexpr int inner_tile_cols = 6;
-
- // Get pointers into the input tile
- const float *x_ptrs[inner_tile_rows][inner_tile_cols];
- for (int i = 0, xi = 0; i < inner_tile_rows; i++, xi++)
- {
- // Get a pointer into the row
- const float* const row_ptr = input_base + xi*input_row_stride;
-
- for (int j = 0, xj = 0; j < inner_tile_cols; j++, xj++)
- {
- x_ptrs[i][j] = row_ptr + xj*input_col_stride;
- }
- }
-
- // Matrices used/computed in this kernel.
- float x[inner_tile_rows][inner_tile_cols];
- float XTx[inner_tile_rows][inner_tile_cols];
- float U[inner_tile_rows][inner_tile_cols];
- for (int i = 0; i < inner_tile_rows; i++)
- {
- for (int j = 0; j < inner_tile_cols; j++)
- {
- x[i][j] = XTx[i][j] = 0.0f;
- }
- }
-
- // Perform the Winograd input transformation for each channel in the input
- // tensor.
- int channels_remaining = n_channels;
- for (; channels_remaining >= 2; channels_remaining -= 2)
- {
- // Matrices used/computed in this kernel
- float32x2_t x[inner_tile_rows][inner_tile_cols];
- float32x2_t XTx[inner_tile_rows][inner_tile_cols];
- float32x2_t U[inner_tile_rows][inner_tile_cols];
- for (int i = 0; i < inner_tile_rows; i++)
- {
- for (int j = 0; j < inner_tile_cols; j++)
- {
- x[i][j] = vdup_n_f32(0.0f);
- XTx[i][j] = vdup_n_f32(0.0f);
- }
- }
-
- // Read a 6x6 tile in the Winograd domain
- for (int i = 0; i < inner_tile_rows; i++)
- {
- for (int j = 0; j < inner_tile_cols; j++)
- {
- x[i][j] = vld1_f32(x_ptrs[i][j]);
- x_ptrs[i][j] += 2;
- }
- }
-
- // Compute XT . x
- for (int j = 0; j < inner_tile_cols; j++)
- {
- // XTx[0][j] = 4*x[0][j] + -5*x[2][j] + 1*x[4][j];
- XTx[0][j] = vmls_n_f32(vmla_n_f32(x[4][j], x[0][j], 4.0f), x[2][j], 5.0f);
-
- // XTx[1][j] = -4*x[1][j] + -4*x[2][j] + 1*x[3][j] + 1*x[4][j];
- XTx[1][j] = vmls_n_f32(vadd_f32(x[3][j], x[4][j]), vadd_f32(x[1][j], x[2][j]), 4.0f);
-
- // XTx[2][j] = 4*x[1][j] + -4*x[2][j] + -1*x[3][j] + 1*x[4][j];
- XTx[2][j] = vmla_n_f32(vsub_f32(x[4][j], x[3][j]), vsub_f32(x[1][j], x[2][j]), 4.0f);
-
- // XTx[3][j] = -2*x[1][j] + -1*x[2][j] + 2*x[3][j] + 1*x[4][j];
- XTx[3][j] = vmla_n_f32(vsub_f32(x[4][j], x[2][j]), vsub_f32(x[3][j], x[1][j]), 2.0f);
-
- // XTx[4][j] = 2*x[1][j] + -1*x[2][j] + -2*x[3][j] + 1*x[4][j];
- XTx[4][j] = vmla_n_f32(vsub_f32(x[4][j], x[2][j]), vsub_f32(x[1][j], x[3][j]), 2.0f);
-
- // XTx[5][j] = 4*x[1][j] + -5*x[3][j] + 1*x[5][j];
- XTx[5][j] = vmls_n_f32(vmla_n_f32(x[5][j], x[1][j], 4.0f), x[3][j], 5.0f);
- }
-
- // Compute U = XT . x . X
- for (int i = 0; i < inner_tile_rows; i++)
- {
- // U[i][0] = 4*XTx[i][0] + -5*XTx[i][2] + 1*XTx[i][4];
- U[i][0] = vmls_n_f32(vmla_n_f32(XTx[i][4], XTx[i][0], 4.0f), XTx[i][2], 5.0f);
-
- // U[i][1] = -4*XTx[i][1] + -4*XTx[i][2] + 1*XTx[i][3] + 1*XTx[i][4];
- U[i][1] = vmls_n_f32(vadd_f32(XTx[i][3], XTx[i][4]), vadd_f32(XTx[i][1], XTx[i][2]), 4.0f);
-
- // U[i][2] = 4*XTx[i][1] + -4*XTx[i][2] + -1*XTx[i][3] + 1*XTx[i][4];
- U[i][2] = vmla_n_f32(vsub_f32(XTx[i][4], XTx[i][3]), vsub_f32(XTx[i][1], XTx[i][2]), 4.0f);
-
- // U[i][3] = -2*XTx[i][1] + -1*XTx[i][2] + 2*XTx[i][3] + 1*XTx[i][4];
- U[i][3] = vmla_n_f32(vsub_f32(XTx[i][4], XTx[i][2]), vsub_f32(XTx[i][3], XTx[i][1]), 2.0f);
-
- // U[i][4] = 2*XTx[i][1] + -1*XTx[i][2] + -2*XTx[i][3] + 1*XTx[i][4];
- U[i][4] = vmla_n_f32(vsub_f32(XTx[i][4], XTx[i][2]), vsub_f32(XTx[i][1], XTx[i][3]), 2.0f);
-
- // U[i][5] = 4*XTx[i][1] + -5*XTx[i][3] + 1*XTx[i][5];
- U[i][5] = vmls_n_f32(vmla_n_f32(XTx[i][5], XTx[i][1], 4.0f), XTx[i][3], 5.0f);
- }
-
- // Store the transformed matrix
- for (int i = 0, m = 0; i < inner_tile_rows; i++)
- {
- for (int j = 0; j < inner_tile_cols; j++, m++)
- {
- vst1_f32(outptr + m*matrix_stride, U[i][j]);
- }
- }
- outptr += 2;
- }
- for (; channels_remaining; channels_remaining--)
- {
- // Load x
- for (int i = 0; i < inner_tile_rows; i++)
- {
- for (int j = 0; j < inner_tile_cols; j++)
- {
- x[i][j] = *(x_ptrs[i][j]++);
- }
- }
-
- // Compute XT . x
- for (int j = 0; j < inner_tile_cols; j++)
- {
- XTx[0][j] = 4*x[0][j] + -5*x[2][j] + 1*x[4][j];
- XTx[1][j] = -4*x[1][j] + -4*x[2][j] + 1*x[3][j] + 1*x[4][j];
- XTx[2][j] = 4*x[1][j] + -4*x[2][j] + -1*x[3][j] + 1*x[4][j];
- XTx[3][j] = -2*x[1][j] + -1*x[2][j] + 2*x[3][j] + 1*x[4][j];
- XTx[4][j] = 2*x[1][j] + -1*x[2][j] + -2*x[3][j] + 1*x[4][j];
- XTx[5][j] = 4*x[1][j] + -5*x[3][j] + 1*x[5][j];
- }
-
- // Compute U = XT . x . X
- for (int i = 0; i < inner_tile_rows; i++)
- {
- U[i][0] = 4*XTx[i][0] + -5*XTx[i][2] + 1*XTx[i][4];
- U[i][1] = -4*XTx[i][1] + -4*XTx[i][2] + 1*XTx[i][3] + 1*XTx[i][4];
- U[i][2] = 4*XTx[i][1] + -4*XTx[i][2] + -1*XTx[i][3] + 1*XTx[i][4];
- U[i][3] = -2*XTx[i][1] + -1*XTx[i][2] + 2*XTx[i][3] + 1*XTx[i][4];
- U[i][4] = 2*XTx[i][1] + -1*XTx[i][2] + -2*XTx[i][3] + 1*XTx[i][4];
- U[i][5] = 4*XTx[i][1] + -5*XTx[i][3] + 1*XTx[i][5];
- }
-
- // Store the transformed matrix
- for (int i = 0, m = 0; i < inner_tile_rows; i++)
- {
- for (int j = 0; j < inner_tile_cols; j++, m++)
- {
- *(outptr + m*matrix_stride) = U[i][j];
- }
- }
- outptr++;
- }
-}
-
-#endif
-
-template class InputTransform<6, 6, float, float, WinogradRoots::Integers>;
-
-} // namespace winograd
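Editorial note on the file above: the scalar fallback spells out the same 6x6 input transform that the AArch64 assembly computes four channels at a time. A minimal reference sketch follows, not part of the diff; it assumes the transform can be written as U = B^T . x . B, with B^T read directly off the scalar coefficients above, and the helper name is illustrative only.

// Minimal sketch (illustrative, not library code): reference 6x6 Winograd
// input transform for one channel, U = B^T . x . B, using the same integer
// coefficients as the scalar fallback above.
static const float BT[6][6] = {
    { 4.f,  0.f, -5.f,  0.f, 1.f, 0.f},
    { 0.f, -4.f, -4.f,  1.f, 1.f, 0.f},
    { 0.f,  4.f, -4.f, -1.f, 1.f, 0.f},
    { 0.f, -2.f, -1.f,  2.f, 1.f, 0.f},
    { 0.f,  2.f, -1.f, -2.f, 1.f, 0.f},
    { 0.f,  4.f,  0.f, -5.f, 0.f, 1.f},
};

void winograd_input_transform_6x6_reference(const float x[6][6], float U[6][6])
{
  float XTx[6][6];
  // XTx = B^T . x
  for (int i = 0; i < 6; i++)
    for (int j = 0; j < 6; j++)
    {
      XTx[i][j] = 0.f;
      for (int k = 0; k < 6; k++) XTx[i][j] += BT[i][k] * x[k][j];
    }
  // U = (B^T . x) . B, i.e. multiply by B^T along the rows
  for (int i = 0; i < 6; i++)
    for (int j = 0; j < 6; j++)
    {
      U[i][j] = 0.f;
      for (int k = 0; k < 6; k++) U[i][j] += XTx[i][k] * BT[j][k];
    }
}

The vectorised paths interleave exactly these row and column passes, holding one column of the tile per register and processing 4, 2 or 1 channels per iteration.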
diff --git a/src/core/NEON/kernels/convolution/winograd/winograd_transforms/kernel.hpp b/src/core/NEON/kernels/convolution/winograd/winograd_transforms/kernel.hpp
deleted file mode 100644
index e45f1863e3..0000000000
--- a/src/core/NEON/kernels/convolution/winograd/winograd_transforms/kernel.hpp
+++ /dev/null
@@ -1,78 +0,0 @@
-/*
- * Copyright (c) 2019 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#pragma once
-#include "winograd.hpp"
-using namespace winograd;
-
-#define MEMBERFN(RTYPE) template <\
- int KernelRows, int KernelCols, int InnerTileRows, int InnerTileCols, typename TIn, typename TOut, WinogradRoots Roots\
-> RTYPE WeightTransform<KernelRows, KernelCols, InnerTileRows, InnerTileCols, TIn, TOut, Roots>
-
-MEMBERFN()::WeightTransform(
- const int n_output_channels,
- const int n_input_channels
-) : _n_output_channels(n_output_channels), _n_input_channels(n_input_channels),
- _matrices(nullptr), _matrix_stride(0), _matrix_row_stride(0), _weights(nullptr)
-{
-
-}
-
-MEMBERFN(void)::set_weight_tensor(const void * const weights)
-{
- _weights = static_cast<const TIn *>(weights);
-}
-
-MEMBERFN(void)::set_output_matrices(void * const mptr, const int ldmatrix, const int ldrow)
-{
- _matrices = static_cast<TOut *>(mptr);
- _matrix_stride = ldmatrix;
- _matrix_row_stride = ldrow;
-}
-
-MEMBERFN(size_t)::get_working_space_size(unsigned int) const
-{
- return 0;
-}
-
-MEMBERFN(void)::set_working_space(void *)
-{
-}
-
-MEMBERFN(unsigned int)::get_window(void) const
-{
- // TODO When the weights transform supports multithreading, return the number
- // of output channels. For now we return 1 to indicate that the weights must
- // be transformed as a single block.
- // return n_output_channels;
- return 1;
-}
-
-MEMBERFN(void)::run(const unsigned int, const unsigned int, unsigned int)
-{
- execute(
- _n_output_channels, _n_input_channels, _weights,
- _matrices, _matrix_stride, _matrix_row_stride
- );
-}
diff --git a/src/core/NEON/kernels/convolution/winograd/winograd_transforms/output.hpp b/src/core/NEON/kernels/convolution/winograd/winograd_transforms/output.hpp
deleted file mode 100644
index ed88098938..0000000000
--- a/src/core/NEON/kernels/convolution/winograd/winograd_transforms/output.hpp
+++ /dev/null
@@ -1,252 +0,0 @@
-/*
- * Copyright (c) 2017-2019 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#pragma once
-
-#include <algorithm>
-#include "winograd.hpp"
-#include "padding.hpp"
-#include "utils.hpp"
-
-#define MEMBERFN(RTYPE) template<\
- int KernelRows, int KernelCols, int InnerTileRows, int InnerTileCols,\
- typename TIn, typename TOut, WinogradRoots Roots\
-> RTYPE OutputTransform<KernelRows, KernelCols, InnerTileRows, InnerTileCols, TIn, TOut, Roots>
-
-#define Nx1MEMBERFN(RTYPE) template<\
- int KernelRows, int InnerTileRows, typename TIn, typename TOut, WinogradRoots Roots\
-> RTYPE OutputTransform<KernelRows, 1, InnerTileRows, 1, TIn, TOut, Roots>
-
-namespace winograd
-{
-
-MEMBERFN()
-::OutputTransform(const int n_batches, const int n_rows, const int n_cols,
- const int n_channels, const arm_gemm::Activation &activation)
- : _n_batches(n_batches), _n_rows(n_rows), _n_cols(n_cols),
- _n_channels(n_channels),
- _output_min((activation.type == arm_gemm::Activation::Type::ReLU ||
- activation.type == arm_gemm::Activation::Type::BoundedReLU)
- ? static_cast<TOut>(0.0f) : TypeBounds<TOut>::lower()),
- _output_max((activation.type == arm_gemm::Activation::Type::BoundedReLU)
- ? static_cast<TOut>(activation.param1) : TypeBounds<TOut>::upper()),
- _matrix_base(nullptr), _biases(nullptr), _matrix_stride(0),
- _matrix_row_stride(0), _matrix_batch_stride(0), _outptr(nullptr),
- _tiles_M(iceildiv(n_rows, output_tile_rows)),
- _tiles_N(iceildiv(n_cols, output_tile_cols)), _out_col_stride(0),
- _out_row_stride(0), _out_batch_stride(0),
- _working_space_col_stride(n_channels),
- _working_space_row_stride(output_tile_cols * _working_space_col_stride),
- _working_space(nullptr) {}
-
-MEMBERFN(void)::set_input_matrices(const void * const mptr, const int ldmatrix, const int ldrow)
-{
- _matrix_base = static_cast<const TIn *>(mptr);
- _matrix_stride = ldmatrix;
- _matrix_row_stride = ldrow;
- _matrix_batch_stride = _tiles_M * _tiles_N * ldrow;
-}
-
-MEMBERFN(void)::set_bias(const void * const bias)
-{
- _biases = static_cast<const TOut *>(bias);
-}
-
-MEMBERFN(void)::set_output_tensor(void * const outptr)
-{
- set_output_tensor(outptr, _n_channels);
-}
-
-MEMBERFN(void)::set_output_tensor(void * const outptr, const int ldcol)
-{
- set_output_tensor(outptr, _n_cols * ldcol, ldcol);
-}
-
-MEMBERFN(void)::set_output_tensor(void * const outptr, const int ldrow, const int ldcol)
-{
- set_output_tensor(outptr, _n_rows * ldrow, ldrow, ldcol);
-}
-
-MEMBERFN(void)::set_output_tensor(void * const outptr, const int ldbatch, const int ldrow, const int ldcol)
-{
- _outptr = static_cast<TOut *>(outptr);
- _out_batch_stride = ldbatch;
- _out_row_stride = ldrow;
- _out_col_stride = ldcol;
-}
-
-Nx1MEMBERFN()::OutputTransform(
- const int n_batches,
- const int n_rows,
- const int n_cols,
- const int n_channels,
- const arm_gemm::Activation &activation
-) : OutputTransform<1, KernelRows, 1, InnerTileRows, TIn, TOut, Roots>::OutputTransform(
- n_batches, n_cols, n_rows, n_channels, activation /* Transpose rows and columns */
- )
-{
-}
-
-Nx1MEMBERFN(void)::set_output_tensor(void * const outptr)
-{
- set_output_tensor(outptr, this->_n_channels);
-}
-
-Nx1MEMBERFN(void)::set_output_tensor(void * const outptr, const int ldcol)
-{
- set_output_tensor(outptr, this->_n_cols * ldcol, ldcol);
-}
-
-Nx1MEMBERFN(void)::set_output_tensor(void * const outptr, const int ldrow, const int ldcol)
-{
- set_output_tensor(outptr, this->_n_rows * ldrow, ldrow, ldcol);
-}
-
-Nx1MEMBERFN(void)::set_output_tensor(void * const outptr, const int ldbatch, const int ldrow, const int ldcol)
-{
- // Transpose rows and columns
- Base::set_output_tensor(outptr, ldbatch, ldcol, ldrow);
-}
-
-MEMBERFN(size_t)::get_working_space_size(const unsigned int nthreads) const
-{
- return sizeof(TOut) * output_tile_rows * _working_space_row_stride * nthreads;
-}
-
-MEMBERFN(void)::set_working_space(void * const buffer)
-{
- _working_space = static_cast<TOut *>(buffer);
-}
-
-MEMBERFN(unsigned int)::get_window(void) const
-{
- return iceildiv(_n_channels, WINDOW_BLOCK);
-}
-
-MEMBERFN(void)::run(
- const unsigned int start,
- const unsigned int stop,
- const unsigned int threadid
-)
-{
- // Determine the channels on which to work
- if (start >= get_window())
- {
- return; // No work to do beyond the end of the window
- }
- const unsigned int start_channel = start * WINDOW_BLOCK;
- const unsigned int stop_channel = std::min<unsigned int>(_n_channels, stop * WINDOW_BLOCK);
- const unsigned int n_channels = stop_channel - start_channel;
-
- const auto matrix_tile_col_stride = _matrix_row_stride;
- const auto matrix_tile_row_stride = _tiles_N * matrix_tile_col_stride;
-
- const TOut* const bptr = (_biases == nullptr) ? nullptr : _biases + start_channel;
-
- // Loop over batches
- for (int batch = 0; batch < _n_batches; batch++)
- {
- const TIn* const matrix_batch = _matrix_base + start_channel + batch * _matrix_batch_stride;
- TOut* const outptr_batch = _outptr + start_channel + batch * _out_batch_stride;
-
- for (int tile_i = 0; tile_i < _tiles_M; tile_i++)
- {
- // Compute properties of the row of output tiles
- const int row_pad_bottom = std::max(0, (tile_i + 1)*output_tile_rows - _n_rows);
- const TIn* const matrix_tile_row = matrix_batch + tile_i * matrix_tile_row_stride;
- TOut* const outptr_row = outptr_batch + tile_i * output_tile_rows * _out_row_stride;
-
- for (int tile_j = 0; tile_j < _tiles_N; tile_j++)
- {
- // Compute property of this specific tile
- const int tile_pad_right = std::max(0, (tile_j + 1)*output_tile_cols - _n_cols);
- const TIn* const matrix_tile = matrix_tile_row + tile_j * matrix_tile_col_stride;
- TOut* const outptr_tile = outptr_row + tile_j * output_tile_cols * _out_col_stride;
-
- // Perform the transformation
- if (row_pad_bottom || tile_pad_right)
- {
- transform_cropped_tile(
- threadid, n_channels, outptr_tile, matrix_tile, bptr,
- row_pad_bottom, tile_pad_right
- );
- }
- else
- {
- transform_uncropped_tile(
- threadid, n_channels, outptr_tile, matrix_tile, bptr
- );
- }
- }
- }
- }
-}
-
-MEMBERFN(void)::transform_uncropped_tile(
- const unsigned int /* threadid unused */,
- const int n_channels,
- TOut * const outptr,
- const TIn * const inptr,
- const TOut * const biases
-)
-{
- transform_tile(
- n_channels, inptr, _matrix_stride, biases,
- outptr, _out_row_stride, _out_col_stride,
- _output_min, _output_max
- );
-}
-
-MEMBERFN(void)::transform_cropped_tile(
- const unsigned int threadid,
- const int n_channels,
- TOut * const outptr,
- const TIn * const inptr,
- const TOut * const biases,
- const int pad_bottom,
- const int pad_right
-)
-{
- // Transform into working space and then copy the relevant section out.
- TOut *wsptr = static_cast<TOut *>(get_working_space(threadid));
- transform_tile(
- n_channels, inptr, _matrix_stride, biases,
- wsptr, _working_space_row_stride, _working_space_col_stride,
- _output_min, _output_max
- );
-
- padding::crop_and_copy_tile(
- output_tile_rows, output_tile_cols, n_channels,
- wsptr, _working_space_row_stride, _working_space_col_stride,
- outptr, _out_row_stride, _out_col_stride,
- 0u, 0u, pad_bottom, pad_right
- );
-}
-
-MEMBERFN(void *)::get_working_space(const unsigned int threadid) const
-{
- return _working_space + output_tile_rows * _working_space_row_stride * threadid;
-}
-
-} // namespace winograd
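Editorial note on output.hpp above: get_window() and run() split the work across channels in blocks of WINDOW_BLOCK. The sketch below, not part of the diff, shows how a (start, stop) window maps to a channel range; window_block and the example figures are placeholders, since WINDOW_BLOCK's value is not visible here.

// Minimal sketch (illustrative): mapping a scheduling window onto channels,
// mirroring the start/stop arithmetic in OutputTransform::run above.
#include <algorithm>

struct ChannelRange { unsigned int first, count; };

ChannelRange window_to_channels(unsigned int start, unsigned int stop,
                                unsigned int n_channels, unsigned int window_block)
{
  const unsigned int start_channel = start * window_block;
  const unsigned int stop_channel  = std::min(n_channels, stop * window_block);
  return { start_channel, stop_channel - start_channel };
}

// For example, assuming window_block = 16 and n_channels = 70, get_window()
// would report ceil(70 / 16) = 5 windows, and the window [4, 5) would cover
// only the final 6 channels.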
diff --git a/src/core/NEON/kernels/convolution/winograd/winograd_transforms/output_2_7_fp32_fp32_integers.cpp b/src/core/NEON/kernels/convolution/winograd/winograd_transforms/output_2_7_fp32_fp32_integers.cpp
deleted file mode 100644
index f231bdd67e..0000000000
--- a/src/core/NEON/kernels/convolution/winograd/winograd_transforms/output_2_7_fp32_fp32_integers.cpp
+++ /dev/null
@@ -1,143 +0,0 @@
-/*
- * Copyright (c) 2017-2019 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include "arm.hpp"
-#include "output.hpp"
-
-namespace winograd
-{
-
-template <>
-void OutputTransform<1, 7, 1, 8, float, float, WinogradRoots::Integers>::transform_tile(
- const int n_channels,
- const float* inptr,
- const int matrix_stride,
- const float* bptr,
- float* const output,
- const int, // No need to stride across rows
- const int output_col_stride,
- const float output_min,
- const float output_max
-)
-{
- // Construct a map to the output cells
- float *outptrs[output_tile_cols];
- for (int j = 0; j < output_tile_cols; j++)
- {
- outptrs[j] = output + j*output_col_stride;
- }
-
- // For each channel of the output
- int channels_remaining = n_channels;
-#ifdef __arm_any__
- for (; channels_remaining >= 4; channels_remaining -= 4)
- {
- // Matrices used and computed during this transform
- float32x4_t F[inner_tile_cols], f[output_tile_cols], b = vdupq_n_f32(0.0f);
-
- // Read a 1x8 tile in the Winograd domain
- for (int j = 0; j < inner_tile_cols; j++)
- {
- F[j] = vld1q_f32(inptr + j*matrix_stride);
- }
- inptr += 4;
-
- f[0] = vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmulq_n_f32(F[6], 1), F[5], 1), F[4], 1), F[3], 1), F[2], 1), F[1], 1), F[0], 1);
- f[1] = vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmulq_n_f32(F[7], 1), F[2], 1), F[6], 3), F[4], 2), F[3], -2), F[5], -3), F[1], -1);
-
- // Write out the output tile
- if (bptr != 0)
- {
- b = vld1q_f32(bptr);
- bptr += 4;
- }
- for (int j = 0; j < output_tile_cols; j++)
- {
- const auto y = vminq_f32(vmaxq_f32(f[j] + b, vdupq_n_f32(output_min)),
- vdupq_n_f32(output_max));
- vst1q_f32(outptrs[j], y);
- outptrs[j] += 4;
- }
- }
- for (; channels_remaining >= 2; channels_remaining -= 2)
- {
- // Matrices used and computed during this transform
- float32x2_t F[inner_tile_cols], f[output_tile_cols], b = vdup_n_f32(0.0f);
-
- // Read a 1x8 tile in the Winograd domain
- for (int j = 0; j < inner_tile_cols; j++)
- {
- F[j] = vld1_f32(inptr + j*matrix_stride);
- }
- inptr += 2;
-
- f[0] = vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmul_n_f32(F[6], 1), F[5], 1), F[4], 1), F[3], 1), F[2], 1), F[1], 1), F[0], 1);
- f[1] = vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmul_n_f32(F[7], 1), F[2], 1), F[6], 3), F[4], 2), F[3], -2), F[5], -3), F[1], -1);
-
- // Write out the output tile
- if (bptr != 0)
- {
- b = vld1_f32(bptr);
- bptr += 2;
- }
- for (int j = 0; j < output_tile_cols; j++)
- {
- const auto y = vmin_f32(vmax_f32(f[j] + b, vdup_n_f32(output_min)),
- vdup_n_f32(output_max));
- vst1_f32(outptrs[j], y);
- outptrs[j] += 2;
- }
- }
-#endif // __arm_any__
- for (; channels_remaining; channels_remaining--)
- {
- // Matrices used and computed during this transform
- float F[inner_tile_cols], f[output_tile_cols], b = 0.0f;
-
- // Read a 1x8 tile in the Winograd domain
- for (int j = 0; j < inner_tile_cols; j++)
- {
- F[j] = *(inptr + j*matrix_stride);
- }
- inptr++;
-
- f[0] = F[0]*1 + F[1]*1 + F[2]*1 + F[3]*1 + F[4]*1 + F[5]*1 + F[6]*1;
- f[1] = F[1]*-1 + F[5]*-3 + F[3]*-2 + F[4]*2 + F[6]*3 + F[2]*1 + F[7]*1;
-
- // Write out the output tile
- if (bptr != 0)
- {
- b = *(bptr++);
- }
- for (int j = 0; j < output_tile_cols; j++)
- {
- *(outptrs[j]++) = std::max(std::min(f[j] + b, output_max), output_min);
- }
- }
-}
-
-template class OutputTransform<1, 7, 1, 8, float, float, WinogradRoots::Integers>;
-template class OutputTransform<7, 1, 8, 1, float, float, WinogradRoots::Integers>;
-
-} // namespace winograd
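Editorial note on the 1x7 kernel / 1x8 tile output transform above: the scalar tail makes the transform matrix explicit. A small reference sketch follows, not part of the diff; the function name is illustrative, and A^T is read off the two scalar expressions for f[0] and f[1] above.

// Minimal sketch (illustrative): f = A^T . F for the 1x8 -> 1x2 output
// transform, with the same coefficients as the scalar path above.
void winograd_output_transform_1x8_reference(const float F[8], float f[2])
{
  static const float AT[2][8] = {
      {1.f,  1.f, 1.f,  1.f, 1.f,  1.f, 1.f, 0.f},
      {0.f, -1.f, 1.f, -2.f, 2.f, -3.f, 3.f, 1.f},
  };
  for (int i = 0; i < 2; i++)
  {
    f[i] = 0.f;
    for (int k = 0; k < 8; k++) f[i] += AT[i][k] * F[k];
  }
}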
diff --git a/src/core/NEON/kernels/convolution/winograd/winograd_transforms/output_2x2_3x3_fp32_fp32_integers.cpp b/src/core/NEON/kernels/convolution/winograd/winograd_transforms/output_2x2_3x3_fp32_fp32_integers.cpp
deleted file mode 100644
index 5136bc15c4..0000000000
--- a/src/core/NEON/kernels/convolution/winograd/winograd_transforms/output_2x2_3x3_fp32_fp32_integers.cpp
+++ /dev/null
@@ -1,231 +0,0 @@
-/*
- * Copyright (c) 2017-2019 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include "arm.hpp"
-#include "output.hpp"
-
-namespace winograd
-{
-
-template <>
-void OutputTransform<3, 3, 4, 4, float, float, WinogradRoots::Integers>::transform_tile(
- const int n_channels,
- const float* inptr,
- const int matrix_stride,
- const float* bptr,
- float* const output,
- const int output_row_stride,
- const int output_col_stride,
- const float output_min,
- const float output_max
-)
-{
- // Construct a map to the output cells
- float *outptrs[output_tile_rows][output_tile_cols];
- for (int i = 0; i < output_tile_rows; i++)
- {
- for (int j = 0; j < output_tile_cols; j++)
- {
- outptrs[i][j] = output + i*output_row_stride + j*output_col_stride;
- }
- }
-
- // For each channel of the output
- int channels_remaining = n_channels;
-#ifdef __aarch64__
- for (; channels_remaining >= 4; channels_remaining -= 4)
- {
- // Matrices used and computed during this transform
- float32x4_t F[4][4], FZ[4][2], f[2][2], b;
-
- // Read a 4x4 tile in the Winograd domain
- for (int i = 0, m = 0; i < 4; i++)
- {
- for (int j = 0; j < 4; j++, m++)
- {
- F[i][j] = vld1q_f32(inptr + m*matrix_stride);
- }
- }
- inptr += 4;
-
- // Compute the matrix F Z
- for (int i = 0; i < 4; i++)
- {
- // FZ[i][0] = F[i][0] + F[i][1] + F[i][2];
- FZ[i][0] = vaddq_f32(vaddq_f32(F[i][0], F[i][1]), F[i][2]);
-
- // FZ[i][1] = F[i][1] - F[i][2] - F[i][3];
- FZ[i][1] = vsubq_f32(vsubq_f32(F[i][1], F[i][2]), F[i][3]);
- }
-
- // Compute the output tile f = ZT F Z
- for (int j = 0; j < 2; j++)
- {
- // f[0][j] = FZ[0][j] + FZ[1][j] + FZ[2][j];
- f[0][j] = vaddq_f32(vaddq_f32(FZ[0][j], FZ[1][j]), FZ[2][j]);
-
- // f[1][j] = FZ[1][j] - FZ[2][j] - FZ[3][j];
- f[1][j] = vsubq_f32(vsubq_f32(FZ[1][j], FZ[2][j]), FZ[3][j]);
- }
-
- // Load the bias vector
- if (bptr != nullptr)
- {
- b = vld1q_f32(bptr);
- bptr += 4;
- }
- else
- {
- b = vdupq_n_f32(0.0f);
- }
-
- // Write out the output tile
- for (int i = 0; i < output_tile_rows; i++)
- {
- for (int j = 0; j < output_tile_cols; j++)
- {
- const auto y =
- vmaxq_f32(vminq_f32(vaddq_f32(f[i][j], b), vdupq_n_f32(output_max)),
- vdupq_n_f32(output_min));
- vst1q_f32(outptrs[i][j], y);
- outptrs[i][j] += 4;
- }
- }
- }
-#endif // __aarch64__
-#ifdef __arm_any__
- for (; channels_remaining >= 2; channels_remaining -= 2)
- {
- // Matrices used and computed during this transform
- float32x2_t F[4][4], FZ[4][2], f[2][2], b;
-
- // Read a 4x4 tile in the Winograd domain
- for (int i = 0, m = 0; i < 4; i++)
- {
- for (int j = 0; j < 4; j++, m++)
- {
- F[i][j] = vld1_f32(inptr + m*matrix_stride);
- }
- }
- inptr += 2;
-
- // Compute the matrix F Z
- for (int i = 0; i < 4; i++)
- {
- // FZ[i][0] = F[i][0] + F[i][1] + F[i][2];
- FZ[i][0] = vadd_f32(vadd_f32(F[i][0], F[i][1]), F[i][2]);
-
- // FZ[i][1] = F[i][1] - F[i][2] - F[i][3];
- FZ[i][1] = vsub_f32(vsub_f32(F[i][1], F[i][2]), F[i][3]);
- }
-
- // Compute the output tile f = ZT F Z
- for (int j = 0; j < 2; j++)
- {
- // f[0][j] = FZ[0][j] + FZ[1][j] + FZ[2][j];
- f[0][j] = vadd_f32(vadd_f32(FZ[0][j], FZ[1][j]), FZ[2][j]);
-
- // f[1][j] = FZ[1][j] - FZ[2][j] - FZ[3][j];
- f[1][j] = vsub_f32(vsub_f32(FZ[1][j], FZ[2][j]), FZ[3][j]);
- }
-
- // Load the bias vector
- if (bptr != nullptr)
- {
- b = vld1_f32(bptr);
- bptr += 2;
- }
- else
- {
- b = vdup_n_f32(0.0f);
- }
-
- // Write out the output tile
- for (int i = 0; i < output_tile_rows; i++)
- {
- for (int j = 0; j < output_tile_cols; j++)
- {
- const auto y =
- vmax_f32(vmin_f32(vadd_f32(f[i][j], b), vdup_n_f32(output_max)),
- vdup_n_f32(output_min));
- vst1_f32(outptrs[i][j], y);
- outptrs[i][j] += 2;
- }
- }
- }
-#endif // __arm_any__
- for (; channels_remaining; channels_remaining--)
- {
- // Matrices used and computed during this transform
- float F[4][4], FZ[4][2], f[2][2], b;
-
- // Read a 4x4 tile in the Winograd domain
- for (int i = 0, m = 0; i < 4; i++)
- {
- for (int j = 0; j < 4; j++, m++)
- {
- F[i][j] = *(inptr + m*matrix_stride);
- }
- }
- inptr++;
-
- // Compute the matrix F Z
- for (int i = 0; i < 4; i++)
- {
- FZ[i][0] = F[i][0] + F[i][1] + F[i][2];
- FZ[i][1] = F[i][1] - F[i][2] - F[i][3];
- }
-
- // Compute the output tile f = ZT F Z
- for (int j = 0; j < 2; j++)
- {
- f[0][j] = FZ[0][j] + FZ[1][j] + FZ[2][j];
- f[1][j] = FZ[1][j] - FZ[2][j] - FZ[3][j];
- }
-
- // Load the bias
- if (bptr != nullptr)
- {
- b = *(bptr++);
- }
- else
- {
- b = 0.0f;
- }
-
- // Write out the output tile
- for (int i = 0; i < output_tile_rows; i++)
- {
- for (int j = 0; j < output_tile_cols; j++)
- {
- const auto y = std::max(std::min(f[i][j] + b, output_max), output_min);
- *(outptrs[i][j]++) = y;
- }
- }
- }
-}
-
-template class OutputTransform<3, 3, 4, 4, float, float, WinogradRoots::Integers>;
-
-} // namespace
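Editorial note on the 3x3 kernel / 4x4 tile output transform above: the scalar path computes f = A^T . F . A in two passes (FZ = F . A, then f = A^T . FZ). The reference sketch below is not part of the diff; the function name is illustrative and A^T is read off the FZ and f expressions above.

// Minimal sketch (illustrative): 4x4 -> 2x2 Winograd output transform for one
// channel, f = A^T . F . A, matching the scalar fallback above.
void winograd_output_transform_2x2_3x3_reference(const float F[4][4], float f[2][2])
{
  static const float AT[2][4] = { {1.f, 1.f, 1.f, 0.f},
                                  {0.f, 1.f, -1.f, -1.f} };
  float FZ[4][2];
  // FZ = F . A
  for (int i = 0; i < 4; i++)
    for (int j = 0; j < 2; j++)
    {
      FZ[i][j] = 0.f;
      for (int k = 0; k < 4; k++) FZ[i][j] += F[i][k] * AT[j][k];
    }
  // f = A^T . FZ
  for (int i = 0; i < 2; i++)
    for (int j = 0; j < 2; j++)
    {
      f[i][j] = 0.f;
      for (int k = 0; k < 4; k++) f[i][j] += AT[i][k] * FZ[k][j];
    }
}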
diff --git a/src/core/NEON/kernels/convolution/winograd/winograd_transforms/output_2x2_5x5_fp32_fp32_integers.cpp b/src/core/NEON/kernels/convolution/winograd/winograd_transforms/output_2x2_5x5_fp32_fp32_integers.cpp
deleted file mode 100644
index 0f911f14a3..0000000000
--- a/src/core/NEON/kernels/convolution/winograd/winograd_transforms/output_2x2_5x5_fp32_fp32_integers.cpp
+++ /dev/null
@@ -1,225 +0,0 @@
-/*
- * Copyright (c) 2017-2019 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include "output.hpp"
-#include "arm.hpp"
-
-namespace winograd
-{
-
-template <>
-void OutputTransform<5, 5, 6, 6, float, float, WinogradRoots::Integers>::transform_tile(
- const int n_channels,
- const float* inptr,
- const int matrix_stride,
- const float* bptr,
- float* const output,
- const int output_row_stride,
- const int output_col_stride,
- const float output_min,
- const float output_max
-)
-{
- // Construct a map to the output cells
- float *outptrs[output_tile_rows][output_tile_cols];
- for (int i = 0; i < output_tile_rows; i++)
- {
- for (int j = 0; j < output_tile_cols; j++)
- {
- outptrs[i][j] = output + i*output_row_stride + j*output_col_stride;
- }
- }
-
- // For each channel of the output
- int channels_remaining = n_channels;
-#ifdef __aarch64__
- for (; channels_remaining >= 4; channels_remaining -= 4)
- {
- // Matrices used and computed during this transform
- float32x4_t F[6][6], FZ[6][2], f[2][2], b;
-
- // Read a 6x6 tile in the Winograd domain
- for (int i = 0, m = 0; i < 6; i++)
- {
- for (int j = 0; j < 6; j++, m++)
- {
- F[i][j] = vld1q_f32(inptr + m*matrix_stride);
- }
- }
- inptr += 4;
-
- // Compute the matrix F Z
- for (int i = 0; i < 6; i++)
- {
- // FZ[i][0] = 1*F[i][0] + 1*F[i][1] + 1*F[i][2] + 1*F[i][3] + 1*F[i][4];
- FZ[i][0] = vaddq_f32(vaddq_f32(vaddq_f32(F[i][0], F[i][1]), vaddq_f32(F[i][2], F[i][3])), F[i][4]);
-
- // FZ[i][1] = 1*F[i][1] + -1*F[i][2] + 2*F[i][3] + -2*F[i][4] + 1*F[i][5];
- FZ[i][1] = vaddq_f32(vmlaq_n_f32(vsubq_f32(F[i][1], F[i][2]), vsubq_f32(F[i][3], F[i][4]), 2.0f), F[i][5]);
- }
-
- // Compute the output tile f = ZT F Z
- for (int j = 0; j < 2; j++)
- {
- // f[0][j] = 1*FZ[0][j] + 1*FZ[1][j] + 1*FZ[2][j] + 1*FZ[3][j] + 1*FZ[4][j];
- f[0][j] = vaddq_f32(vaddq_f32(vaddq_f32(FZ[0][j], FZ[1][j]), vaddq_f32(FZ[2][j], FZ[3][j])), FZ[4][j]);
-
- // f[1][j] = 1*FZ[1][j] + -1*FZ[2][j] + 2*FZ[3][j] + -2*FZ[4][j] + 1*FZ[5][j];
- f[1][j] = vaddq_f32(vmlaq_n_f32(vsubq_f32(FZ[1][j], FZ[2][j]), vsubq_f32(FZ[3][j], FZ[4][j]), 2.0f), FZ[5][j]);
- }
-
- // Write out the output tile
- if (bptr != nullptr)
- {
- b = vld1q_f32(bptr);
- bptr += 4;
- }
- else
- {
- b = vdupq_n_f32(0.0f);
- }
- for (int i = 0; i < output_tile_rows; i++)
- {
- for (int j = 0; j < output_tile_cols; j++)
- {
- const auto y =
- vmaxq_f32(vminq_f32(vaddq_f32(f[i][j], b), vdupq_n_f32(output_max)),
- vdupq_n_f32(output_min));
- vst1q_f32(outptrs[i][j], y);
- outptrs[i][j] += 4;
- }
- }
- }
-#endif // __aarch64__
-#ifdef __arm_any__
- for (; channels_remaining >= 2; channels_remaining -= 2)
- {
- // Matrices used and computed during this transform
- float32x2_t F[6][6], FZ[6][2], f[2][2], b;
-
- // Read a 6x6 tile in the Winograd domain
- for (int i = 0, m = 0; i < 6; i++)
- {
- for (int j = 0; j < 6; j++, m++)
- {
- F[i][j] = vld1_f32(inptr + m*matrix_stride);
- }
- }
- inptr += 2;
-
- // Compute the matrix F Z
- for (int i = 0; i < 6; i++)
- {
- // FZ[i][0] = 1*F[i][0] + 1*F[i][1] + 1*F[i][2] + 1*F[i][3] + 1*F[i][4];
- FZ[i][0] = vadd_f32(vadd_f32(vadd_f32(F[i][0], F[i][1]), vadd_f32(F[i][2], F[i][3])), F[i][4]);
-
- // FZ[i][1] = 1*F[i][1] + -1*F[i][2] + 2*F[i][3] + -2*F[i][4] + 1*F[i][5];
- FZ[i][1] = vadd_f32(vmla_n_f32(vsub_f32(F[i][1], F[i][2]), vsub_f32(F[i][3], F[i][4]), 2.0f), F[i][5]);
- }
-
- // Compute the output tile f = ZT F Z
- for (int j = 0; j < 2; j++)
- {
- // f[0][j] = 1*FZ[0][j] + 1*FZ[1][j] + 1*FZ[2][j] + 1*FZ[3][j] + 1*FZ[4][j];
- f[0][j] = vadd_f32(vadd_f32(vadd_f32(FZ[0][j], FZ[1][j]), vadd_f32(FZ[2][j], FZ[3][j])), FZ[4][j]);
-
- // f[1][j] = 1*FZ[1][j] + -1*FZ[2][j] + 2*FZ[3][j] + -2*FZ[4][j] + 1*FZ[5][j];
- f[1][j] = vadd_f32(vmla_n_f32(vsub_f32(FZ[1][j], FZ[2][j]), vsub_f32(FZ[3][j], FZ[4][j]), 2.0f), FZ[5][j]);
- }
-
- // Write out the output tile
- if (bptr != nullptr)
- {
- b = vld1_f32(bptr);
- bptr += 2;
- }
- else
- {
- b = vdup_n_f32(0.0f);
- }
- for (int i = 0; i < output_tile_rows; i++)
- {
- for (int j = 0; j < output_tile_cols; j++)
- {
- const auto y =
- vmax_f32(vmin_f32(vadd_f32(f[i][j], b), vdup_n_f32(output_max)),
- vdup_n_f32(output_min));
- vst1_f32(outptrs[i][j], y);
- outptrs[i][j] += 2;
- }
- }
- }
-#endif // __arm_any__
- for (; channels_remaining; channels_remaining--)
- {
- // Matrices used and computed during this transform
- float F[6][6], FZ[6][2], f[2][2], b;
-
- // Read a 6x6 tile in the Winograd domain
- for (int i = 0, m = 0; i < 6; i++)
- {
- for (int j = 0; j < 6; j++, m++)
- {
- F[i][j] = *(inptr + m*matrix_stride);
- }
- }
- inptr++;
-
- // Compute the matrix F Z
- for (int i = 0; i < 6; i++)
- {
- FZ[i][0] = 1*F[i][0] + 1*F[i][1] + 1*F[i][2] + 1*F[i][3] + 1*F[i][4];
- FZ[i][1] = 1*F[i][1] + -1*F[i][2] + 2*F[i][3] + -2*F[i][4] + 1*F[i][5];
- }
-
- // Compute the output tile f = ZT F Z
- for (int j = 0; j < 2; j++)
- {
- f[0][j] = 1*FZ[0][j] + 1*FZ[1][j] + 1*FZ[2][j] + 1*FZ[3][j] + 1*FZ[4][j];
- f[1][j] = 1*FZ[1][j] + -1*FZ[2][j] + 2*FZ[3][j] + -2*FZ[4][j] + 1*FZ[5][j];
- }
-
- // Write out the output tile
- if (bptr != nullptr)
- {
- b = *(bptr++);
- }
- else
- {
- b = 0.0f;
- }
- for (int i = 0; i < output_tile_rows; i++)
- {
- for (int j = 0; j < output_tile_cols; j++)
- {
- const auto y = std::max(std::min(f[i][j] + b, output_max), output_min);
- *(outptrs[i][j]++) = y;
- }
- }
- }
-}
-
-template class OutputTransform<5, 5, 6, 6, float, float, WinogradRoots::Integers>;
-
-} // namespace winograd
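For reference, the kernel deleted above computes the 6x6 to 2x2 output transform for the 2x2-output / 5x5-kernel case. Reading the coefficients off the scalar tail loop, each channel computes f = Z^T F Z, followed by the bias add and the clamp to [output_min, output_max], with

\[
Z^{\mathsf T} =
\begin{bmatrix}
1 & 1 & 1 & 1 & 1 & 0 \\
0 & 1 & -1 & 2 & -2 & 1
\end{bmatrix}
\]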
diff --git a/src/core/NEON/kernels/convolution/winograd/winograd_transforms/output_4_5_fp32_fp32_integers.cpp b/src/core/NEON/kernels/convolution/winograd/winograd_transforms/output_4_5_fp32_fp32_integers.cpp
deleted file mode 100644
index 49a3f41182..0000000000
--- a/src/core/NEON/kernels/convolution/winograd/winograd_transforms/output_4_5_fp32_fp32_integers.cpp
+++ /dev/null
@@ -1,152 +0,0 @@
-/*
- * Copyright (c) 2017-2019 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include "output.hpp"
-#include "arm.hpp"
-
-namespace winograd
-{
-
-template <>
-void OutputTransform<1, 5, 1, 8, float, float, WinogradRoots::Integers>::transform_tile(
- const int n_channels,
- const float* inptr,
- const int matrix_stride,
- const float* bptr,
- float* const output,
- const int, // No need to stride across rows
- const int output_col_stride,
- const float output_min,
- const float output_max
-)
-{
- // Construct a map to the output cells
- float *outptrs[output_tile_cols];
- for (int j = 0; j < output_tile_cols; j++)
- {
- outptrs[j] = output + j*output_col_stride;
- }
-
- // For each channel of the output
- int channels_remaining = n_channels;
-#ifdef __arm_any__
- for (; channels_remaining >= 4; channels_remaining -= 4)
- {
- // Matrices used and computed during this transform
- float32x4_t F[inner_tile_cols], f[output_tile_cols], b = vdupq_n_f32(0.0f);
-
- // Read a 1x8 tile in the Winograd domain
- for (int j = 0; j < inner_tile_cols; j++)
- {
- F[j] = vld1q_f32(inptr + j*matrix_stride);
- }
- inptr += 4;
-
- f[0] = vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmulq_n_f32(F[6], 1), F[5], 1), F[4], 1), F[3], 1), F[2], 1), F[1], 1), F[0], 1);
- f[1] = vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmulq_n_f32(F[2], 1), F[6], 3), F[4], 2), F[3], -2), F[5], -3), F[1], -1);
- f[2] = vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmulq_n_f32(F[2], 1), F[1], 1), F[6], 9), F[5], 9), F[4], 4), F[3], 4);
- f[3] = vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmulq_n_f32(F[7], 1), F[2], 1), F[6], 27), F[4], 8), F[3], -8), F[5], -27), F[1], -1);
-
- // Write out the output tile
- if (bptr != 0)
- {
- b = vld1q_f32(bptr);
- bptr += 4;
- }
- for (int j = 0; j < output_tile_cols; j++)
- {
- const auto y =
- vmaxq_f32(vminq_f32(vaddq_f32(f[j], b), vdupq_n_f32(output_max)),
- vdupq_n_f32(output_min));
- vst1q_f32(outptrs[j], y);
- outptrs[j] += 4;
- }
- }
- for (; channels_remaining >= 2; channels_remaining -= 2)
- {
- // Matrices used and computed during this transform
- float32x2_t F[inner_tile_cols], f[output_tile_cols], b = vdup_n_f32(0.0f);
-
- // Read a 1x8 tile in the Winograd domain
- for (int j = 0; j < inner_tile_cols; j++)
- {
- F[j] = vld1_f32(inptr + j*matrix_stride);
- }
- inptr += 2;
-
- f[0] = vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmul_n_f32(F[6], 1), F[5], 1), F[4], 1), F[3], 1), F[2], 1), F[1], 1), F[0], 1);
- f[1] = vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmul_n_f32(F[2], 1), F[6], 3), F[4], 2), F[3], -2), F[5], -3), F[1], -1);
- f[2] = vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmul_n_f32(F[2], 1), F[1], 1), F[6], 9), F[5], 9), F[4], 4), F[3], 4);
- f[3] = vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmul_n_f32(F[7], 1), F[2], 1), F[6], 27), F[4], 8), F[3], -8), F[5], -27), F[1], -1);
-
- // Write out the output tile
- if (bptr != 0)
- {
- b = vld1_f32(bptr);
- bptr += 2;
- }
- for (int j = 0; j < output_tile_cols; j++)
- {
- const auto y =
- vmax_f32(vmin_f32(vadd_f32(f[j], b), vdup_n_f32(output_max)),
- vdup_n_f32(output_min));
- vst1_f32(outptrs[j], y);
- outptrs[j] += 2;
- }
- }
-#endif // __arm_any__
- for (; channels_remaining; channels_remaining--)
- {
- // Matrices used and computed during this transform
- float F[inner_tile_cols], f[output_tile_cols], b = 0.0f;
-
- // Read a 1x8 tile in the Winograd domain
- for (int j = 0; j < inner_tile_cols; j++)
- {
- F[j] = *(inptr + j*matrix_stride);
- }
- inptr++;
-
- f[0] = F[0]*1 + F[1]*1 + F[2]*1 + F[3]*1 + F[4]*1 + F[5]*1 + F[6]*1;
- f[1] = F[1]*-1 + F[5]*-3 + F[3]*-2 + F[4]*2 + F[6]*3 + F[2]*1;
- f[2] = F[3]*4 + F[4]*4 + F[5]*9 + F[6]*9 + F[1]*1 + F[2]*1;
- f[3] = F[1]*-1 + F[5]*-27 + F[3]*-8 + F[4]*8 + F[6]*27 + F[2]*1 + F[7]*1;
-
- // Write out the output tile
- if (bptr != 0)
- {
- b = *(bptr++);
- }
- for (int j = 0; j < output_tile_cols; j++)
- {
- const auto y = std::max(std::min(f[j] + b, output_max), output_min);
- *(outptrs[j]++) = y;
- }
- }
-}
-
-template class OutputTransform<1, 5, 1, 8, float, float, WinogradRoots::Integers>;
-template class OutputTransform<5, 1, 8, 1, float, float, WinogradRoots::Integers>;
-
-} // namespace winograd
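The 1-D variant deleted above maps an 8-element Winograd-domain tile F to four outputs. Writing the inlined coefficients of the scalar loop as a matrix A (a label introduced here for exposition, not used in the code), each channel computes f = A F with

\[
A =
\begin{bmatrix}
1 & 1 & 1 & 1 & 1 & 1 & 1 & 0 \\
0 & -1 & 1 & -2 & 2 & -3 & 3 & 0 \\
0 & 1 & 1 & 4 & 4 & 9 & 9 & 0 \\
0 & -1 & 1 & -8 & 8 & -27 & 27 & 1
\end{bmatrix}
\]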
diff --git a/src/core/NEON/kernels/convolution/winograd/winograd_transforms/output_4x4_3x3_fp16_fp16_integers.cpp b/src/core/NEON/kernels/convolution/winograd/winograd_transforms/output_4x4_3x3_fp16_fp16_integers.cpp
deleted file mode 100644
index 37b890d1bc..0000000000
--- a/src/core/NEON/kernels/convolution/winograd/winograd_transforms/output_4x4_3x3_fp16_fp16_integers.cpp
+++ /dev/null
@@ -1,255 +0,0 @@
-/*
- * Copyright (c) 2020 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
-#include "arm.hpp"
-#include "output.hpp"
-
-namespace winograd
-{
-
-template <>
-void winograd::OutputTransform<3, 3, 6, 6, __fp16, __fp16, winograd::WinogradRoots::Integers>::transform_tile(
- const int n_channels,
- const __fp16* inptr,
- const int matrix_stride,
- const __fp16* bptr,
- __fp16* const output,
- const int output_row_stride,
- const int output_col_stride,
- const __fp16 output_min,
- const __fp16 output_max
-)
-{
- // Construct a map to the output cells
- __fp16 *outptrs[output_tile_rows][output_tile_cols];
- for (int i = 0; i < output_tile_rows; i++)
- {
- for (int j = 0; j < output_tile_cols; j++)
- {
- outptrs[i][j] = output + i*output_row_stride + j*output_col_stride;
- }
- }
-
- // For each channel of the output
- int channels_remaining = n_channels;
-
-#ifdef __aarch64__
- for (; channels_remaining >= 8; channels_remaining -= 8)
- {
- // Matrices used and computed during this transform
- float16x8_t F[6][6], FZ[6][4], f[4][4], b;
-
- // Read a 6x6 tile in the Winograd domain
- for (int i = 0, m = 0; i < 6; i++)
- {
- for (int j = 0; j < 6; j++, m++)
- {
- F[i][j] = vld1q_f16(inptr + m*matrix_stride);
- }
- }
- inptr += 8;
-
- // Compute the matrix F Z
- for (int i = 0; i < 6; i++)
- {
- // FZ[i][0] = 1*F[i][0] + 1*F[i][1] + 1*F[i][2] + 1*F[i][3] + 1*F[i][4];
- FZ[i][0] = vaddq_f16(vaddq_f16(vaddq_f16(F[i][0], F[i][1]), vaddq_f16(F[i][2], F[i][3])), F[i][4]);
-
- // FZ[i][1] = 1*F[i][1] + -1*F[i][2] + 2*F[i][3] + -2*F[i][4];
- FZ[i][1] = vaddq_f16(vsubq_f16(F[i][1], F[i][2]), vmulq_f16(vsubq_f16(F[i][3], F[i][4]), vdupq_n_f16(2.0f)));
-
- // FZ[i][2] = 1*F[i][1] + 1*F[i][2] + 4*F[i][3] + 4*F[i][4];
- FZ[i][2] = vaddq_f16(vaddq_f16(F[i][1], F[i][2]), vmulq_f16(vaddq_f16(F[i][3], F[i][4]), vdupq_n_f16(4.0f)));
-
- // FZ[i][3] = 1*F[i][1] + -1*F[i][2] + 8*F[i][3] + -8*F[i][4] + 1*F[i][5];
- FZ[i][3] = vaddq_f16(vaddq_f16(vsubq_f16(F[i][1], F[i][2]), vmulq_f16(vsubq_f16(F[i][3], F[i][4]), vdupq_n_f16(8.0f))), F[i][5]);
- }
-
- // Compute the output tile f = ZT F Z
- for (int j = 0; j < 4; j++)
- {
- // f[0][j] = 1*FZ[0][j] + 1*FZ[1][j] + 1*FZ[2][j] + 1*FZ[3][j] + 1*FZ[4][j];
- f[0][j] = vaddq_f16(vaddq_f16(vaddq_f16(FZ[0][j], FZ[1][j]), vaddq_f16(FZ[2][j], FZ[3][j])), FZ[4][j]);
-
- // f[1][j] = 1*FZ[1][j] + -1*FZ[2][j] + 2*FZ[3][j] + -2*FZ[4][j];
- f[1][j] = vaddq_f16(vsubq_f16(FZ[1][j], FZ[2][j]), vmulq_f16(vsubq_f16(FZ[3][j], FZ[4][j]), vdupq_n_f16(2.0f)));
-
- // f[2][j] = 1*FZ[1][j] + 1*FZ[2][j] + 4*FZ[3][j] + 4*FZ[4][j];
- f[2][j] = vaddq_f16(vaddq_f16(FZ[1][j], FZ[2][j]), vmulq_f16(vaddq_f16(FZ[3][j], FZ[4][j]), vdupq_n_f16(4.0f)));
-
- // f[3][j] = 1*FZ[1][j] + -1*FZ[2][j] + 8*FZ[3][j] + -8*FZ[4][j] + 1*FZ[5][j];
- f[3][j] = vaddq_f16(vaddq_f16(vsubq_f16(FZ[1][j], FZ[2][j]), vmulq_f16(vsubq_f16(FZ[3][j], FZ[4][j]), vdupq_n_f16(8.0f))), FZ[5][j]);
- }
-
- // Write out the output tile
- if (bptr != nullptr)
- {
- b = vld1q_f16(bptr);
- bptr += 8;
- }
- else
- {
- b = vdupq_n_f16(0.0f);
- }
- for (int i = 0; i < output_tile_rows; i++)
- {
- for (int j = 0; j < output_tile_cols; j++)
- {
- const auto y =
- vmaxq_f16(vminq_f16(vaddq_f16(f[i][j], b), vdupq_n_f16(output_max)),
- vdupq_n_f16(output_min));
- vst1q_f16(outptrs[i][j], y);
- outptrs[i][j] += 8;
- }
- }
- }
-#endif // __aarch64__
-#ifdef __arm_any__
- for (; channels_remaining >= 4; channels_remaining -= 4)
- {
- // Matrices used and computed during this transform
- float16x4_t F[6][6], FZ[6][4], f[4][4], b;
-
- // Read a 6x6 tile in the Winograd domain
- for (int i = 0, m = 0; i < 6; i++)
- {
- for (int j = 0; j < 6; j++, m++)
- {
- F[i][j] = vld1_f16(inptr + m*matrix_stride);
- }
- }
- inptr += 4;
-
- // Compute the matrix F Z
- for (int i = 0; i < 6; i++)
- {
- // FZ[i][0] = 1*F[i][0] + 1*F[i][1] + 1*F[i][2] + 1*F[i][3] + 1*F[i][4];
- FZ[i][0] = vadd_f16(vadd_f16(vadd_f16(F[i][0], F[i][1]), vadd_f16(F[i][2], F[i][3])), F[i][4]);
-
- // FZ[i][1] = 1*F[i][1] + -1*F[i][2] + 2*F[i][3] + -2*F[i][4];
- FZ[i][1] = vadd_f16(vsub_f16(F[i][1], F[i][2]), vmul_f16(vsub_f16(F[i][3], F[i][4]), vdup_n_f16(2.0f)));
-
- // FZ[i][2] = 1*F[i][1] + 1*F[i][2] + 4*F[i][3] + 4*F[i][4];
- FZ[i][2] = vadd_f16(vadd_f16(F[i][1], F[i][2]), vmul_f16(vadd_f16(F[i][3], F[i][4]), vdup_n_f16(4.0f)));
-
- // FZ[i][3] = 1*F[i][1] + -1*F[i][2] + 8*F[i][3] + -8*F[i][4] + 1*F[i][5];
- FZ[i][3] = vadd_f16(vadd_f16(vsub_f16(F[i][1], F[i][2]), vmul_f16(vsub_f16(F[i][3], F[i][4]), vdup_n_f16(8.0f))), F[i][5]);
- }
-
- // Compute the output tile f = ZT F Z
- for (int j = 0; j < 4; j++)
- {
- // f[0][j] = 1*FZ[0][j] + 1*FZ[1][j] + 1*FZ[2][j] + 1*FZ[3][j] + 1*FZ[4][j];
- f[0][j] = vadd_f16(vadd_f16(vadd_f16(FZ[0][j], FZ[1][j]), vadd_f16(FZ[2][j], FZ[3][j])), FZ[4][j]);
-
- // f[1][j] = 1*FZ[1][j] + -1*FZ[2][j] + 2*FZ[3][j] + -2*FZ[4][j];
- f[1][j] = vadd_f16(vsub_f16(FZ[1][j], FZ[2][j]), vmul_f16(vsub_f16(FZ[3][j], FZ[4][j]), vdup_n_f16(2.0f)));
-
- // f[2][j] = 1*FZ[1][j] + 1*FZ[2][j] + 4*FZ[3][j] + 4*FZ[4][j];
- f[2][j] = vadd_f16(vadd_f16(FZ[1][j], FZ[2][j]), vmul_f16(vadd_f16(FZ[3][j], FZ[4][j]), vdup_n_f16(4.0f)));
-
- // f[3][j] = 1*FZ[1][j] + -1*FZ[2][j] + 8*FZ[3][j] + -8*FZ[4][j] + 1*FZ[5][j];
- f[3][j] = vadd_f16(vadd_f16(vsub_f16(FZ[1][j], FZ[2][j]), vmul_f16(vsub_f16(FZ[3][j], FZ[4][j]), vdup_n_f16(8.0f))), FZ[5][j]);
- }
-
- // Write out the output tile
- if (bptr != nullptr)
- {
- b = vld1_f16(bptr);
- bptr += 4;
- }
- else
- {
- b = vdup_n_f16(0.0f);
- }
- for (int i = 0; i < output_tile_rows; i++)
- {
- for (int j = 0; j < output_tile_cols; j++)
- {
- const auto y =
- vmax_f16(vmin_f16(vadd_f16(f[i][j], b), vdup_n_f16(output_max)),
- vdup_n_f16(output_min));
- vst1_f16(outptrs[i][j], y);
- outptrs[i][j] += 4;
- }
- }
- }
-#endif // __arm_any__
- for (; channels_remaining; channels_remaining--)
- {
- // Matrices used and computed during this transform
- __fp16 F[6][6], FZ[6][4], f[4][4], b;
-
- // Read a 6x6 tile in the Winograd domain
- for (int i = 0, m = 0; i < 6; i++)
- {
- for (int j = 0; j < 6; j++, m++)
- {
- F[i][j] = *(inptr + m*matrix_stride);
- }
- }
- inptr++;
-
- // Compute the matrix F Z
- for (int i = 0; i < 6; i++)
- {
- FZ[i][0] = 1*F[i][0] + 1*F[i][1] + 1*F[i][2] + 1*F[i][3] + 1*F[i][4];
- FZ[i][1] = 1*F[i][1] + -1*F[i][2] + 2*F[i][3] + -2*F[i][4];
- FZ[i][2] = 1*F[i][1] + 1*F[i][2] + 4*F[i][3] + 4*F[i][4];
- FZ[i][3] = 1*F[i][1] + -1*F[i][2] + 8*F[i][3] + -8*F[i][4] + 1*F[i][5];
- }
-
- // Compute the output tile f = ZT F Z
- for (int j = 0; j < 4; j++)
- {
- f[0][j] = 1*FZ[0][j] + 1*FZ[1][j] + 1*FZ[2][j] + 1*FZ[3][j] + 1*FZ[4][j];
- f[1][j] = 1*FZ[1][j] + -1*FZ[2][j] + 2*FZ[3][j] + -2*FZ[4][j];
- f[2][j] = 1*FZ[1][j] + 1*FZ[2][j] + 4*FZ[3][j] + 4*FZ[4][j];
- f[3][j] = 1*FZ[1][j] + -1*FZ[2][j] + 8*FZ[3][j] + -8*FZ[4][j] + 1*FZ[5][j];
- }
-
- // Write out the output tile
- if (bptr != nullptr)
- {
- b = *(bptr++);
- }
- else
- {
- b = 0.0f;
- }
- for (int i = 0; i < output_tile_rows; i++)
- {
- for (int j = 0; j < output_tile_cols; j++)
- {
- const auto y = std::max(std::min<__fp16>(f[i][j] + b, output_max), output_min);
- *(outptrs[i][j]++) = y;
- }
- }
- }
-}
-
-template class OutputTransform<3, 3, 6, 6, __fp16, __fp16, winograd::WinogradRoots::Integers>;
-
-} // namespace winograd
-#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
diff --git a/src/core/NEON/kernels/convolution/winograd/winograd_transforms/output_4x4_3x3_fp32_fp32_integers.cpp b/src/core/NEON/kernels/convolution/winograd/winograd_transforms/output_4x4_3x3_fp32_fp32_integers.cpp
deleted file mode 100644
index 292999c8bc..0000000000
--- a/src/core/NEON/kernels/convolution/winograd/winograd_transforms/output_4x4_3x3_fp32_fp32_integers.cpp
+++ /dev/null
@@ -1,254 +0,0 @@
-/*
- * Copyright (c) 2017-2019 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include "arm.hpp"
-#include "output.hpp"
-
-namespace winograd
-{
-
-template <>
-void winograd::OutputTransform<3, 3, 6, 6, float, float, winograd::WinogradRoots::Integers>::transform_tile(
- const int n_channels,
- const float* inptr,
- const int matrix_stride,
- const float* bptr,
- float* const output,
- const int output_row_stride,
- const int output_col_stride,
- const float output_min,
- const float output_max
-)
-{
- // Construct a map to the output cells
- float *outptrs[output_tile_rows][output_tile_cols];
- for (int i = 0; i < output_tile_rows; i++)
- {
- for (int j = 0; j < output_tile_cols; j++)
- {
- outptrs[i][j] = output + i*output_row_stride + j*output_col_stride;
- }
- }
-
- // For each channel of the output
- int channels_remaining = n_channels;
-
-#ifdef __aarch64__
- for (; channels_remaining >= 4; channels_remaining -= 4)
- {
- // Matrices used and computed during this transform
- float32x4_t F[6][6], FZ[6][4], f[4][4], b;
-
- // Read a 6x6 tile in the Winograd domain
- for (int i = 0, m = 0; i < 6; i++)
- {
- for (int j = 0; j < 6; j++, m++)
- {
- F[i][j] = vld1q_f32(inptr + m*matrix_stride);
- }
- }
- inptr += 4;
-
- // Compute the matrix F Z
- for (int i = 0; i < 6; i++)
- {
- // FZ[i][0] = 1*F[i][0] + 1*F[i][1] + 1*F[i][2] + 1*F[i][3] + 1*F[i][4];
- FZ[i][0] = vaddq_f32(vaddq_f32(vaddq_f32(F[i][0], F[i][1]), vaddq_f32(F[i][2], F[i][3])), F[i][4]);
-
- // FZ[i][1] = 1*F[i][1] + -1*F[i][2] + 2*F[i][3] + -2*F[i][4];
- FZ[i][1] = vmlaq_n_f32(vsubq_f32(F[i][1], F[i][2]), vsubq_f32(F[i][3], F[i][4]), 2.0f);
-
- // FZ[i][2] = 1*F[i][1] + 1*F[i][2] + 4*F[i][3] + 4*F[i][4];
- FZ[i][2] = vmlaq_n_f32(vaddq_f32(F[i][1], F[i][2]), vaddq_f32(F[i][3], F[i][4]), 4.0f);
-
- // FZ[i][3] = 1*F[i][1] + -1*F[i][2] + 8*F[i][3] + -8*F[i][4] + 1*F[i][5];
- FZ[i][3] = vaddq_f32(vmlaq_n_f32(vsubq_f32(F[i][1], F[i][2]), vsubq_f32(F[i][3], F[i][4]), 8.0f), F[i][5]);
- }
-
- // Compute the output tile f = ZT F Z
- for (int j = 0; j < 4; j++)
- {
- // f[0][j] = 1*FZ[0][j] + 1*FZ[1][j] + 1*FZ[2][j] + 1*FZ[3][j] + 1*FZ[4][j];
- f[0][j] = vaddq_f32(vaddq_f32(vaddq_f32(FZ[0][j], FZ[1][j]), vaddq_f32(FZ[2][j], FZ[3][j])), FZ[4][j]);
-
- // f[1][j] = 1*FZ[1][j] + -1*FZ[2][j] + 2*FZ[3][j] + -2*FZ[4][j];
- f[1][j] = vmlaq_n_f32(vsubq_f32(FZ[1][j], FZ[2][j]), vsubq_f32(FZ[3][j], FZ[4][j]), 2.0f);
-
- // f[2][j] = 1*FZ[1][j] + 1*FZ[2][j] + 4*FZ[3][j] + 4*FZ[4][j];
- f[2][j] = vmlaq_n_f32(vaddq_f32(FZ[1][j], FZ[2][j]), vaddq_f32(FZ[3][j], FZ[4][j]), 4.0f);
-
- // f[3][j] = 1*FZ[1][j] + -1*FZ[2][j] + 8*FZ[3][j] + -8*FZ[4][j] + 1*FZ[5][j];
- f[3][j] = vaddq_f32(vmlaq_n_f32(vsubq_f32(FZ[1][j], FZ[2][j]), vsubq_f32(FZ[3][j], FZ[4][j]), 8.0f), FZ[5][j]);
- }
-
- // Write out the output tile
- if (bptr != nullptr)
- {
- b = vld1q_f32(bptr);
- bptr += 4;
- }
- else
- {
- b = vdupq_n_f32(0.0f);
- }
- for (int i = 0; i < output_tile_rows; i++)
- {
- for (int j = 0; j < output_tile_cols; j++)
- {
- const auto y =
- vmaxq_f32(vminq_f32(vaddq_f32(f[i][j], b), vdupq_n_f32(output_max)),
- vdupq_n_f32(output_min));
- vst1q_f32(outptrs[i][j], y);
- outptrs[i][j] += 4;
- }
- }
- }
-#endif // __aarch64__
-#ifdef __arm_any__
- for (; channels_remaining >= 2; channels_remaining -= 2)
- {
- // Matrices used and computed during this transform
- float32x2_t F[6][6], FZ[6][4], f[4][4], b;
-
- // Read a 6x6 tile in the Winograd domain
- for (int i = 0, m = 0; i < 6; i++)
- {
- for (int j = 0; j < 6; j++, m++)
- {
- F[i][j] = vld1_f32(inptr + m*matrix_stride);
- }
- }
- inptr += 2;
-
- // Compute the matrix F Z
- for (int i = 0; i < 6; i++)
- {
- // FZ[i][0] = 1*F[i][0] + 1*F[i][1] + 1*F[i][2] + 1*F[i][3] + 1*F[i][4];
- FZ[i][0] = vadd_f32(vadd_f32(vadd_f32(F[i][0], F[i][1]), vadd_f32(F[i][2], F[i][3])), F[i][4]);
-
- // FZ[i][1] = 1*F[i][1] + -1*F[i][2] + 2*F[i][3] + -2*F[i][4];
- FZ[i][1] = vmla_n_f32(vsub_f32(F[i][1], F[i][2]), vsub_f32(F[i][3], F[i][4]), 2.0f);
-
- // FZ[i][2] = 1*F[i][1] + 1*F[i][2] + 4*F[i][3] + 4*F[i][4];
- FZ[i][2] = vmla_n_f32(vadd_f32(F[i][1], F[i][2]), vadd_f32(F[i][3], F[i][4]), 4.0f);
-
- // FZ[i][3] = 1*F[i][1] + -1*F[i][2] + 8*F[i][3] + -8*F[i][4] + 1*F[i][5];
- FZ[i][3] = vadd_f32(vmla_n_f32(vsub_f32(F[i][1], F[i][2]), vsub_f32(F[i][3], F[i][4]), 8.0f), F[i][5]);
- }
-
- // Compute the output tile f = ZT F Z
- for (int j = 0; j < 4; j++)
- {
- // f[0][j] = 1*FZ[0][j] + 1*FZ[1][j] + 1*FZ[2][j] + 1*FZ[3][j] + 1*FZ[4][j];
- f[0][j] = vadd_f32(vadd_f32(vadd_f32(FZ[0][j], FZ[1][j]), vadd_f32(FZ[2][j], FZ[3][j])), FZ[4][j]);
-
- // f[1][j] = 1*FZ[1][j] + -1*FZ[2][j] + 2*FZ[3][j] + -2*FZ[4][j];
- f[1][j] = vmla_n_f32(vsub_f32(FZ[1][j], FZ[2][j]), vsub_f32(FZ[3][j], FZ[4][j]), 2.0f);
-
- // f[2][j] = 1*FZ[1][j] + 1*FZ[2][j] + 4*FZ[3][j] + 4*FZ[4][j];
- f[2][j] = vmla_n_f32(vadd_f32(FZ[1][j], FZ[2][j]), vadd_f32(FZ[3][j], FZ[4][j]), 4.0f);
-
- // f[3][j] = 1*FZ[1][j] + -1*FZ[2][j] + 8*FZ[3][j] + -8*FZ[4][j] + 1*FZ[5][j];
- f[3][j] = vadd_f32(vmla_n_f32(vsub_f32(FZ[1][j], FZ[2][j]), vsub_f32(FZ[3][j], FZ[4][j]), 8.0f), FZ[5][j]);
- }
-
- // Write out the output tile
- if (bptr != nullptr)
- {
- b = vld1_f32(bptr);
- bptr += 2;
- }
- else
- {
- b = vdup_n_f32(0.0f);
- }
- for (int i = 0; i < output_tile_rows; i++)
- {
- for (int j = 0; j < output_tile_cols; j++)
- {
- const auto y =
- vmax_f32(vmin_f32(vadd_f32(f[i][j], b), vdup_n_f32(output_max)),
- vdup_n_f32(output_min));
- vst1_f32(outptrs[i][j], y);
- outptrs[i][j] += 2;
- }
- }
- }
-#endif // __arm_any__
- for (; channels_remaining; channels_remaining--)
- {
- // Matrices used and computed during this transform
- float F[6][6], FZ[6][4], f[4][4], b;
-
- // Read a 6x6 tile in the Winograd domain
- for (int i = 0, m = 0; i < 6; i++)
- {
- for (int j = 0; j < 6; j++, m++)
- {
- F[i][j] = *(inptr + m*matrix_stride);
- }
- }
- inptr++;
-
- // Compute the matrix F Z
- for (int i = 0; i < 6; i++)
- {
- FZ[i][0] = 1*F[i][0] + 1*F[i][1] + 1*F[i][2] + 1*F[i][3] + 1*F[i][4];
- FZ[i][1] = 1*F[i][1] + -1*F[i][2] + 2*F[i][3] + -2*F[i][4];
- FZ[i][2] = 1*F[i][1] + 1*F[i][2] + 4*F[i][3] + 4*F[i][4];
- FZ[i][3] = 1*F[i][1] + -1*F[i][2] + 8*F[i][3] + -8*F[i][4] + 1*F[i][5];
- }
-
- // Compute the output tile f = ZT F Z
- for (int j = 0; j < 4; j++)
- {
- f[0][j] = 1*FZ[0][j] + 1*FZ[1][j] + 1*FZ[2][j] + 1*FZ[3][j] + 1*FZ[4][j];
- f[1][j] = 1*FZ[1][j] + -1*FZ[2][j] + 2*FZ[3][j] + -2*FZ[4][j];
- f[2][j] = 1*FZ[1][j] + 1*FZ[2][j] + 4*FZ[3][j] + 4*FZ[4][j];
- f[3][j] = 1*FZ[1][j] + -1*FZ[2][j] + 8*FZ[3][j] + -8*FZ[4][j] + 1*FZ[5][j];
- }
-
- // Write out the output tile
- if (bptr != nullptr)
- {
- b = *(bptr++);
- }
- else
- {
- b = 0.0f;
- }
- for (int i = 0; i < output_tile_rows; i++)
- {
- for (int j = 0; j < output_tile_cols; j++)
- {
- const auto y = std::max(std::min(f[i][j] + b, output_max), output_min);
- *(outptrs[i][j]++) = y;
- }
- }
- }
-}
-
-template class OutputTransform<3, 3, 6, 6, float, float, winograd::WinogradRoots::Integers>;
-
-} // namespace winograd
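As in the fp16 variant above it, the 4x4-output / 3x3-kernel transform reduces a 6x6 tile per channel as f = Z^T F Z, where the scalar tail loop gives

\[
Z^{\mathsf T} =
\begin{bmatrix}
1 & 1 & 1 & 1 & 1 & 0 \\
0 & 1 & -1 & 2 & -2 & 0 \\
0 & 1 & 1 & 4 & 4 & 0 \\
0 & 1 & -1 & 8 & -8 & 1
\end{bmatrix}
\]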
diff --git a/src/core/NEON/kernels/convolution/winograd/winograd_transforms/output_6_3_fp32_fp32_integers.cpp b/src/core/NEON/kernels/convolution/winograd/winograd_transforms/output_6_3_fp32_fp32_integers.cpp
deleted file mode 100644
index 05f06a81ee..0000000000
--- a/src/core/NEON/kernels/convolution/winograd/winograd_transforms/output_6_3_fp32_fp32_integers.cpp
+++ /dev/null
@@ -1,155 +0,0 @@
-/*
- * Copyright (c) 2017-2019 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include "output.hpp"
-#include "arm.hpp"
-
-namespace winograd
-{
-
-template <>
-void OutputTransform<1, 3, 1, 8, float, float, WinogradRoots::Integers>::transform_tile(
- const int n_channels,
- const float* inptr,
- const int matrix_stride,
- const float* bptr,
- float* const output,
- const int, // No need to stride across rows
- const int output_col_stride,
- const float output_min,
- const float output_max
-)
-{
- // Construct a map to the output cells
- float *outptrs[output_tile_cols];
- for (int j = 0; j < output_tile_cols; j++)
- {
- outptrs[j] = output + j*output_col_stride;
- }
-
- // For each channel of the output
- int channels_remaining = n_channels;
-#ifdef __arm_any__
- for (; channels_remaining >= 4; channels_remaining -= 4)
- {
- // Matrices used and computed during this transform
- float32x4_t F[inner_tile_cols], f[output_tile_cols], b = vdupq_n_f32(0.0f);
-
- // Read a 1x8 tile in the Winograd domain
- for (int j = 0; j < inner_tile_cols; j++)
- {
- F[j] = vld1q_f32(inptr + j*matrix_stride);
- }
- inptr += 4;
-
- f[0] = vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmulq_n_f32(F[6], 1), F[5], 1), F[4], 1), F[3], 1), F[2], 1), F[1], 1), F[0], 1);
- f[1] = vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmulq_n_f32(F[2], 1), F[6], 3), F[4], 2), F[3], -2), F[5], -3), F[1], -1);
- f[2] = vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmulq_n_f32(F[2], 1), F[1], 1), F[6], 9), F[5], 9), F[4], 4), F[3], 4);
- f[3] = vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmulq_n_f32(F[2], 1), F[6], 27), F[4], 8), F[3], -8), F[5], -27), F[1], -1);
- f[4] = vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmulq_n_f32(F[2], 1), F[1], 1), F[6], 81), F[5], 81), F[4], 16), F[3], 16);
- f[5] = vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmulq_n_f32(F[7], 1), F[2], 1), F[6], 243), F[4], 32), F[3], -32), F[5], -243), F[1], -1);
-
- // Write out the output tile
- if (bptr != 0)
- {
- b = vld1q_f32(bptr);
- bptr += 4;
- }
- for (int j = 0; j < output_tile_cols; j++)
- {
- const auto y = vminq_f32(vmaxq_f32(f[j] + b, vdupq_n_f32(output_min)),
- vdupq_n_f32(output_max));
- vst1q_f32(outptrs[j], y);
- outptrs[j] += 4;
- }
- }
- for (; channels_remaining >= 2; channels_remaining -= 2)
- {
- // Matrices used and computed during this transform
- float32x2_t F[inner_tile_cols], f[output_tile_cols], b = vdup_n_f32(0.0f);
-
- // Read a 1x8 tile in the Winograd domain
- for (int j = 0; j < inner_tile_cols; j++)
- {
- F[j] = vld1_f32(inptr + j*matrix_stride);
- }
- inptr += 2;
-
- f[0] = vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmul_n_f32(F[6], 1), F[5], 1), F[4], 1), F[3], 1), F[2], 1), F[1], 1), F[0], 1);
- f[1] = vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmul_n_f32(F[2], 1), F[6], 3), F[4], 2), F[3], -2), F[5], -3), F[1], -1);
- f[2] = vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmul_n_f32(F[2], 1), F[1], 1), F[6], 9), F[5], 9), F[4], 4), F[3], 4);
- f[3] = vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmul_n_f32(F[2], 1), F[6], 27), F[4], 8), F[3], -8), F[5], -27), F[1], -1);
- f[4] = vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmul_n_f32(F[2], 1), F[1], 1), F[6], 81), F[5], 81), F[4], 16), F[3], 16);
- f[5] = vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmul_n_f32(F[7], 1), F[2], 1), F[6], 243), F[4], 32), F[3], -32), F[5], -243), F[1], -1);
-
- // Write out the output tile
- if (bptr != 0)
- {
- b = vld1_f32(bptr);
- bptr += 2;
- }
- for (int j = 0; j < output_tile_cols; j++)
- {
- const auto y = vmin_f32(vmax_f32(f[j] + b, vdup_n_f32(output_min)),
- vdup_n_f32(output_max));
- vst1_f32(outptrs[j], y);
- outptrs[j] += 2;
- }
- }
-#endif // __arm_any__
- for (; channels_remaining; channels_remaining--)
- {
- // Matrices used and computed during this transform
- float F[inner_tile_cols], f[output_tile_cols], b = 0.0f;
-
- // Read a 1x8 tile in the Winograd domain
- for (int j = 0; j < inner_tile_cols; j++)
- {
- F[j] = *(inptr + j*matrix_stride);
- }
- inptr++;
-
- f[0] = F[0]*1 + F[1]*1 + F[2]*1 + F[3]*1 + F[4]*1 + F[5]*1 + F[6]*1;
- f[1] = F[1]*-1 + F[5]*-3 + F[3]*-2 + F[4]*2 + F[6]*3 + F[2]*1;
- f[2] = F[3]*4 + F[4]*4 + F[5]*9 + F[6]*9 + F[1]*1 + F[2]*1;
- f[3] = F[1]*-1 + F[5]*-27 + F[3]*-8 + F[4]*8 + F[6]*27 + F[2]*1;
- f[4] = F[3]*16 + F[4]*16 + F[5]*81 + F[6]*81 + F[1]*1 + F[2]*1;
- f[5] = F[1]*-1 + F[5]*-243 + F[3]*-32 + F[4]*32 + F[6]*243 + F[2]*1 + F[7]*1;
-
- // Write out the output tile
- if (bptr != 0)
- {
- b = *(bptr++);
- }
- for (int j = 0; j < output_tile_cols; j++)
- {
- *(outptrs[j]++) = std::max(std::min(f[j] + b, output_max), output_min);
- }
- }
-}
-
-template class OutputTransform<1, 3, 1, 8, float, float, WinogradRoots::Integers>;
-template class OutputTransform<3, 1, 8, 1, float, float, WinogradRoots::Integers>;
-
-} // namespace winograd
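Again writing the inlined coefficients of the scalar loop as a matrix A (label introduced here for exposition), the 1-D 6-output / 3-tap transform above is f = A F with

\[
A =
\begin{bmatrix}
1 & 1 & 1 & 1 & 1 & 1 & 1 & 0 \\
0 & -1 & 1 & -2 & 2 & -3 & 3 & 0 \\
0 & 1 & 1 & 4 & 4 & 9 & 9 & 0 \\
0 & -1 & 1 & -8 & 8 & -27 & 27 & 0 \\
0 & 1 & 1 & 16 & 16 & 81 & 81 & 0 \\
0 & -1 & 1 & -32 & 32 & -243 & 243 & 1
\end{bmatrix}
\]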
diff --git a/src/core/NEON/kernels/convolution/winograd/winograd_transforms/weights_2_7_fp32_fp32_integers.cpp b/src/core/NEON/kernels/convolution/winograd/winograd_transforms/weights_2_7_fp32_fp32_integers.cpp
deleted file mode 100644
index 37ae43fdb0..0000000000
--- a/src/core/NEON/kernels/convolution/winograd/winograd_transforms/weights_2_7_fp32_fp32_integers.cpp
+++ /dev/null
@@ -1,90 +0,0 @@
-/*
- * Copyright (c) 2019 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include "arm.hpp"
-#include "kernel.hpp"
-
-namespace winograd
-{
-
-template <>
-void WeightTransform<1, 7, 1, 8, float, float, WinogradRoots::Integers>::execute(
- const int n_output_channels,
- const int n_input_channels,
- const float* const input, // NOTE: Data in HWIO order
- float* const output,
- const int matrix_stride,
- const int matrix_row_stride
-)
-{
- // Get pointers to each cell of the weight tensor
- const auto weight_col_stride = n_input_channels * n_output_channels;
- const float *inptrs[kernel_cols];
- for (int j = 0; j < kernel_cols; j++)
- {
- inptrs[j] = input + j*weight_col_stride;
- }
-
- // For each input channel
- for (int ic = 0; ic < n_input_channels; ic++)
- {
- float *outptr = output + ic * matrix_row_stride;
-
- // For each output channel
- int channels_remaining = n_output_channels;
- for (; channels_remaining; channels_remaining--)
- {
- // Matrices used and computed in this kernel
- float w[kernel_cols], V[inner_tile_cols];
-
- // Read weights
- for (int j = 0; j < kernel_cols; j++)
- {
- w[j] = *(inptrs[j]++);
- }
-
- // Compute V = w WT
- V[0] = (w[0]*-1) / 36.0f;
- V[1] = (w[1]*-1 + w[3]*-1 + w[5]*-1 + w[0]*1 + w[2]*1 + w[4]*1 + w[6]*1) / 48.0f;
- V[2] = (w[0]*1 + w[1]*1 + w[2]*1 + w[3]*1 + w[4]*1 + w[5]*1 + w[6]*1) / 48.0f;
- V[3] = (w[0]*-1 + w[6]*-64 + w[4]*-16 + w[2]*-4 + w[1]*2 + w[3]*8 + w[5]*32) / 120.0f;
- V[4] = (w[0]*-1 + w[6]*-64 + w[5]*-32 + w[4]*-16 + w[3]*-8 + w[2]*-4 + w[1]*-2) / 120.0f;
- V[5] = (w[5]*-243 + w[3]*-27 + w[1]*-3 + w[2]*9 + w[4]*81 + w[6]*729 + w[0]*1) / 720.0f;
- V[6] = (w[1]*3 + w[2]*9 + w[3]*27 + w[4]*81 + w[5]*243 + w[6]*729 + w[0]*1) / 720.0f;
- V[7] = (w[6]*1) / 1.0f;
-
- // Store the transformed weights
- for (int j = 0; j < inner_tile_cols; j++)
- {
- *(outptr + j*matrix_stride) = V[j];
- }
- outptr++;
- }
- }
-}
-
-template class WeightTransform<1, 7, 1, 8, float, float, WinogradRoots::Integers>;
-template class WeightTransform<7, 1, 8, 1, float, float, WinogradRoots::Integers>;
-
-} // namespace winograd
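A minimal standalone scalar sketch of the 1x7 weight transform above; transform_weights_1x7 is a hypothetical helper introduced here (not part of the library), with the coefficients copied from the per-channel loop of the deleted kernel.

#include <array>

// Hypothetical standalone equivalent of one (input channel, output channel) pair of
// WeightTransform<1, 7, 1, 8, float, float, WinogradRoots::Integers>: V = w W^T,
// using the same coefficients as the deleted code.
std::array<float, 8> transform_weights_1x7(const std::array<float, 7> &w)
{
  std::array<float, 8> V;
  V[0] = -w[0] / 36.0f;
  V[1] = (w[0] - w[1] + w[2] - w[3] + w[4] - w[5] + w[6]) / 48.0f;
  V[2] = (w[0] + w[1] + w[2] + w[3] + w[4] + w[5] + w[6]) / 48.0f;
  V[3] = (-w[0] + 2.0f*w[1] - 4.0f*w[2] + 8.0f*w[3] - 16.0f*w[4] + 32.0f*w[5] - 64.0f*w[6]) / 120.0f;
  V[4] = (-w[0] - 2.0f*w[1] - 4.0f*w[2] - 8.0f*w[3] - 16.0f*w[4] - 32.0f*w[5] - 64.0f*w[6]) / 120.0f;
  V[5] = (w[0] - 3.0f*w[1] + 9.0f*w[2] - 27.0f*w[3] + 81.0f*w[4] - 243.0f*w[5] + 729.0f*w[6]) / 720.0f;
  V[6] = (w[0] + 3.0f*w[1] + 9.0f*w[2] + 27.0f*w[3] + 81.0f*w[4] + 243.0f*w[5] + 729.0f*w[6]) / 720.0f;
  V[7] = w[6];
  return V;
}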
diff --git a/src/core/NEON/kernels/convolution/winograd/winograd_transforms/weights_2x2_3x3_fp32_fp32_integers.cpp b/src/core/NEON/kernels/convolution/winograd/winograd_transforms/weights_2x2_3x3_fp32_fp32_integers.cpp
deleted file mode 100644
index 8fab6db1ba..0000000000
--- a/src/core/NEON/kernels/convolution/winograd/winograd_transforms/weights_2x2_3x3_fp32_fp32_integers.cpp
+++ /dev/null
@@ -1,220 +0,0 @@
-/*
- * Copyright (c) 2019 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include "arm.hpp"
-#include "kernel.hpp"
-
-namespace winograd
-{
-
-template <>
-void WeightTransform<3, 3, 4, 4, float, float, WinogradRoots::Integers>::execute(
- const int n_output_channels,
- const int n_input_channels,
- const float* const input,
- float* const output,
- const int matrix_stride,
- const int matrix_row_stride
-)
-{
- constexpr int inner_tile_i = 4;
- constexpr int inner_tile_j = 4;
-
- // Get pointers to each cell of the weight tensor
- const auto weight_col_stride = n_input_channels * n_output_channels;
- const auto weight_row_stride = 3 * weight_col_stride;
- const float *inptrs[3][3];
- for (int i = 0; i < 3; i++)
- {
- for (int j = 0; j < 3; j++)
- {
- inptrs[i][j] = input + i*weight_row_stride + j*weight_col_stride;
- }
- }
-
- // For each input channel
- for (int ic = 0; ic < n_input_channels; ic++)
- {
- float *outptr = output + ic * matrix_row_stride;
-
- // For each output channel
- int channels_remaining = n_output_channels;
-#ifdef __aarch64__
- for (; channels_remaining >= 4; channels_remaining -= 4)
- {
- // Matrices used and computed in this kernel
- float32x4_t w[3][3], Ww[inner_tile_i][3], V[inner_tile_i][inner_tile_j];
-
- // Read weights
- for (int i = 0; i < 3; i++)
- {
- for (int j = 0; j < 3; j++)
- {
- w[i][j] = vld1q_f32(inptrs[i][j]);
- inptrs[i][j] += 4;
- }
- }
-
- // Compute the matrix W w
- for (int j = 0; j < 3; j++)
- {
- Ww[0][j] = w[0][j];
-
- // Ww[1][j] = 0.5*(w[0][j] + w[1][j] + w[2][j]);
- Ww[1][j] = vmulq_n_f32(vaddq_f32(vaddq_f32(w[0][j], w[1][j]), w[2][j]), 0.5f);
-
- // Ww[2][j] = 0.5*(w[0][j] - w[1][j] + w[2][j]);
- Ww[2][j] = vmulq_n_f32(vaddq_f32(vsubq_f32(w[0][j], w[1][j]), w[2][j]), 0.5f);
-
- Ww[3][j] = w[2][j];
- }
-
- // Compute V = W w WT
- for (int i = 0; i < inner_tile_i; i++)
- {
- V[i][0] = Ww[i][0];
-
- // V[i][1] = 0.5*(Ww[i][0] + Ww[i][1] + Ww[i][2]);
- V[i][1] = vmulq_n_f32(vaddq_f32(vaddq_f32(Ww[i][0], Ww[i][1]), Ww[i][2]), 0.5f);
-
- // V[i][2] = 0.5*(Ww[i][0] - Ww[i][1] + Ww[i][2]);
- V[i][2] = vmulq_n_f32(vaddq_f32(vsubq_f32(Ww[i][0], Ww[i][1]), Ww[i][2]), 0.5f);
-
- V[i][3] = Ww[i][2];
- }
-
- // Store the transformed weights
- for (int i = 0, m = 0; i < inner_tile_i; i++)
- {
- for (int j = 0; j < inner_tile_j; j++, m++)
- {
- vst1q_f32(outptr + m*matrix_stride, V[i][j]);
- }
- }
- outptr += 4;
- }
-#endif // __aarch64__
-#ifdef __arm_any__
- for (; channels_remaining >= 2; channels_remaining -= 2)
- {
- // Matrices used and computed in this kernel
- float32x2_t w[3][3], Ww[inner_tile_i][3], V[inner_tile_i][inner_tile_j];
-
- // Read weights
- for (int i = 0; i < 3; i++)
- {
- for (int j = 0; j < 3; j++)
- {
- w[i][j] = vld1_f32(inptrs[i][j]);
- inptrs[i][j] += 2;
- }
- }
-
- // Compute the matrix W w
- for (int j = 0; j < 3; j++)
- {
- Ww[0][j] = w[0][j];
-
- // Ww[1][j] = 0.5*(w[0][j] + w[1][j] + w[2][j]);
- Ww[1][j] = vmul_n_f32(vadd_f32(vadd_f32(w[0][j], w[1][j]), w[2][j]), 0.5f);
-
- // Ww[2][j] = 0.5*(w[0][j] - w[1][j] + w[2][j]);
- Ww[2][j] = vmul_n_f32(vadd_f32(vsub_f32(w[0][j], w[1][j]), w[2][j]), 0.5f);
-
- Ww[3][j] = w[2][j];
- }
-
- // Compute V = W w WT
- for (int i = 0; i < inner_tile_i; i++)
- {
- V[i][0] = Ww[i][0];
-
- // V[i][1] = 0.5*(Ww[i][0] + Ww[i][1] + Ww[i][2]);
- V[i][1] = vmul_n_f32(vadd_f32(vadd_f32(Ww[i][0], Ww[i][1]), Ww[i][2]), 0.5f);
-
- // V[i][2] = 0.5*(Ww[i][0] - Ww[i][1] + Ww[i][2]);
- V[i][2] = vmul_n_f32(vadd_f32(vsub_f32(Ww[i][0], Ww[i][1]), Ww[i][2]), 0.5f);
-
- V[i][3] = Ww[i][2];
- }
-
- // Store the transformed weights
- for (int i = 0, m = 0; i < inner_tile_i; i++)
- {
- for (int j = 0; j < inner_tile_j; j++, m++)
- {
- vst1_f32(outptr + m*matrix_stride, V[i][j]);
- }
- }
- outptr += 2;
- }
-#endif // __arm_any__
- for (; channels_remaining; channels_remaining--)
- {
- // Matrices used and computed in this kernel
- float w[3][3], Ww[inner_tile_i][3], V[inner_tile_i][inner_tile_j];
-
- // Read weights
- for (int i = 0; i < 3; i++)
- {
- for (int j = 0; j < 3; j++)
- {
- w[i][j] = *(inptrs[i][j]++);
- }
- }
-
- // Compute the matrix W w
- for (int j = 0; j < 3; j++)
- {
- Ww[0][j] = w[0][j];
- Ww[1][j] = 0.5*(w[0][j] + w[1][j] + w[2][j]);
- Ww[2][j] = 0.5*(w[0][j] - w[1][j] + w[2][j]);
- Ww[3][j] = w[2][j];
- }
-
- // Compute V = W w WT
- for (int i = 0; i < inner_tile_i; i++)
- {
- V[i][0] = Ww[i][0];
- V[i][1] = 0.5*(Ww[i][0] + Ww[i][1] + Ww[i][2]);
- V[i][2] = 0.5*(Ww[i][0] - Ww[i][1] + Ww[i][2]);
- V[i][3] = Ww[i][2];
- }
-
- // Store the transformed weights
- for (int i = 0, m = 0; i < inner_tile_i; i++)
- {
- for (int j = 0; j < inner_tile_j; j++, m++)
- {
- *(outptr + m*matrix_stride) = V[i][j];
- }
- }
- outptr++;
- }
- }
-}
-
-template class WeightTransform<3, 3, 4, 4, float, float, WinogradRoots::Integers>;
-
-} // namespace winograd
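For reference, the per-channel computation in the kernel above is V = W w W^T, where the scalar loop gives

\[
W =
\begin{bmatrix}
1 & 0 & 0 \\
\tfrac12 & \tfrac12 & \tfrac12 \\
\tfrac12 & -\tfrac12 & \tfrac12 \\
0 & 0 & 1
\end{bmatrix}
\]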
diff --git a/src/core/NEON/kernels/convolution/winograd/winograd_transforms/weights_2x2_5x5_fp32_fp32_integers.cpp b/src/core/NEON/kernels/convolution/winograd/winograd_transforms/weights_2x2_5x5_fp32_fp32_integers.cpp
deleted file mode 100644
index 79f4fa30c4..0000000000
--- a/src/core/NEON/kernels/convolution/winograd/winograd_transforms/weights_2x2_5x5_fp32_fp32_integers.cpp
+++ /dev/null
@@ -1,401 +0,0 @@
-/*
- * Copyright (c) 2019 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include "arm.hpp"
-#include "kernel.hpp"
-
-namespace winograd
-{
-
-template <>
-void WeightTransform<5, 5, 6, 6, float, float, WinogradRoots::Integers>::execute(
- const int n_output_channels,
- const int n_input_channels,
- const float* const input,
- float* const output,
- const int matrix_stride,
- const int matrix_row_stride
-)
-{
- // Get pointers to each cell of the weight tensor
- const auto weight_col_stride = n_input_channels * n_output_channels;
- const auto weight_row_stride = 5 * weight_col_stride;
- const float *inptrs[5][5];
- for (int i = 0; i < 5; i++)
- {
- for (int j = 0; j < 5; j++)
- {
- inptrs[i][j] = input + i*weight_row_stride + j*weight_col_stride;
- }
- }
-
- // For each input channel
- for (int ic = 0; ic < n_input_channels; ic++)
- {
- float *outptr = output + ic * matrix_row_stride;
-
- // For each output channel
- int channels_remaining = n_output_channels;
-#ifdef __aarch64__
- for (; channels_remaining >= 4; channels_remaining -= 4)
- {
- // Matrices used and computed in this kernel
- float32x4_t w[5][5], Ww[6][5], V[6][6];
-
- // Read weights
- for (int i = 0; i < 5; i++)
- {
- for (int j = 0; j < 5; j++)
- {
- w[i][j] = vld1q_f32(inptrs[i][j]);
- inptrs[i][j] += 4;
- }
- }
-
- // Compute the matrix W w
- for (int j = 0; j < 5; j++)
- {
- // Ww[0][j] = w[0][j]/4.0f;
- Ww[0][j] = vmulq_n_f32(w[0][j], 1.0f/4.0f);
-
- // Ww[1][j] = -( w[0][j] + w[1][j] + w[2][j] + w[3][j] + w[4][j])/6.0f;
- Ww[1][j] = vmulq_n_f32(
- vaddq_f32(
- vaddq_f32(
- vaddq_f32(w[1][j], w[0][j]),
- vaddq_f32(w[3][j], w[2][j])
- ),
- w[4][j]
- ),
- -1.0f/6.0f
- );
-
- // Ww[2][j] = +(-w[0][j] + w[1][j] - w[2][j] + w[3][j] - w[4][j])/6.0f;
- // Ww[2][j] = ((w[1][j] - w[0][j]) + (w[3][j] - w[2][j]) - w[4][j])/6.0f;
- Ww[2][j] = vmulq_n_f32(
- vsubq_f32(
- vaddq_f32(
- vsubq_f32(w[1][j], w[0][j]),
- vsubq_f32(w[3][j], w[2][j])
- ),
- w[4][j]
- ),
- 1.0f/6.0f
- );
-
- // Ww[3][j] = (w[0][j]/8.0f + w[1][j]/4.0f + w[2][j]/2.0f + w[3][j] + 2*w[4][j])/3.0f;
- Ww[3][j] = vmulq_n_f32(
- vmlaq_n_f32(
- vaddq_f32(
- vaddq_f32(vmulq_n_f32(w[0][j], 1.0f/8.0f), vmulq_n_f32(w[1][j], 1.0f/4.0f)),
- vaddq_f32(vmulq_n_f32(w[2][j], 1.0f/2.0f), w[3][j])
- ),
- w[4][j], 2.0f
- ),
- 1.0f/3.0f
- );
-
- // Ww[4][j] = (w[0][j]/8.0f - w[1][j]/4.0f + w[2][j]/2.0f - w[3][j] + 2*w[4][j])/3.0f;
- Ww[4][j] = vmulq_n_f32(
- vmlaq_n_f32(
- vaddq_f32(
- vsubq_f32(vmulq_n_f32(w[0][j], 1.0f/8.0f), vmulq_n_f32(w[1][j], 1.0f/4.0f)),
- vsubq_f32(vmulq_n_f32(w[2][j], 1.0f/2.0f), w[3][j])
- ),
- w[4][j], 2.0f
- ),
- 1.0f/3.0f
- );
-
- // Ww[5][j] = w[4][j];
- Ww[5][j] = w[4][j];
- }
-
- // Compute V = W w WT
- for (int i = 0; i < 6; i++)
- {
- // V[i][0] = Ww[i][0]/4.0f;
- V[i][0] = vmulq_n_f32(Ww[i][0], 1.0f/4.0f);
-
- // V[i][1] = -( Ww[i][0] + Ww[i][1] + Ww[i][2] + Ww[i][3] + Ww[i][4])/6.0f;
- V[i][1] = vmulq_n_f32(
- vaddq_f32(
- vaddq_f32(
- vaddq_f32(Ww[i][1], Ww[i][0]),
- vaddq_f32(Ww[i][3], Ww[i][2])
- ),
- Ww[i][4]
- ),
- -1.0f/6.0f
- );
-
- // V[i][2] = +(-Ww[i][0] + Ww[i][1] - Ww[i][2] + Ww[i][3] - Ww[i][4])/6.0f;
- // V[i][2] = ((Ww[i][1] - Ww[i][0]) + (Ww[i][3] - Ww[i][2]) - Ww[i][4])/6.0f;
- V[i][2] = vmulq_n_f32(
- vsubq_f32(
- vaddq_f32(
- vsubq_f32(Ww[i][1], Ww[i][0]),
- vsubq_f32(Ww[i][3], Ww[i][2])
- ),
- Ww[i][4]
- ),
- 1.0f/6.0f
- );
-
- // V[i][3] = (Ww[i][0]/8.0f + Ww[i][1]/4.0f + Ww[i][2]/2.0f + Ww[i][3] + 2*Ww[i][4])/3.0f;
- V[i][3] = vmulq_n_f32(
- vmlaq_n_f32(
- vaddq_f32(
- vaddq_f32(vmulq_n_f32(Ww[i][0], 1.0f/8.0f), vmulq_n_f32(Ww[i][1], 1.0f/4.0f)),
- vaddq_f32(vmulq_n_f32(Ww[i][2], 1.0f/2.0f), Ww[i][3])
- ),
- Ww[i][4], 2.0f
- ),
- 1.0f/3.0f
- );
-
- // V[i][4] = (Ww[i][0]/8.0f - Ww[i][1]/4.0f + Ww[i][2]/2.0f - Ww[i][3] + 2*Ww[i][4])/3.0f;
- V[i][4] = vmulq_n_f32(
- vmlaq_n_f32(
- vaddq_f32(
- vsubq_f32(vmulq_n_f32(Ww[i][0], 1.0f/8.0f), vmulq_n_f32(Ww[i][1], 1.0f/4.0f)),
- vsubq_f32(vmulq_n_f32(Ww[i][2], 1.0f/2.0f), Ww[i][3])
- ),
- Ww[i][4], 2.0f
- ),
- 1.0f/3.0f
- );
-
- // V[i][5] = Ww[i][4];
- V[i][5] = Ww[i][4];
- }
-
- // Store the transformed weights
- for (int i = 0, m = 0; i < 6; i++)
- {
- for (int j = 0; j < 6; j++, m++)
- {
- vst1q_f32(outptr + m*matrix_stride, V[i][j]);
- }
- }
- outptr += 4;
- }
-#endif // __aarch64__
-#ifdef __arm_any__
- for (; channels_remaining >= 2; channels_remaining -= 2)
- {
- // Matrices used and computed in this kernel
- float32x2_t w[5][5], Ww[6][5], V[6][6];
-
- // Read weights
- for (int i = 0; i < 5; i++)
- {
- for (int j = 0; j < 5; j++)
- {
- w[i][j] = vld1_f32(inptrs[i][j]);
- inptrs[i][j] += 2;
- }
- }
-
- // Compute the matrix W w
- for (int j = 0; j < 5; j++)
- {
- // Ww[0][j] = w[0][j]/4.0f;
- Ww[0][j] = vmul_n_f32(w[0][j], 1.0f/4.0f);
-
- // Ww[1][j] = -( w[0][j] + w[1][j] + w[2][j] + w[3][j] + w[4][j])/6.0f;
- Ww[1][j] = vmul_n_f32(
- vadd_f32(
- vadd_f32(
- vadd_f32(w[1][j], w[0][j]),
- vadd_f32(w[3][j], w[2][j])
- ),
- w[4][j]
- ),
- -1.0f/6.0f
- );
-
- // Ww[2][j] = +(-w[0][j] + w[1][j] - w[2][j] + w[3][j] - w[4][j])/6.0f;
- // Ww[2][j] = ((w[1][j] - w[0][j]) + (w[3][j] - w[2][j]) - w[4][j])/6.0f;
- Ww[2][j] = vmul_n_f32(
- vsub_f32(
- vadd_f32(
- vsub_f32(w[1][j], w[0][j]),
- vsub_f32(w[3][j], w[2][j])
- ),
- w[4][j]
- ),
- 1.0f/6.0f
- );
-
- // Ww[3][j] = (w[0][j]/8.0f + w[1][j]/4.0f + w[2][j]/2.0f + w[3][j] + 2*w[4][j])/3.0f;
- Ww[3][j] = vmul_n_f32(
- vmla_n_f32(
- vadd_f32(
- vadd_f32(vmul_n_f32(w[0][j], 1.0f/8.0f), vmul_n_f32(w[1][j], 1.0f/4.0f)),
- vadd_f32(vmul_n_f32(w[2][j], 1.0f/2.0f), w[3][j])
- ),
- w[4][j], 2.0f
- ),
- 1.0f/3.0f
- );
-
- // Ww[4][j] = (w[0][j]/8.0f - w[1][j]/4.0f + w[2][j]/2.0f - w[3][j] + 2*w[4][j])/3.0f;
- Ww[4][j] = vmul_n_f32(
- vmla_n_f32(
- vadd_f32(
- vsub_f32(vmul_n_f32(w[0][j], 1.0f/8.0f), vmul_n_f32(w[1][j], 1.0f/4.0f)),
- vsub_f32(vmul_n_f32(w[2][j], 1.0f/2.0f), w[3][j])
- ),
- w[4][j], 2.0f
- ),
- 1.0f/3.0f
- );
-
- // Ww[5][j] = w[4][j];
- Ww[5][j] = w[4][j];
- }
-
- // Compute V = W w WT
- for (int i = 0; i < 6; i++)
- {
- // V[i][0] = Ww[i][0]/4.0f;
- V[i][0] = vmul_n_f32(Ww[i][0], 1.0f/4.0f);
-
- // V[i][1] = -( Ww[i][0] + Ww[i][1] + Ww[i][2] + Ww[i][3] + Ww[i][4])/6.0f;
- V[i][1] = vmul_n_f32(
- vadd_f32(
- vadd_f32(
- vadd_f32(Ww[i][1], Ww[i][0]),
- vadd_f32(Ww[i][3], Ww[i][2])
- ),
- Ww[i][4]
- ),
- -1.0f/6.0f
- );
-
- // V[i][2] = +(-Ww[i][0] + Ww[i][1] - Ww[i][2] + Ww[i][3] - Ww[i][4])/6.0f;
- // V[i][2] = ((Ww[i][1] - Ww[i][0]) + (Ww[i][3] - Ww[i][2]) - Ww[i][4])/6.0f;
- V[i][2] = vmul_n_f32(
- vsub_f32(
- vadd_f32(
- vsub_f32(Ww[i][1], Ww[i][0]),
- vsub_f32(Ww[i][3], Ww[i][2])
- ),
- Ww[i][4]
- ),
- 1.0f/6.0f
- );
-
- // V[i][3] = (Ww[i][0]/8.0f + Ww[i][1]/4.0f + Ww[i][2]/2.0f + Ww[i][3] + 2*Ww[i][4])/3.0f;
- V[i][3] = vmul_n_f32(
- vmla_n_f32(
- vadd_f32(
- vadd_f32(vmul_n_f32(Ww[i][0], 1.0f/8.0f), vmul_n_f32(Ww[i][1], 1.0f/4.0f)),
- vadd_f32(vmul_n_f32(Ww[i][2], 1.0f/2.0f), Ww[i][3])
- ),
- Ww[i][4], 2.0f
- ),
- 1.0f/3.0f
- );
-
- // V[i][4] = (Ww[i][0]/8.0f - Ww[i][1]/4.0f + Ww[i][2]/2.0f - Ww[i][3] + 2*Ww[i][4])/3.0f;
- V[i][4] = vmul_n_f32(
- vmla_n_f32(
- vadd_f32(
- vsub_f32(vmul_n_f32(Ww[i][0], 1.0f/8.0f), vmul_n_f32(Ww[i][1], 1.0f/4.0f)),
- vsub_f32(vmul_n_f32(Ww[i][2], 1.0f/2.0f), Ww[i][3])
- ),
- Ww[i][4], 2.0f
- ),
- 1.0f/3.0f
- );
-
- // V[i][5] = Ww[i][4];
- V[i][5] = Ww[i][4];
- }
-
- // Store the transformed weights
- for (int i = 0, m = 0; i < 6; i++)
- {
- for (int j = 0; j < 6; j++, m++)
- {
- vst1_f32(outptr + m*matrix_stride, V[i][j]);
- }
- }
- outptr += 2;
- }
-#endif // __arm_any__
- for (; channels_remaining; channels_remaining--)
- {
- // Matrices used and computed in this kernel
- float w[5][5], Ww[6][5], V[6][6];
-
- // Read weights
- for (int i = 0; i < 5; i++)
- {
- for (int j = 0; j < 5; j++)
- {
- w[i][j] = *(inptrs[i][j]++);
- }
- }
-
- // Compute the matrix W w
- for (int j = 0; j < 5; j++)
- {
- Ww[0][j] = w[0][j]/4.0f;
- Ww[1][j] = -( w[0][j] + w[1][j] + w[2][j] + w[3][j] + w[4][j])/6.0f;
- Ww[2][j] = +(-w[0][j] + w[1][j] - w[2][j] + w[3][j] - w[4][j])/6.0f;
- Ww[3][j] = (w[0][j]/8.0f + w[1][j]/4.0f + w[2][j]/2.0f + w[3][j] + 2*w[4][j])/3.0f;
- Ww[4][j] = (w[0][j]/8.0f - w[1][j]/4.0f + w[2][j]/2.0f - w[3][j] + 2*w[4][j])/3.0f;
- Ww[5][j] = w[4][j];
- }
-
- // Compute V = W w WT
- for (int i = 0; i < 6; i++)
- {
- V[i][0] = Ww[i][0]/4.0f;
- V[i][1] = -( Ww[i][0] + Ww[i][1] + Ww[i][2] + Ww[i][3] + Ww[i][4])/6.0f;
- V[i][2] = +(-Ww[i][0] + Ww[i][1] - Ww[i][2] + Ww[i][3] - Ww[i][4])/6.0f;
- V[i][3] = (Ww[i][0]/8.0f + Ww[i][1]/4.0f + Ww[i][2]/2.0f + Ww[i][3] + 2*Ww[i][4])/3.0f;
- V[i][4] = (Ww[i][0]/8.0f - Ww[i][1]/4.0f + Ww[i][2]/2.0f - Ww[i][3] + 2*Ww[i][4])/3.0f;
- V[i][5] = Ww[i][4];
- }
-
- // Store the transformed weights
- for (int i = 0, m = 0; i < 6; i++)
- {
- for (int j = 0; j < 6; j++, m++)
- {
- *(outptr + m*matrix_stride) = V[i][j];
- }
- }
- outptr++;
- }
- }
-}
-
-template class WeightTransform<5, 5, 6, 6, float, float, WinogradRoots::Integers>;
-
-} // namespace winograd
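The 5x5 weight transform above follows the same V = W w W^T pattern with a 6x5 matrix; expanding the per-row divisors used in the scalar loop gives

\[
W =
\begin{bmatrix}
\tfrac14 & 0 & 0 & 0 & 0 \\
-\tfrac16 & -\tfrac16 & -\tfrac16 & -\tfrac16 & -\tfrac16 \\
-\tfrac16 & \tfrac16 & -\tfrac16 & \tfrac16 & -\tfrac16 \\
\tfrac1{24} & \tfrac1{12} & \tfrac16 & \tfrac13 & \tfrac23 \\
\tfrac1{24} & -\tfrac1{12} & \tfrac16 & -\tfrac13 & \tfrac23 \\
0 & 0 & 0 & 0 & 1
\end{bmatrix}
\]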
diff --git a/src/core/NEON/kernels/convolution/winograd/winograd_transforms/weights_4_5_fp32_fp32_integers.cpp b/src/core/NEON/kernels/convolution/winograd/winograd_transforms/weights_4_5_fp32_fp32_integers.cpp
deleted file mode 100644
index fb3d712954..0000000000
--- a/src/core/NEON/kernels/convolution/winograd/winograd_transforms/weights_4_5_fp32_fp32_integers.cpp
+++ /dev/null
@@ -1,90 +0,0 @@
-/*
- * Copyright (c) 2019 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include "arm.hpp"
-#include "kernel.hpp"
-
-namespace winograd
-{
-
-template <>
-void WeightTransform<1, 5, 1, 8, float, float, WinogradRoots::Integers>::execute(
- const int n_output_channels,
- const int n_input_channels,
- const float* const input, // NOTE: Data in HWIO order
- float* const output,
- const int matrix_stride,
- const int matrix_row_stride
-)
-{
- // Get pointers to each cell of the weight tensor
- const auto weight_col_stride = n_input_channels * n_output_channels;
- const float *inptrs[kernel_cols];
- for (int j = 0; j < kernel_cols; j++)
- {
- inptrs[j] = input + j*weight_col_stride;
- }
-
- // For each input channel
- for (int ic = 0; ic < n_input_channels; ic++)
- {
- float *outptr = output + ic * matrix_row_stride;
-
- // For each output channel
- int channels_remaining = n_output_channels;
- for (; channels_remaining; channels_remaining--)
- {
- // Matrices used and computed in this kernel
- float w[kernel_cols], V[inner_tile_cols];
-
- // Read weights
- for (int j = 0; j < kernel_cols; j++)
- {
- w[j] = *(inptrs[j]++);
- }
-
- // Compute V = w WT
- V[0] = (w[0]*-1) / 36;
- V[1] = (w[1]*-1 + w[3]*-1 + w[0]*1 + w[2]*1 + w[4]*1) / 48;
- V[2] = (w[0]*1 + w[1]*1 + w[2]*1 + w[3]*1 + w[4]*1) / 48;
- V[3] = (w[0]*-1 + w[4]*-16 + w[2]*-4 + w[1]*2 + w[3]*8) / 120;
- V[4] = (w[0]*-1 + w[4]*-16 + w[3]*-8 + w[2]*-4 + w[1]*-2) / 120;
- V[5] = (w[3]*-27 + w[1]*-3 + w[2]*9 + w[4]*81 + w[0]*1) / 720;
- V[6] = (w[1]*3 + w[2]*9 + w[3]*27 + w[4]*81 + w[0]*1) / 720;
- V[7] = (w[4]*1) / 1;
-
- // Store the transformed weights
- for (int j = 0; j < inner_tile_cols; j++)
- {
- *(outptr + j*matrix_stride) = V[j];
- }
- outptr++;
- }
- }
-}
-
-template class WeightTransform<1, 5, 1, 8, float, float, WinogradRoots::Integers>;
-template class WeightTransform<5, 1, 8, 1, float, float, WinogradRoots::Integers>;
-
-} // namespace winograd
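The 5-tap 1-D case above is again V = w W^T; factoring out the per-row divisors used in the code, W can be written as

\[
W = \operatorname{diag}\!\left(\tfrac1{36}, \tfrac1{48}, \tfrac1{48}, \tfrac1{120}, \tfrac1{120}, \tfrac1{720}, \tfrac1{720}, 1\right)
\begin{bmatrix}
-1 & 0 & 0 & 0 & 0 \\
1 & -1 & 1 & -1 & 1 \\
1 & 1 & 1 & 1 & 1 \\
-1 & 2 & -4 & 8 & -16 \\
-1 & -2 & -4 & -8 & -16 \\
1 & -3 & 9 & -27 & 81 \\
1 & 3 & 9 & 27 & 81 \\
0 & 0 & 0 & 0 & 1
\end{bmatrix}
\]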
diff --git a/src/core/NEON/kernels/convolution/winograd/winograd_transforms/weights_4x4_3x3_fp16_fp16_integers.cpp b/src/core/NEON/kernels/convolution/winograd/winograd_transforms/weights_4x4_3x3_fp16_fp16_integers.cpp
deleted file mode 100644
index 3c4f8b426c..0000000000
--- a/src/core/NEON/kernels/convolution/winograd/winograd_transforms/weights_4x4_3x3_fp16_fp16_integers.cpp
+++ /dev/null
@@ -1,259 +0,0 @@
-/*
- * Copyright (c) 2020 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
-
-#include "arm.hpp"
-#include "kernel.hpp"
-
-namespace winograd
-{
-
-template <>
-void WeightTransform<3, 3, 6, 6, __fp16, __fp16, WinogradRoots::Integers>::execute(
- const int n_output_channels,
- const int n_input_channels,
- const __fp16* const input, // NOTE: Data in HWIO order
- __fp16* const output,
- const int matrix_stride,
- const int matrix_row_stride
-)
-{
- // Get pointers to each cell of the weight tensor
- const auto weight_col_stride = n_input_channels * n_output_channels;
- const auto weight_row_stride = 3 * weight_col_stride;
- const __fp16 *inptrs[3][3];
- for (int i = 0; i < 3; i++)
- {
- for (int j = 0; j < 3; j++)
- {
- inptrs[i][j] = input + i*weight_row_stride + j*weight_col_stride;
- }
- }
-
- // For each input channel
- for (int ic = 0; ic < n_input_channels; ic++)
- {
- __fp16 *outptr = output + ic * matrix_row_stride;
-
- // For each output channel
- int channels_remaining = n_output_channels;
-#ifdef __aarch64__
- for (; channels_remaining >= 8; channels_remaining -= 8)
- {
- // Matrices used and computed in this kernel
- float16x8_t w[3][3], Ww[6][3], V[6][6];
-
- // Read weights
- for (int i = 0; i < 3; i++)
- {
- for (int j = 0; j < 3; j++)
- {
- w[i][j] = vld1q_f16(inptrs[i][j]);
- inptrs[i][j] += 8;
- }
- }
-
- // Compute the matrix W w
- for (int j = 0; j < 3; j++)
- {
- // Ww[0][j] = 6*w[0][j];
- Ww[0][j] = vmulq_n_f16(w[0][j], 6.0);
-
- // Ww[1][j] = -4*w[0][j] + -4*w[1][j] + -4*w[2][j];
- Ww[1][j] = vmulq_n_f16(vaddq_f16(vaddq_f16(w[0][j], w[1][j]), w[2][j]), -4.0);
-
- // Ww[2][j] = -4*w[0][j] + 4*w[1][j] + -4*w[2][j];
- Ww[2][j] = vmulq_n_f16(vsubq_f16(vsubq_f16(w[1][j], w[0][j]), w[2][j]), 4.0);
-
- // Ww[3][j] = 1*w[0][j] + 2*w[1][j] + 4*w[2][j];
- Ww[3][j] = vaddq_f16(vaddq_f16(w[0][j], vmulq_f16(w[1][j], vdupq_n_f16(2.0f))), vmulq_f16(w[2][j], vdupq_n_f16(4.0f)));
-
- // Ww[4][j] = 1*w[0][j] + -2*w[1][j] + 4*w[2][j];
- Ww[4][j] = vaddq_f16(vsubq_f16(w[0][j], vmulq_f16(w[1][j], vdupq_n_f16(2.0f))), vmulq_f16(w[2][j], vdupq_n_f16(4.0f)));
-
- // Ww[5][j] = 24*w[2][j];
- Ww[5][j] = vmulq_n_f16(w[2][j], 24.0f);
- }
-
- // Compute V = W w WT
- for (int i = 0; i < 6; i++)
- {
- const float recip576 = 1.0f / 576.0f;
-
- // V[i][0] = 6*Ww[i][0];
- V[i][0] = vmulq_n_f16(vmulq_n_f16(Ww[i][0], 6.0), recip576);
-
- // V[i][1] = -4*Ww[i][0] + -4*Ww[i][1] + -4*Ww[i][2];
- V[i][1] = vmulq_n_f16(vmulq_n_f16(vaddq_f16(vaddq_f16(Ww[i][0], Ww[i][1]), Ww[i][2]), -4.0), recip576);
-
- // V[i][2] = -4*Ww[i][0] + 4*Ww[i][1] + -4*Ww[i][2];
- V[i][2] = vmulq_n_f16(vmulq_n_f16(vsubq_f16(vsubq_f16(Ww[i][1], Ww[i][0]), Ww[i][2]), 4.0), recip576);
-
- // V[i][3] = 1*Ww[i][0] + 2*Ww[i][1] + 4*Ww[i][2];
- V[i][3] = vmulq_n_f16(vaddq_f16(vaddq_f16(Ww[i][0], vmulq_f16(Ww[i][1], vdupq_n_f16(2.0f))), vmulq_f16(Ww[i][2], vdupq_n_f16(4.0f))), recip576);
-
- // V[i][4] = 1*Ww[i][0] + -2*Ww[i][1] + 4*Ww[i][2];
- V[i][4] = vmulq_n_f16(vaddq_f16(vsubq_f16(Ww[i][0], vmulq_f16(Ww[i][1], vdupq_n_f16(2.0f))), vmulq_f16(Ww[i][2], vdupq_n_f16(4.0f))), recip576);
-
- // V[i][5] = 24*Ww[i][2];
- V[i][5] = vmulq_n_f16(vmulq_n_f16(Ww[i][2], 24.0f), recip576);
- }
-
- // Store the transformed weights
- for (int i = 0, m = 0; i < 6; i++)
- {
- for (int j = 0; j < 6; j++, m++)
- {
- vst1q_f16(outptr + m*matrix_stride, V[i][j]);
- }
- }
- outptr += 8;
- }
-#endif // __aarch64__
-#ifdef __arm_any__
- for (; channels_remaining >= 4; channels_remaining -= 4)
- {
- // Matrices used and computed in this kernel
- float16x4_t w[3][3], Ww[6][3], V[6][6];
-
- // Read weights
- for (int i = 0; i < 3; i++)
- {
- for (int j = 0; j < 3; j++)
- {
- w[i][j] = vld1_f16(inptrs[i][j]);
- inptrs[i][j] += 4;
- }
- }
-
- // Compute the matrix W w
- for (int j = 0; j < 3; j++)
- {
- // Ww[0][j] = 6*w[0][j];
- Ww[0][j] = vmul_n_f16(w[0][j], 6.0);
-
- // Ww[1][j] = -4*w[0][j] + -4*w[1][j] + -4*w[2][j];
- Ww[1][j] = vmul_n_f16(vadd_f16(vadd_f16(w[0][j], w[1][j]), w[2][j]), -4.0);
-
- // Ww[2][j] = -4*w[0][j] + 4*w[1][j] + -4*w[2][j];
- Ww[2][j] = vmul_n_f16(vsub_f16(vsub_f16(w[1][j], w[0][j]), w[2][j]), 4.0);
-
- // Ww[3][j] = 1*w[0][j] + 2*w[1][j] + 4*w[2][j];
- Ww[3][j] = vadd_f16(vadd_f16(w[0][j], vmul_f16(w[1][j], vdup_n_f16(2.0f))), vmul_f16(w[2][j], vdup_n_f16(4.0f)));
-
- // Ww[4][j] = 1*w[0][j] + -2*w[1][j] + 4*w[2][j];
- Ww[4][j] = vadd_f16(vsub_f16(w[0][j], vmul_f16(w[1][j], vdup_n_f16(2.0f))), vmul_f16(w[2][j], vdup_n_f16(4.0f)));
-
- // Ww[5][j] = 24*w[2][j];
- Ww[5][j] = vmul_n_f16(w[2][j], 24.0f);
- }
-
- // Compute V = W w WT
- for (int i = 0; i < 6; i++)
- {
- const float recip576 = 1.0f / 576.0f;
-
- // V[i][0] = 6*Ww[i][0];
- V[i][0] = vmul_n_f16(vmul_n_f16(Ww[i][0], 6.0), recip576);
-
- // V[i][1] = -4*Ww[i][0] + -4*Ww[i][1] + -4*Ww[i][2];
- V[i][1] = vmul_n_f16(vmul_n_f16(vadd_f16(vadd_f16(Ww[i][0], Ww[i][1]), Ww[i][2]), -4.0), recip576);
-
- // V[i][2] = -4*Ww[i][0] + 4*Ww[i][1] + -4*Ww[i][2];
- V[i][2] = vmul_n_f16(vmul_n_f16(vsub_f16(vsub_f16(Ww[i][1], Ww[i][0]), Ww[i][2]), 4.0), recip576);
-
- // V[i][3] = 1*Ww[i][0] + 2*Ww[i][1] + 4*Ww[i][2];
- V[i][3] = vmul_n_f16(vadd_f16(vadd_f16(Ww[i][0], vmul_f16(Ww[i][1], vdup_n_f16(2.0f))), vmul_f16(Ww[i][2], vdup_n_f16(4.0f))), recip576);
-
- // V[i][4] = 1*Ww[i][0] + -2*Ww[i][1] + 4*Ww[i][2];
- V[i][4] = vmul_n_f16(vadd_f16(vsub_f16(Ww[i][0], vmul_f16(Ww[i][1], vdup_n_f16(2.0f))), vmul_f16(Ww[i][2], vdup_n_f16(4.0f))), recip576);
-
- // V[i][5] = 24*Ww[i][2];
- V[i][5] = vmul_n_f16(vmul_n_f16(Ww[i][2], 24.0f), recip576);
- }
-
- // Store the transformed weights
- for (int i = 0, m = 0; i < 6; i++)
- {
- for (int j = 0; j < 6; j++, m++)
- {
- vst1_f16(outptr + m*matrix_stride, V[i][j]);
- }
- }
- outptr += 4;
- }
-#endif // __arm_any__
- for (; channels_remaining; channels_remaining--)
- {
- // Matrices used and computed in this kernel
- __fp16 w[3][3], Ww[6][3], V[6][6];
-
- // Read weights
- for (int i = 0; i < 3; i++)
- {
- for (int j = 0; j < 3; j++)
- {
- w[i][j] = *(inptrs[i][j]++);
- }
- }
-
- // Compute the matrix W w
- for (int j = 0; j < 3; j++)
- {
- Ww[0][j] = 6*w[0][j];
- Ww[1][j] = -4*w[0][j] + -4*w[1][j] + -4*w[2][j];
- Ww[2][j] = -4*w[0][j] + 4*w[1][j] + -4*w[2][j];
- Ww[3][j] = 1*w[0][j] + 2*w[1][j] + 4*w[2][j];
- Ww[4][j] = 1*w[0][j] + -2*w[1][j] + 4*w[2][j];
- Ww[5][j] = 24*w[2][j];
- }
-
- // Compute V = W w WT
- for (int i = 0; i < 6; i++)
- {
- V[i][0] = ( 6*Ww[i][0]) / 576.0;
- V[i][1] = (-4*Ww[i][0] + -4*Ww[i][1] + -4*Ww[i][2]) / 576.0;
- V[i][2] = (-4*Ww[i][0] + 4*Ww[i][1] + -4*Ww[i][2]) / 576.0;
- V[i][3] = ( 1*Ww[i][0] + 2*Ww[i][1] + 4*Ww[i][2]) / 576.0;
- V[i][4] = ( 1*Ww[i][0] + -2*Ww[i][1] + 4*Ww[i][2]) / 576.0;
- V[i][5] = (24*Ww[i][2]) / 576.0;
- }
-
- // Store the transformed weights
- for (int i = 0, m = 0; i < 6; i++)
- {
- for (int j = 0; j < 6; j++, m++)
- {
- *(outptr + m*matrix_stride) = V[i][j];
- }
- }
- outptr++;
- }
- }
-}
-
-template class WeightTransform<3, 3, 6, 6, __fp16, __fp16, WinogradRoots::Integers>;
-
-} // namespace
-#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
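
Both the NEON paths and the scalar tail above implement the same two-stage product: first Ww = G w transforms the rows of the 3x3 kernel, then V = (Ww G^T) / 576 transforms the columns, with the common 1/576 factor folded into the second stage. A plain-float reference sketch of that factorisation follows; it is illustrative only, and G6x3 and transform_weights_3x3_to_6x6 are assumed names rather than library symbols.

    // Reference sketch in plain float: the scalar tail above computes
    // V = (G w G^T) / 576 for a 3x3 kernel w, with the 6x3 matrix G below.
    static const float G6x3[6][3] = {
      {  6.f,  0.f,  0.f },
      { -4.f, -4.f, -4.f },
      { -4.f,  4.f, -4.f },
      {  1.f,  2.f,  4.f },
      {  1.f, -2.f,  4.f },
      {  0.f,  0.f, 24.f },
    };

    void transform_weights_3x3_to_6x6(const float w[3][3], float V[6][6])
    {
      // First stage: Ww = G w (transform the rows of the kernel).
      float Ww[6][3];
      for (int i = 0; i < 6; i++)
      {
        for (int j = 0; j < 3; j++)
        {
          Ww[i][j] = 0.0f;
          for (int k = 0; k < 3; k++)
          {
            Ww[i][j] += G6x3[i][k] * w[k][j];
          }
        }
      }

      // Second stage: V = (Ww G^T) / 576, folding in the common 1/576 scale
      // exactly as the kernels above do.
      for (int i = 0; i < 6; i++)
      {
        for (int j = 0; j < 6; j++)
        {
          float acc = 0.0f;
          for (int k = 0; k < 3; k++)
          {
            acc += Ww[i][k] * G6x3[j][k];
          }
          V[i][j] = acc / 576.0f;
        }
      }
    }
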
diff --git a/src/core/NEON/kernels/convolution/winograd/winograd_transforms/weights_4x4_3x3_fp32_fp32_integers.cpp b/src/core/NEON/kernels/convolution/winograd/winograd_transforms/weights_4x4_3x3_fp32_fp32_integers.cpp
deleted file mode 100644
index 9e7040bca8..0000000000
--- a/src/core/NEON/kernels/convolution/winograd/winograd_transforms/weights_4x4_3x3_fp32_fp32_integers.cpp
+++ /dev/null
@@ -1,257 +0,0 @@
-/*
- * Copyright (c) 2019 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include "arm.hpp"
-#include "kernel.hpp"
-
-namespace winograd
-{
-
-template <>
-void WeightTransform<3, 3, 6, 6, float, float, WinogradRoots::Integers>::execute(
- const int n_output_channels,
- const int n_input_channels,
- const float* const input, // NOTE: Data in HWIO order
- float* const output,
- const int matrix_stride,
- const int matrix_row_stride
-)
-{
- // Get pointers to each cell of the weight tensor
- const auto weight_col_stride = n_input_channels * n_output_channels;
- const auto weight_row_stride = 3 * weight_col_stride;
- const float *inptrs[3][3];
- for (int i = 0; i < 3; i++)
- {
- for (int j = 0; j < 3; j++)
- {
- inptrs[i][j] = input + i*weight_row_stride + j*weight_col_stride;
- }
- }
-
- // For each input channel
- for (int ic = 0; ic < n_input_channels; ic++)
- {
- float *outptr = output + ic * matrix_row_stride;
-
- // For each output channel
- int channels_remaining = n_output_channels;
-#ifdef __aarch64__
- for (; channels_remaining >= 4; channels_remaining -= 4)
- {
- // Matrices used and computed in this kernel
- float32x4_t w[3][3], Ww[6][3], V[6][6];
-
- // Read weights
- for (int i = 0; i < 3; i++)
- {
- for (int j = 0; j < 3; j++)
- {
- w[i][j] = vld1q_f32(inptrs[i][j]);
- inptrs[i][j] += 4;
- }
- }
-
- // Compute the matrix W w
- for (int j = 0; j < 3; j++)
- {
- // Ww[0][j] = 6*w[0][j];
- Ww[0][j] = vmulq_n_f32(w[0][j], 6.0);
-
- // Ww[1][j] = -4*w[0][j] + -4*w[1][j] + -4*w[2][j];
- Ww[1][j] = vmulq_n_f32(vaddq_f32(vaddq_f32(w[0][j], w[1][j]), w[2][j]), -4.0);
-
- // Ww[2][j] = -4*w[0][j] + 4*w[1][j] + -4*w[2][j];
- Ww[2][j] = vmulq_n_f32(vsubq_f32(vsubq_f32(w[1][j], w[0][j]), w[2][j]), 4.0);
-
- // Ww[3][j] = 1*w[0][j] + 2*w[1][j] + 4*w[2][j];
- Ww[3][j] = vmlaq_n_f32(vmlaq_n_f32(w[0][j], w[1][j], 2.0f), w[2][j], 4.0f);
-
- // Ww[4][j] = 1*w[0][j] + -2*w[1][j] + 4*w[2][j];
- Ww[4][j] = vmlaq_n_f32(vmlsq_n_f32(w[0][j], w[1][j], 2.0f), w[2][j], 4.0f);
-
- // Ww[5][j] = 24*w[2][j];
- Ww[5][j] = vmulq_n_f32(w[2][j], 24.0f);
- }
-
- // Compute V = W w WT
- for (int i = 0; i < 6; i++)
- {
- const float recip576 = 1.0f / 576.0f;
-
- // V[i][0] = 6*Ww[i][0];
- V[i][0] = vmulq_n_f32(vmulq_n_f32(Ww[i][0], 6.0), recip576);
-
- // V[i][1] = -4*Ww[i][0] + -4*Ww[i][1] + -4*Ww[i][2];
- V[i][1] = vmulq_n_f32(vmulq_n_f32(vaddq_f32(vaddq_f32(Ww[i][0], Ww[i][1]), Ww[i][2]), -4.0), recip576);
-
- // V[i][2] = -4*Ww[i][0] + 4*Ww[i][1] + -4*Ww[i][2];
- V[i][2] = vmulq_n_f32(vmulq_n_f32(vsubq_f32(vsubq_f32(Ww[i][1], Ww[i][0]), Ww[i][2]), 4.0), recip576);
-
- // V[i][3] = 1*Ww[i][0] + 2*Ww[i][1] + 4*Ww[i][2];
- V[i][3] = vmulq_n_f32(vmlaq_n_f32(vmlaq_n_f32(Ww[i][0], Ww[i][1], 2.0f), Ww[i][2], 4.0f), recip576);
-
- // V[i][4] = 1*Ww[i][0] + -2*Ww[i][1] + 4*Ww[i][2];
- V[i][4] = vmulq_n_f32(vmlaq_n_f32(vmlsq_n_f32(Ww[i][0], Ww[i][1], 2.0f), Ww[i][2], 4.0f), recip576);
-
- // V[i][5] = 24*Ww[i][2];
- V[i][5] = vmulq_n_f32(vmulq_n_f32(Ww[i][2], 24.0f), recip576);
- }
-
- // Store the transformed weights
- for (int i = 0, m = 0; i < 6; i++)
- {
- for (int j = 0; j < 6; j++, m++)
- {
- vst1q_f32(outptr + m*matrix_stride, V[i][j]);
- }
- }
- outptr += 4;
- }
-#endif // __aarch64__
-#ifdef __arm_any__
- for (; channels_remaining >= 2; channels_remaining -= 2)
- {
- // Matrices used and computed in this kernel
- float32x2_t w[3][3], Ww[6][3], V[6][6];
-
- // Read weights
- for (int i = 0; i < 3; i++)
- {
- for (int j = 0; j < 3; j++)
- {
- w[i][j] = vld1_f32(inptrs[i][j]);
- inptrs[i][j] += 2;
- }
- }
-
- // Compute the matrix W w
- for (int j = 0; j < 3; j++)
- {
- // Ww[0][j] = 6*w[0][j];
- Ww[0][j] = vmul_n_f32(w[0][j], 6.0);
-
- // Ww[1][j] = -4*w[0][j] + -4*w[1][j] + -4*w[2][j];
- Ww[1][j] = vmul_n_f32(vadd_f32(vadd_f32(w[0][j], w[1][j]), w[2][j]), -4.0);
-
- // Ww[2][j] = -4*w[0][j] + 4*w[1][j] + -4*w[2][j];
- Ww[2][j] = vmul_n_f32(vsub_f32(vsub_f32(w[1][j], w[0][j]), w[2][j]), 4.0);
-
- // Ww[3][j] = 1*w[0][j] + 2*w[1][j] + 4*w[2][j];
- Ww[3][j] = vmla_n_f32(vmla_n_f32(w[0][j], w[1][j], 2.0f), w[2][j], 4.0f);
-
- // Ww[4][j] = 1*w[0][j] + -2*w[1][j] + 4*w[2][j];
- Ww[4][j] = vmla_n_f32(vmls_n_f32(w[0][j], w[1][j], 2.0f), w[2][j], 4.0f);
-
- // Ww[5][j] = 24*w[2][j];
- Ww[5][j] = vmul_n_f32(w[2][j], 24.0f);
- }
-
- // Compute V = W w WT
- for (int i = 0; i < 6; i++)
- {
- const float recip576 = 1.0f / 576.0f;
-
- // V[i][0] = 6*Ww[i][0];
- V[i][0] = vmul_n_f32(vmul_n_f32(Ww[i][0], 6.0), recip576);
-
- // V[i][1] = -4*Ww[i][0] + -4*Ww[i][1] + -4*Ww[i][2];
- V[i][1] = vmul_n_f32(vmul_n_f32(vadd_f32(vadd_f32(Ww[i][0], Ww[i][1]), Ww[i][2]), -4.0), recip576);
-
- // V[i][2] = -4*Ww[i][0] + 4*Ww[i][1] + -4*Ww[i][2];
- V[i][2] = vmul_n_f32(vmul_n_f32(vsub_f32(vsub_f32(Ww[i][1], Ww[i][0]), Ww[i][2]), 4.0), recip576);
-
- // V[i][3] = 1*Ww[i][0] + 2*Ww[i][1] + 4*Ww[i][2];
- V[i][3] = vmul_n_f32(vmla_n_f32(vmla_n_f32(Ww[i][0], Ww[i][1], 2.0f), Ww[i][2], 4.0f), recip576);
-
- // V[i][4] = 1*Ww[i][0] + -2*Ww[i][1] + 4*Ww[i][2];
- V[i][4] = vmul_n_f32(vmla_n_f32(vmls_n_f32(Ww[i][0], Ww[i][1], 2.0f), Ww[i][2], 4.0f), recip576);
-
- // V[i][5] = 24*Ww[i][2];
- V[i][5] = vmul_n_f32(vmul_n_f32(Ww[i][2], 24.0f), recip576);
- }
-
- // Store the transformed weights
- for (int i = 0, m = 0; i < 6; i++)
- {
- for (int j = 0; j < 6; j++, m++)
- {
- vst1_f32(outptr + m*matrix_stride, V[i][j]);
- }
- }
- outptr += 2;
- }
-#endif // __arm_any__
- for (; channels_remaining; channels_remaining--)
- {
- // Matrices used and computed in this kernel
- float w[3][3], Ww[6][3], V[6][6];
-
- // Read weights
- for (int i = 0; i < 3; i++)
- {
- for (int j = 0; j < 3; j++)
- {
- w[i][j] = *(inptrs[i][j]++);
- }
- }
-
- // Compute the matrix W w
- for (int j = 0; j < 3; j++)
- {
- Ww[0][j] = 6*w[0][j];
- Ww[1][j] = -4*w[0][j] + -4*w[1][j] + -4*w[2][j];
- Ww[2][j] = -4*w[0][j] + 4*w[1][j] + -4*w[2][j];
- Ww[3][j] = 1*w[0][j] + 2*w[1][j] + 4*w[2][j];
- Ww[4][j] = 1*w[0][j] + -2*w[1][j] + 4*w[2][j];
- Ww[5][j] = 24*w[2][j];
- }
-
- // Compute V = W w WT
- for (int i = 0; i < 6; i++)
- {
- V[i][0] = ( 6*Ww[i][0]) / 576.0;
- V[i][1] = (-4*Ww[i][0] + -4*Ww[i][1] + -4*Ww[i][2]) / 576.0;
- V[i][2] = (-4*Ww[i][0] + 4*Ww[i][1] + -4*Ww[i][2]) / 576.0;
- V[i][3] = ( 1*Ww[i][0] + 2*Ww[i][1] + 4*Ww[i][2]) / 576.0;
- V[i][4] = ( 1*Ww[i][0] + -2*Ww[i][1] + 4*Ww[i][2]) / 576.0;
- V[i][5] = (24*Ww[i][2]) / 576.0;
- }
-
- // Store the transformed weights
- for (int i = 0, m = 0; i < 6; i++)
- {
- for (int j = 0; j < 6; j++, m++)
- {
- *(outptr + m*matrix_stride) = V[i][j];
- }
- }
- outptr++;
- }
- }
-}
-
-template class WeightTransform<3, 3, 6, 6, float, float, WinogradRoots::Integers>;
-
-} // namespace
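
Every variant above stores its results with the same addressing: the transformed value for Winograd matrix m, input channel ic and output channel oc is written m*matrix_stride elements past a base of output + ic*matrix_row_stride + oc, since outptr starts at output + ic*matrix_row_stride and advances by one element per output channel. A small sketch of that index calculation; transformed_weight_index is an assumed helper, not a library function.

    // Sketch of the addressing implied by the store loops above.
    inline long transformed_weight_index(int m, int ic, int oc,
                                         long matrix_stride, long matrix_row_stride)
    {
      return static_cast<long>(m) * matrix_stride
           + static_cast<long>(ic) * matrix_row_stride
           + oc;
    }
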
diff --git a/src/core/NEON/kernels/convolution/winograd/winograd_transforms/weights_6_3_fp32_fp32_integers.cpp b/src/core/NEON/kernels/convolution/winograd/winograd_transforms/weights_6_3_fp32_fp32_integers.cpp
deleted file mode 100644
index 45723482a2..0000000000
--- a/src/core/NEON/kernels/convolution/winograd/winograd_transforms/weights_6_3_fp32_fp32_integers.cpp
+++ /dev/null
@@ -1,90 +0,0 @@
-/*
- * Copyright (c) 2019 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include "arm.hpp"
-#include "kernel.hpp"
-
-namespace winograd
-{
-
-template <>
-void WeightTransform<1, 3, 1, 8, float, float, WinogradRoots::Integers>::execute(
- const int n_output_channels,
- const int n_input_channels,
- const float* const input, // NOTE: Data in HWIO order
- float* const output,
- const int matrix_stride,
- const int matrix_row_stride
-)
-{
- // Get pointers to each cell of the weight tensor
- const auto weight_col_stride = n_input_channels * n_output_channels;
- const float *inptrs[3];
- for (int j = 0; j < 3; j++)
- {
- inptrs[j] = input + j*weight_col_stride;
- }
-
- // For each input channel
- for (int ic = 0; ic < n_input_channels; ic++)
- {
- float *outptr = output + ic * matrix_row_stride;
-
- // For each output channel
- int channels_remaining = n_output_channels;
- for (; channels_remaining; channels_remaining--)
- {
- // Matrices used and computed in this kernel
- float w[3], V[inner_tile_cols];
-
- // Read weights
- for (int j = 0; j < 3; j++)
- {
- w[j] = *(inptrs[j]++);
- }
-
- // Compute V = w WT
- V[0] = (w[0]*-1) / 36.0f;
- V[1] = (w[1]*-1 + w[0]*1 + w[2]*1) / 48.0f;
- V[2] = (w[0]*1 + w[1]*1 + w[2]*1) / 48.0f;
- V[3] = (w[0]*-1 + w[2]*-4 + w[1]*2) / 120.0f;
- V[4] = (w[0]*-1 + w[2]*-4 + w[1]*-2) / 120.0f;
- V[5] = (w[1]*-3 + w[2]*9 + w[0]*1) / 720.0f;
- V[6] = (w[1]*3 + w[2]*9 + w[0]*1) / 720.0f;
- V[7] = (w[2]*1) / 1;
-
- // Store the transformed weights
- for (int j = 0; j < inner_tile_cols; j++)
- {
- *(outptr + j*matrix_stride) = V[j];
- }
- outptr++;
- }
- }
-}
-
-template class WeightTransform<1, 3, 1, 8, float, float, WinogradRoots::Integers>;
-template class WeightTransform<3, 1, 8, 1, float, float, WinogradRoots::Integers>;
-
-} // namespace
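
As a quick spot check of the 1x3 -> 1x8 expressions above, the kernel w = {1, 2, 3} transforms to {-1/36, 2/48, 6/48, -9/120, -17/120, 22/720, 34/720, 3}. A minimal standalone check of that arithmetic (illustrative only, not library code):

    // Evaluate the unrolled 1x3 -> 1x8 expressions for w = {1, 2, 3} and
    // compare against the hand-worked fractions.
    #include <cassert>
    #include <cmath>

    int main()
    {
      const float w[3] = { 1.0f, 2.0f, 3.0f };
      float V[8];
      V[0] = (w[0]*-1) / 36.0f;
      V[1] = (w[1]*-1 + w[0]*1 + w[2]*1) / 48.0f;
      V[2] = (w[0]*1 + w[1]*1 + w[2]*1) / 48.0f;
      V[3] = (w[0]*-1 + w[2]*-4 + w[1]*2) / 120.0f;
      V[4] = (w[0]*-1 + w[2]*-4 + w[1]*-2) / 120.0f;
      V[5] = (w[1]*-3 + w[2]*9 + w[0]*1) / 720.0f;
      V[6] = (w[1]*3 + w[2]*9 + w[0]*1) / 720.0f;
      V[7] = (w[2]*1) / 1;

      const float expected[8] = { -1.0f/36, 2.0f/48, 6.0f/48, -9.0f/120,
                                  -17.0f/120, 22.0f/720, 34.0f/720, 3.0f };
      for (int j = 0; j < 8; j++)
      {
        assert(std::fabs(V[j] - expected[j]) < 1e-6f);
      }
      return 0;
    }
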