author     Pablo Tello <pablo.tello@arm.com>  2019-03-27 09:28:32 +0000
committer  Georgios Pinitas <georgios.pinitas@arm.com>  2019-04-16 11:31:40 +0000
commit     8f43d745b170aefca269a087fc045d8af3813c33 (patch)
tree       08df4a26c3fab575eb9bdf061be89d2a71fb3581
parent     9e4824c909b14dbaf7106e9527b0ffa22ef09bdc (diff)
download   ComputeLibrary-8f43d745b170aefca269a087fc045d8af3813c33.tar.gz
COMPMID-2063: New Winograd implementation
Refactoring of the Winograd code, reducing the size of the binaries by about 8X.

Change-Id: If8845bda324573e1a5cf436f354ac8603e88a92e
Signed-off-by: Pablo Tello <pablo.tello@arm.com>
Reviewed-on: https://review.mlplatform.org/c/959
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Tested-by: Anthony Barbier <Anthony.barbier@arm.com>
Reviewed-by: Georgios Pinitas <georgios.pinitas@arm.com>
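Judging by the file moves below, most of the size reduction appears to come from taking the transform and GEMM templates out of public headers (gemm.hpp, a64_sgemm.hpp, batched_blocked_gemm.hpp are deleted) and compiling each specialisation once in its own translation unit (the new *_fp32_fp32_integers.cpp files), instead of re-instantiating them in every includer. A minimal sketch of that pattern, with hypothetical names:

// transform.hpp -- declaration only; includers see no body, so they
// cannot instantiate (and duplicate) the transform themselves.
template <int TileRows, int TileCols, typename TIn, typename TOut>
class InputTransform
{
public:
    void run(const TIn *inptr, TOut *outptr, unsigned int n_channels);
};

// input_6x6_fp32_fp32.cpp -- the body lives in exactly one object file.
template <int TileRows, int TileCols, typename TIn, typename TOut>
void InputTransform<TileRows, TileCols, TIn, TOut>::run(
    const TIn *inptr, TOut *outptr, unsigned int n_channels)
{
    /* specialised implementation */
}

// Explicit instantiation: the single copy of the 6x6/fp32 variant that
// will appear in the final binary.
template class InputTransform<6, 6, float, float>;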
-rw-r--r-- SConscript | 2
-rw-r--r-- arm_compute/core/NEON/kernels/NEWinogradConvolutionLayerKernel.h | 183
-rw-r--r-- arm_compute/core/NEON/kernels/convolution/common/padding.hpp | 17
-rw-r--r-- arm_compute/core/NEON/kernels/convolution/winograd/batched_blocked_gemm.hpp | 69
-rw-r--r-- arm_compute/core/NEON/kernels/convolution/winograd/gemm.hpp | 127
-rw-r--r-- arm_compute/core/NEON/kernels/convolution/winograd/gemm/a64_sgemm.hpp | 355
-rw-r--r-- arm_compute/core/NEON/kernels/convolution/winograd/gemm/a64_sgemm_4x16.hpp | 1446
-rw-r--r-- arm_compute/core/NEON/kernels/convolution/winograd/transforms/input.hpp | 349
-rw-r--r-- arm_compute/core/NEON/kernels/convolution/winograd/transforms/output.hpp | 278
-rw-r--r-- arm_compute/core/NEON/kernels/convolution/winograd/winograd.hpp | 610
-rw-r--r-- arm_compute/core/NEON/kernels/convolution/winograd/winograd_gemm.hpp | 226
-rw-r--r-- arm_compute/core/NEON/kernels/convolution/winograd/winograd_input_transform.hpp | 271
-rw-r--r-- arm_compute/core/NEON/kernels/convolution/winograd/winograd_layer.hpp | 211
-rw-r--r-- arm_compute/core/NEON/kernels/convolution/winograd/winograd_output_transform.hpp | 232
-rw-r--r-- arm_compute/runtime/NEON/functions/NEWinogradConvolutionLayer.h | 4
-rw-r--r-- src/core/NEON/kernels/NEWinogradConvolutionLayerKernel.cpp | 142
-rw-r--r-- src/core/NEON/kernels/convolution/winograd/batched_blocked_gemm.cpp | 82
-rw-r--r-- src/core/NEON/kernels/convolution/winograd/padding.cpp | 166
-rw-r--r-- src/core/NEON/kernels/convolution/winograd/transforms/input_6x6_fp32.cpp | 376
-rw-r--r-- src/core/NEON/kernels/convolution/winograd/transforms/output_2x2_3x3_fp32.cpp | 375
-rw-r--r-- src/core/NEON/kernels/convolution/winograd/transforms/output_2x2_5x5_fp32.cpp | 369
-rw-r--r-- src/core/NEON/kernels/convolution/winograd/transforms/output_4x4_3x3_fp32.cpp | 428
-rw-r--r-- src/core/NEON/kernels/convolution/winograd/transforms/weights_2_7_fp32.cpp | 124
-rw-r--r-- src/core/NEON/kernels/convolution/winograd/transforms/weights_2x2_3x3_fp32.cpp | 228
-rw-r--r-- src/core/NEON/kernels/convolution/winograd/transforms/weights_2x2_5x5_fp32.cpp | 408
-rw-r--r-- src/core/NEON/kernels/convolution/winograd/transforms/weights_4_5_fp32.cpp | 124
-rw-r--r-- src/core/NEON/kernels/convolution/winograd/transforms/weights_4x4_3x3_fp32.cpp | 266
-rw-r--r-- src/core/NEON/kernels/convolution/winograd/transforms/weights_6_3_fp32.cpp | 125
-rw-r--r-- src/core/NEON/kernels/convolution/winograd/winograd.cpp (renamed from src/core/NEON/kernels/convolution/winograd/winograd_gemm.cpp) | 104
-rw-r--r-- src/core/NEON/kernels/convolution/winograd/winograd_transforms/input.hpp | 265
-rw-r--r-- src/core/NEON/kernels/convolution/winograd/winograd_transforms/input_1x8_fp32_fp32_integers.cpp (renamed from src/core/NEON/kernels/convolution/winograd/transforms/input_1x8_fp32.cpp) | 139
-rw-r--r-- src/core/NEON/kernels/convolution/winograd/winograd_transforms/input_4x4_fp32_fp32_integers.cpp (renamed from src/core/NEON/kernels/convolution/winograd/transforms/input_2x2_3x3_fp32.cpp) | 152
-rw-r--r-- src/core/NEON/kernels/convolution/winograd/winograd_transforms/input_6x6_fp32_fp32_integers.cpp | 1308
-rw-r--r-- src/core/NEON/kernels/convolution/winograd/winograd_transforms/kernel.hpp (renamed from arm_compute/core/NEON/kernels/convolution/winograd/transforms/kernel.hpp) | 67
-rw-r--r-- src/core/NEON/kernels/convolution/winograd/winograd_transforms/output.hpp | 249
-rw-r--r-- src/core/NEON/kernels/convolution/winograd/winograd_transforms/output_2_7_fp32_fp32_integers.cpp (renamed from src/core/NEON/kernels/convolution/winograd/transforms/output_2_7_fp32.cpp) | 60
-rw-r--r-- src/core/NEON/kernels/convolution/winograd/winograd_transforms/output_2x2_3x3_fp32_fp32_integers.cpp | 222
-rw-r--r-- src/core/NEON/kernels/convolution/winograd/winograd_transforms/output_2x2_5x5_fp32_fp32_integers.cpp | 216
-rw-r--r-- src/core/NEON/kernels/convolution/winograd/winograd_transforms/output_4_5_fp32_fp32_integers.cpp (renamed from src/core/NEON/kernels/convolution/winograd/transforms/output_4_5_fp32.cpp) | 62
-rw-r--r-- src/core/NEON/kernels/convolution/winograd/winograd_transforms/output_4x4_3x3_fp32_fp32_integers.cpp | 1855
-rw-r--r-- src/core/NEON/kernels/convolution/winograd/winograd_transforms/output_6_3_fp32_fp32_integers.cpp (renamed from src/core/NEON/kernels/convolution/winograd/transforms/output_6_3_fp32.cpp) | 66
-rw-r--r-- src/core/NEON/kernels/convolution/winograd/winograd_transforms/weights_2_7_fp32_fp32_integers.cpp | 90
-rw-r--r-- src/core/NEON/kernels/convolution/winograd/winograd_transforms/weights_2x2_3x3_fp32_fp32_integers.cpp | 220
-rw-r--r-- src/core/NEON/kernels/convolution/winograd/winograd_transforms/weights_2x2_5x5_fp32_fp32_integers.cpp | 401
-rw-r--r-- src/core/NEON/kernels/convolution/winograd/winograd_transforms/weights_4_5_fp32_fp32_integers.cpp | 90
-rw-r--r-- src/core/NEON/kernels/convolution/winograd/winograd_transforms/weights_4x4_3x3_fp32_fp32_integers.cpp | 257
-rw-r--r-- src/core/NEON/kernels/convolution/winograd/winograd_transforms/weights_6_3_fp32_fp32_integers.cpp | 90
-rw-r--r-- src/runtime/NEON/functions/NEWinogradConvolutionLayer.cpp | 41
48 files changed, 6714 insertions, 6833 deletions
diff --git a/SConscript b/SConscript
index 03976ea830..62b6073bf3 100644
--- a/SConscript
+++ b/SConscript
@@ -207,7 +207,7 @@ if env['neon']:
# build winograd sources for either v7a / v8a
core_files += Glob('src/core/NEON/kernels/convolution/*/*.cpp')
core_files += Glob('src/core/NEON/kernels/convolution/winograd/*/*.cpp')
- arm_compute_env.Append(CPPPATH = ["arm_compute/core/NEON/kernels/winograd/", "arm_compute/core/NEON/kernels/assembly/"])
+ arm_compute_env.Append(CPPPATH = ["arm_compute/core/NEON/kernels/convolution/winograd/","arm_compute/core/NEON/kernels/convolution/common/" , "arm_compute/core/NEON/kernels/assembly/"])
graph_files += Glob('src/graph/backends/NEON/*.cpp')
diff --git a/arm_compute/core/NEON/kernels/NEWinogradConvolutionLayerKernel.h b/arm_compute/core/NEON/kernels/NEWinogradConvolutionLayerKernel.h
index 96580053dd..f6b189cb1c 100644
--- a/arm_compute/core/NEON/kernels/NEWinogradConvolutionLayerKernel.h
+++ b/arm_compute/core/NEON/kernels/NEWinogradConvolutionLayerKernel.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2018 ARM Limited.
+ * Copyright (c) 2017-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -27,8 +27,7 @@
#include "arm_compute/core/NEON/INEKernel.h"
#include "arm_compute/core/NEON/kernels/convolution/common/convolution.hpp"
#include "arm_compute/core/NEON/kernels/convolution/common/tensor.hpp"
-#include "arm_compute/core/NEON/kernels/convolution/winograd/batched_blocked_gemm.hpp"
-#include "arm_compute/core/NEON/kernels/convolution/winograd/winograd_gemm.hpp"
+#include "arm_compute/core/NEON/kernels/convolution/winograd/winograd_layer.hpp"
namespace arm_compute
{
@@ -39,6 +38,17 @@ template <typename T>
class INEWinogradLayerTransformInputKernel : public INEKernel
{
public:
+ /** Get the working space required to perform the transformation.
+ *
+ * Note, the working space is only required when performing the
+ * transformation - hence it can be reused whenever the transformation is
+ * not running.
+ *
+ * @param num_threads The greatest number of threads that will be used to execute the transform.
+ * @return Size of working space required in bytes.
+ */
+ virtual unsigned int get_working_space_size(unsigned int num_threads) const = 0;
+
/** Determine how much memory (in units of TIn) to allocate for the
* transformed input.
*
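For context, a rough caller-side sketch (assumed; this setup is not part of the patch) of how the new working-space hook is used: the runtime sizes one scratch tensor for the worst-case thread count and hands the same tensor to the transform kernel's configure(), since the transforms never run concurrently.

// Assumed usage sketch -- names mirror the interface above, but the
// surrounding setup is illustrative only.
const unsigned int max_threads = NEScheduler::get().num_threads();
const unsigned int ws_bytes    = transform_kernel.get_working_space_size(max_threads);

Tensor workspace; // one U8 scratch buffer, reusable across transforms
workspace.allocator()->init(TensorInfo(TensorShape{ ws_bytes }, 1, DataType::U8));
workspace.allocator()->allocate();

transform_kernel.configure(input_nhwc, num_batches, num_rows, num_cols,
                           num_channels, padding, output, matrix_stride,
                           &workspace);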
@@ -72,9 +82,10 @@ public:
* @param[in] padding Padding type.
* @param[out] output Base of output matrices.
* @param[in] matrix_stride Stride between output matrices.
+ * @param[in] workspace Tensor to be used as the working space during the computation.
*/
virtual void configure(const ITensor *input_nhwc, const int num_batches, const int num_rows, const int num_cols, const int num_channels,
- const PaddingType padding, ITensor *output, const int matrix_stride) = 0;
+ const PaddingType padding, ITensor *output, const int matrix_stride, ITensor *workspace) = 0;
/** Destructor */
virtual ~INEWinogradLayerTransformInputKernel()
@@ -116,6 +127,18 @@ public:
int num_cols,
bool same_padding) const override;
+ /** Get the working space required to perform the transformation.
+ *
+ * Note, the working space is only required when performing the
+ * transformation - hence it can be reused whenever the transformation is
+ * not running.
+ *
+ * @param[in] num_threads The greatest number of threads that will be used to execute the transform.
+ *
+ * @return Size of working space required in bytes.
+ */
+ unsigned int get_working_space_size(unsigned int num_threads) const override;
+
/** Gets the stride between matrices in the input workspace
*
* @param[in] kernel_shape The shape of the weights tensor.
@@ -144,6 +167,7 @@ public:
* @param[in] padding Padding type.
* @param[out] output Base of output matrices.
* @param[in] matrix_stride Stride between output matrices.
+ * @param[in] workspace Tensor to be used as the working space during the computation.
*/
void configure(
const ITensor *input_nhwc,
@@ -153,13 +177,14 @@ public:
const int num_channels,
const PaddingType padding,
ITensor *output,
- const int matrix_stride) override;
+ const int matrix_stride,
+ ITensor *workspace) override;
// Inherited methods overridden:
void run(const Window &window, const ThreadInfo &info) override;
/** Winograd base kernel */
- using WinogradBase = winograd::WinogradGEMM<OutputTileRows, OutputTileCols, KernelRows, KernelCols>;
+ using WinogradBase = winograd::WinogradGEMM<OutputTileRows, OutputTileCols, KernelRows, KernelCols, winograd::WinogradRoots::Integers>;
/** Winograd convolution kernel */
using WinogradConv = typename WinogradBase::template Convolution<T, T>;
@@ -174,15 +199,22 @@ public:
static Status validate(const ITensorInfo *input, const ITensorInfo *output, const WinogradInfo &winograd_info);
private:
- using InputTransform = typename WinogradBase::template InputTransform<T>;
- const ITensor *_input_nhwc;
- int _num_batches; /**< Number of batches in input tensor. */
- int _num_rows; /**< Number of rows in input tensor. */
- int _num_cols; /**< Number of columns in input tensor. */
- int _num_channels; /**< Number of channels in input tensor. */
- PaddingType _padding; /**< Padding type. */
- ITensor *_output; /**< Base of output matrices. */
- int _matrix_stride; /**< Stride between output matrices. */
+ using InputTransform = typename WinogradBase::template InputTransform<T, T>;
+
+ std::unique_ptr<InputTransform> _transform{ nullptr };
+ const ITensor *_input_nhwc;
+ int _num_batches; /**< Number of batches in input tensor. */
+ int _num_rows; /**< Number of rows in input tensor. */
+ int _num_cols; /**< Number of columns in input tensor. */
+ int _num_channels; /**< Number of channels in input tensor. */
+ PaddingType _padding; /**< Padding type. */
+ ITensor *_output; /**< Base of output matrices. */
+ int _matrix_stride; /**< Stride between output matrices. */
+ int _padding_top; /**< Padding to apply to the top of the image. */
+ int _padding_left; /**< Padding to apply to the left of the image. */
+ int _padding_right; /**< Padding to apply to the right of the image. */
+ int _padding_bottom; /**< Padding to apply to the bottom of the image. */
+ ITensor *_workspace;
};
/** Interface for the NEON kernel to perform Winograd output transform. */
@@ -190,6 +222,18 @@ template <typename T>
class INEWinogradLayerTransformOutputKernel : public INEKernel
{
public:
+ /** Get the working space required to perform the transformation.
+ *
+ * Note, the working space is only required when performing the
+ * transformation - hence it can be reused whenever the transformation is
+ * not running.
+ *
+ * @param[in] num_threads The greatest number of threads that will be used to execute the transform.
+ *
+ * @return Size of working space required in bytes.
+ */
+ virtual unsigned int get_working_space_size(unsigned int num_threads) const = 0;
+
/** Determine how much memory (in units of TOut) to allocate for the
* (Winograd domain) output.
*
@@ -225,24 +269,26 @@ public:
/** Configure the output transform kernel.
*
- * @param[in] biases Pointer to the biases tensor.
- * @param[in] output_workingspace Pointer to working space for the output tensor in the Winograd domain.
- * @param[in] matrix_stride Output matrix stride, can be computed with winograd::WinogradGEMM<2, 2, 3, 3>::Convolution<float, float>::get_output_matrix_stride()
- * @param[out] output_nhwc Pointer to a tensor in NHWC data layout ordered output tensor, in the spatial domain.
- * @param[in] num_batches Number of batches in the input tensor.
- * @param[in] num_rows Number of rows in output tensor.
- * @param[in] num_cols Number of columns in output tensor.
- * @param[in] num_channels Number of feature maps in the output tensor.
+ * @param[in] biases Pointer to the biases tensor.
+ * @param[in] transformed_output Pointer to working space for the output tensor in the Winograd domain.
+ * @param[in] matrix_stride Output matrix stride, can be computed with winograd::WinogradGEMM<2, 2, 3, 3>::Convolution<float, float>::get_output_matrix_stride()
+ * @param[out] output_nhwc Pointer to a tensor in NHWC data layout ordered output tensor, in the spatial domain.
+ * @param[in] num_batches Number of batches in the input tensor.
+ * @param[in] num_rows Number of rows in output tensor.
+ * @param[in] num_cols Number of columns in output tensor.
+ * @param[in] num_channels Number of feature maps in the output tensor.
+ * @param[in] workspace Tensor to be used as the working space during the computation.
*/
virtual void configure(
const ITensor *biases,
- const ITensor *output_workingspace,
+ const ITensor *transformed_output,
const int matrix_stride,
ITensor *output_nhwc,
const int num_batches,
const int num_rows,
const int num_cols,
- const int num_channels) = 0;
+ const int num_channels,
+ ITensor *workspace) = 0;
virtual ~INEWinogradLayerTransformOutputKernel()
{
@@ -305,54 +351,70 @@ public:
*/
Tensor4DShape get_output_shape(const KernelShape &kernel_shape, const Tensor4DShape &in_shape, const PaddingType padding) const override;
+ /** Get the working space required to perform the transformation.
+ *
+ * Note, the working space is only required when performing the
+ * transformation - hence it can be reused whenever the transformation is
+ * not running.
+ *
+ * @param[in] num_threads The greatest number of threads that will be used to execute the transform.
+ *
+ * @return Size of working space required in bytes.
+ */
+ unsigned int get_working_space_size(unsigned int num_threads) const override;
+
/** Configure the output transform kernel.
*
- * @param[in] biases Pointer to the biases tensor.
- * @param[in] output_workingspace Pointer to working space for the output tensor in the Winograd domain.
- * @param[in] matrix_stride Output matrix stride, can be computed with winograd::WinogradGEMM<2, 2, 3, 3>::Convolution<float, float>::get_output_matrix_stride()
- * @param[out] output_nhwc Pointer to a tensor with NHWC data layout, in the spatial domain.
- * @param[in] num_batches Number of batches in the input tensor.
- * @param[in] num_rows Number of rows in output tensor.
- * @param[in] num_cols Number of columns in output tensor.
- * @param[in] num_channels Number of feature maps in the output tensor.
+ * @param[in] biases Pointer to the biases tensor.
+ * @param[in] transformed_output Pointer to working space for the output tensor in the Winograd domain.
+ * @param[in] matrix_stride Output matrix stride, can be computed with winograd::WinogradGEMM<2, 2, 3, 3>::Convolution<float, float>::get_output_matrix_stride()
+ * @param[out] output_nhwc Pointer to a tensor with NHWC data layout, in the spatial domain.
+ * @param[in] num_batches Number of batches in the input tensor.
+ * @param[in] num_rows Number of rows in output tensor.
+ * @param[in] num_cols Number of columns in output tensor.
+ * @param[in] num_channels Number of feature maps in the output tensor.
+ * @param[in] workspace Tensor to be used as the working space during the computation.
*/
void configure(
const ITensor *biases,
- const ITensor *output_workingspace,
+ const ITensor *transformed_output,
const int matrix_stride,
ITensor *output_nhwc,
const int num_batches,
const int num_rows,
const int num_cols,
- const int num_channels) override;
+ const int num_channels,
+ ITensor *workspace) override;
void run(const Window &window, const ThreadInfo &info) override;
/** Static function to check if given info will lead to a valid configuration of @ref NEWinogradLayerTransformOutputKernel
*
- * @param[in] input Source tensor with shape [C, N, 16, batches] or [C, N, 36, batches]. Data types supported: F32.
- * @param[in] bias Biases tensor. Shared biases supported. Biases are 1D tensor with dimensions [OFM]. It can be a nullptr. Data type supported: as @p input
- * @param[out] output Destination tensor with shape [output_convolved_dims.width, output_convolved_dims.height, C, batches]. Data type supported: same as @p input
- * @param[in] winograd_info Contains Winograd's information described in @ref WinogradInfo
+ * @param[in] input Source tensor info with shape [C, N, 16, batches] or [C, N, 36, batches]. Data types supported: F32.
+ * @param[in] bias Biases tensor info. Shared biases supported. Biases are 1D tensor with dimensions [OFM]. It can be a nullptr. Data type supported: as @p input
+ * @param[in] output Destination tensor info with shape [output_convolved_dims.width, output_convolved_dims.height, C, batches]. Data type supported: same as @p input
+ * @param[in] winograd_info Contains Winograd's information described in @ref WinogradInfo
*
* @return a status
*/
static Status validate(const ITensorInfo *input, const ITensorInfo *bias, const ITensorInfo *output, const WinogradInfo &winograd_info);
private:
- using WinogradBase = winograd::WinogradGEMM<OutputTileRows, OutputTileCols, KernelRows, KernelCols>;
+ using WinogradBase = winograd::WinogradGEMM<OutputTileRows, OutputTileCols, KernelRows, KernelCols, winograd::WinogradRoots::Integers>;
using WinogradConv = typename WinogradBase::template Convolution<T, T>;
- using OutputTransform = typename WinogradBase::template OutputTransform<T>;
-
- const ITensor *_biases;
- const ITensor *_output_workspace;
- int _matrix_stride;
- int _matrix_row_stride;
- ITensor *_output_nhwc;
- int _num_batches;
- int _num_rows;
- int _num_cols;
- int _num_channels;
+ using OutputTransform = typename WinogradBase::template OutputTransform<T, T>;
+
+ std::unique_ptr<OutputTransform> _transform{ nullptr };
+ const ITensor *_biases;
+ const ITensor *_transformed_output;
+ ITensor *_workspace;
+ int _matrix_stride;
+ int _matrix_row_stride;
+ ITensor *_output_nhwc;
+ int _num_batches;
+ int _num_rows;
+ int _num_cols;
+ int _num_channels;
};
/** Interface for the NEON kernel to perform Winograd weights transform. */
@@ -482,15 +544,16 @@ public:
bool is_parallelisable() const override;
private:
- using WinogradBase = winograd::WinogradGEMM<OutputTileRows, OutputTileCols, KernelRows, KernelCols>;
+ using WinogradBase = winograd::WinogradGEMM<OutputTileRows, OutputTileCols, KernelRows, KernelCols, winograd::WinogradRoots::Integers>;
using WinogradConv = typename WinogradBase::template Convolution<T, T>;
- using WeightsTransform = typename WinogradBase::template WeightsTransform<T>;
-
- const ITensor *_weights_hwio;
- ITensor *_output;
- int _matrix_stride;
- int _num_output_channels;
- int _num_input_channels;
+ using WeightsTransform = typename WinogradBase::template WeightsTransform<T, T>;
+
+ std::unique_ptr<WeightsTransform> _transform{ nullptr };
+ const ITensor *_weights_hwio;
+ ITensor *_output;
+ int _matrix_stride;
+ int _num_output_channels;
+ int _num_input_channels;
};
/** NEON kernel to perform Winograd. */
@@ -499,7 +562,7 @@ class NEWinogradLayerConfiguration
{
public:
/** Winograd base kernel */
- using WinogradBase = winograd::WinogradGEMM<OutputTileRows, OutputTileCols, KernelRows, KernelCols>;
+ using WinogradBase = winograd::WinogradGEMM<OutputTileRows, OutputTileCols, KernelRows, KernelCols, winograd::WinogradRoots::Integers>;
/** Winograd convolution kernel */
using WinogradConv = typename WinogradBase::template Convolution<TIn, TOut>;
diff --git a/arm_compute/core/NEON/kernels/convolution/common/padding.hpp b/arm_compute/core/NEON/kernels/convolution/common/padding.hpp
index 33f77d7ee9..97b21e0ff5 100644
--- a/arm_compute/core/NEON/kernels/convolution/common/padding.hpp
+++ b/arm_compute/core/NEON/kernels/convolution/common/padding.hpp
@@ -71,4 +71,21 @@ class CopyCropped
);
};
+template <typename T>
+void crop_and_copy_tile(
+ unsigned int tile_rows,
+ unsigned int tile_cols,
+ unsigned int n_channels,
+ const T *inptr,
+ unsigned int in_row_stride,
+ unsigned int in_col_stride,
+ T *outptr,
+ unsigned int out_row_stride,
+ unsigned int out_col_stride,
+ unsigned int crop_top,
+ unsigned int crop_left,
+ unsigned int crop_bottom,
+ unsigned int crop_right
+);
+
}
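The patch only declares crop_and_copy_tile here; its definition lands in the new src/core/NEON/kernels/convolution/winograd/padding.cpp. Going by the parameter names alone, a plausible (illustrative, unoptimised) reading of its contract is:

// Illustrative reference implementation -- an assumption based on the
// parameter names; the real kernel in padding.cpp may be optimised.
template <typename T>
void crop_and_copy_tile(
    unsigned int tile_rows, unsigned int tile_cols, unsigned int n_channels,
    const T *inptr, unsigned int in_row_stride, unsigned int in_col_stride,
    T *outptr, unsigned int out_row_stride, unsigned int out_col_stride,
    unsigned int crop_top, unsigned int crop_left,
    unsigned int crop_bottom, unsigned int crop_right)
{
    // Copy only the interior of the tile, skipping the cropped border.
    for (unsigned int i = crop_top; i < tile_rows - crop_bottom; i++)
    {
        for (unsigned int j = crop_left; j < tile_cols - crop_right; j++)
        {
            const T *in = inptr + i * in_row_stride + j * in_col_stride;
            T *out = outptr + (i - crop_top) * out_row_stride
                            + (j - crop_left) * out_col_stride;
            for (unsigned int c = 0; c < n_channels; c++)
            {
                out[c] = in[c];
            }
        }
    }
}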
diff --git a/arm_compute/core/NEON/kernels/convolution/winograd/batched_blocked_gemm.hpp b/arm_compute/core/NEON/kernels/convolution/winograd/batched_blocked_gemm.hpp
deleted file mode 100644
index 663b3c414f..0000000000
--- a/arm_compute/core/NEON/kernels/convolution/winograd/batched_blocked_gemm.hpp
+++ /dev/null
@@ -1,69 +0,0 @@
-/*
- * Copyright (c) 2017 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#pragma once
-
-namespace winograd
-{
-
-template <const int M_BLOCK, const int N_BLOCK, typename TIn, typename TOut>
-class BatchedBlockedGemm
-{
- public:
- /** Create a new batched blocked GEMM operator. */
- BatchedBlockedGemm(
- const unsigned int n_gemms,
- const int M, const int K, const int N,
- const int a_matrix_stride,
- const int a_row_stride,
- const int b_matrix_stride,
- const int b_row_stride,
- const int c_matrix_stride,
- const int c_row_stride,
- const TIn* const a_ptr,
- const TIn* const b_ptr,
- TOut* const c_ptr
- );
-
- BatchedBlockedGemm(const BatchedBlockedGemm&) = delete;
- BatchedBlockedGemm operator=(const BatchedBlockedGemm&) = delete;
-
- /** Get a window of work performed by the operator. */
- unsigned int get_window() const;
-
- /** Perform a portion of the work of the operator. */
- void run(const unsigned int start, const unsigned int stop);
-
- private:
- const unsigned int n_gemms;
- const int M, N, K;
- const int a_matrix_stride, a_row_stride;
- const int b_matrix_stride, b_row_stride;
- const int c_matrix_stride, c_row_stride;
- const TIn* const a_ptr;
- const TIn* const b_ptr;
- TOut* const c_ptr;
-};
-
-} // namespace winograd
diff --git a/arm_compute/core/NEON/kernels/convolution/winograd/gemm.hpp b/arm_compute/core/NEON/kernels/convolution/winograd/gemm.hpp
deleted file mode 100644
index 6e06db324c..0000000000
--- a/arm_compute/core/NEON/kernels/convolution/winograd/gemm.hpp
+++ /dev/null
@@ -1,127 +0,0 @@
-/*
- * Copyright (c) 2017 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#pragma once
-#include "arm_compute/core/NEON/kernels/convolution/common/utils.hpp"
-
-template <typename TIn, typename TOut>
-inline void Gemm(const TIn* const a, const TIn* const b, TOut *c,
- const int M, const int K, const int N,
- const int a_row_stride,
- const int b_row_stride,
- const int c_row_stride,
- const bool a_transposed=false,
- const bool b_transposed=false) {
- // Array access methods
- const auto A = [a, a_transposed, M, K, a_row_stride] (const int i, const int j) -> TIn {
- return a[(!a_transposed) ? i*a_row_stride + j : i + j*M];
- };
-
- const auto B = [b, b_transposed, K, N, b_row_stride] (const int i, const int j) -> TIn {
- return b[(!b_transposed) ? i*b_row_stride + j : i + j*N];
- };
-
- const auto C = [c, c_row_stride] (const int i, const int j) -> TOut& {
- return c[i*c_row_stride + j];
- };
-
- // Perform the matrix multiplication
- for (int i = 0; i < M; i++) {
- for (int j = 0; j < N; j++) {
- for (int k = 0; k < K; k++) {
- C(i, j) += A(i, k) * B(k, j);
- }
- }
- }
-}
-
-template <const int M_BLOCK, const int N_BLOCK, typename TIn, typename TOut>
-inline void BlockedGemm(
- const TIn* const a, const TIn* const b, TOut *c,
- const int M, const int K, const int N,
- const int a_row_stride,
- const int b_row_stride,
- const int c_row_stride
-) {
- // Array access methods
- const auto A = [a, a_row_stride] (const int i, const int j) -> TIn {
- return a[i*a_row_stride + j];
- };
-
- const auto B = [b, b_row_stride] (const int i, const int j) -> TIn {
- return b[i*b_row_stride + j];
- };
-
- const auto C = [c, c_row_stride] (const int i, const int j) -> TOut& {
- return c[i*c_row_stride + j];
- };
-
- const int M_BLOCKS = iceildiv(M, M_BLOCK);
- const int N_BLOCKS = iceildiv(N, N_BLOCK);
-
- // For each block of output rows
- for (int mblock = 0; mblock < M_BLOCKS; mblock++) {
- // For each block of output columns
- for (int nblock = 0; nblock < N_BLOCKS; nblock++) {
- // Create an appropriately sized block of accumulators
- TOut accum[M_BLOCK][N_BLOCK];
- for (int i = 0; i < M_BLOCK; i++) {
- for (int j = 0; j < N_BLOCK; j++) {
- accum[i][j] = static_cast<TOut>(0);
- }
- }
-
- // Perform this portion of the matrix multiply
- for (int k = 0; k < K; k++) {
- // Load elements of A
- TIn elems_a[M_BLOCK];
- for (int i = 0; i < M_BLOCK; i++) {
- elems_a[i] = A(mblock*M_BLOCK + i, k);
- }
-
- // Load elements of B
- TIn elems_b[N_BLOCK];
- for (int j = 0; j < N_BLOCK; j++) {
- elems_b[j] = B(k, nblock*N_BLOCK + j);
- }
-
- // Perform the partial matrix multiply
- for (int i = 0; i < M_BLOCK; i++) {
- for (int j = 0; j < N_BLOCK; j++) {
- accum[i][j] += elems_a[i] * elems_b[j];
- }
- }
- }
-
- // Store the partial product
- for (int i = 0; i < M_BLOCK; i++) {
- for (int j = 0; j < N_BLOCK; j++) {
- C(mblock*M_BLOCK + i, nblock*N_BLOCK + j) = accum[i][j];
- }
- }
- }
- }
-}
-
-#include "gemm/a64_sgemm.hpp"
diff --git a/arm_compute/core/NEON/kernels/convolution/winograd/gemm/a64_sgemm.hpp b/arm_compute/core/NEON/kernels/convolution/winograd/gemm/a64_sgemm.hpp
deleted file mode 100644
index 8073cb1896..0000000000
--- a/arm_compute/core/NEON/kernels/convolution/winograd/gemm/a64_sgemm.hpp
+++ /dev/null
@@ -1,355 +0,0 @@
-/*
- * Copyright (c) 2017 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#pragma once
-#include <cassert>
-#include "arm_compute/core/NEON/kernels/convolution/common/utils.hpp"
-
-#ifdef __aarch64__
-
-template <>
-inline void BlockedGemm<8, 12, float, float>(
- const float* const a, const float* const b, float *c,
- const int M, const int K, const int N,
- const int a_row_stride,
- const int b_row_stride,
- const int c_row_stride
-) {
- const int M_BLOCK = 8;
- const int N_BLOCK = 12;
-
- const int m_blocks = iceildiv(M, M_BLOCK);
- const int n_blocks = iceildiv(N, N_BLOCK);
-
- // For each block of output rows
- for (int mblock = 0; mblock < m_blocks; mblock++) {
- // For each block of output columns
- for (int nblock = 0; nblock < n_blocks; nblock++) {
- const float *aptr = a + mblock*M_BLOCK*a_row_stride;
- const float *bptr = b + nblock*N_BLOCK;
- float *cptr = c + mblock*M_BLOCK*c_row_stride + nblock*N_BLOCK;
- int k = K;
-
- asm volatile (
- // Create an 8x12 block of accumulators
- " A_1 .req v27\n"
- "sA_1 .req s27\n"
- " A_2 .req v28\n"
- "sA_2 .req s28\n"
- " A_3 .req v29\n"
- "sA_3 .req s29\n"
- " A_4 .req v30\n"
- "sA_4 .req s30\n"
-
- " B_1 .req v24\n" " B_2 .req v25\n" " B_3 .req v26\n"
- "qB_1 .req q24\n" "qB_2 .req q25\n" "qB_3 .req q26\n"
-
- " C_11 .req v0\n" " C_12 .req v1\n" " C_13 .req v2\n"
- " C_21 .req v3\n" " C_22 .req v4\n" " C_23 .req v5\n"
- " C_31 .req v6\n" " C_32 .req v7\n" " C_33 .req v8\n"
- " C_41 .req v9\n" " C_42 .req v10\n" " C_43 .req v11\n"
- " C_51 .req v12\n" " C_52 .req v13\n" " C_53 .req v14\n"
- " C_61 .req v15\n" " C_62 .req v16\n" " C_63 .req v17\n"
- " C_71 .req v18\n" " C_72 .req v19\n" " C_73 .req v20\n"
- " C_81 .req v21\n" " C_82 .req v22\n" " C_83 .req v23\n"
-
- "qC_11 .req q0\n" "qC_12 .req q1\n" "qC_13 .req q2\n"
- "qC_21 .req q3\n" "qC_22 .req q4\n" "qC_23 .req q5\n"
- "qC_31 .req q6\n" "qC_32 .req q7\n" "qC_33 .req q8\n"
- "qC_41 .req q9\n" "qC_42 .req q10\n" "qC_43 .req q11\n"
- "qC_51 .req q12\n" "qC_52 .req q13\n" "qC_53 .req q14\n"
- "qC_61 .req q15\n" "qC_62 .req q16\n" "qC_63 .req q17\n"
- "qC_71 .req q18\n" "qC_72 .req q19\n" "qC_73 .req q20\n"
- "qC_81 .req q21\n" "qC_82 .req q22\n" "qC_83 .req q23\n"
-
- "aptr1 .req x17\n"
- "aptr2 .req x18\n"
- "aptr3 .req x19\n"
- "aptr4 .req x20\n"
- "aptr5 .req x21\n"
- "aptr6 .req x22\n"
- "aptr7 .req x23\n"
-
- // Initialise accumulators with 0
- // Initialise pointers
- "movi C_11.4s, #0\n"
- "add aptr1, %x[aptr], %x[a_row_stride]\n"
- "movi C_12.4s, #0\n"
- "add aptr2, aptr1, %x[a_row_stride]\n"
- "movi C_13.4s, #0\n"
- "add aptr3, aptr2, %x[a_row_stride]\n"
- "movi C_21.4s, #0\n"
- "add aptr4, aptr3, %x[a_row_stride]\n"
- "movi C_22.4s, #0\n"
- "add aptr5, aptr4, %x[a_row_stride]\n"
- "movi C_23.4s, #0\n"
- "add aptr6, aptr5, %x[a_row_stride]\n"
- "movi C_31.4s, #0\n"
- "add aptr7, aptr6, %x[a_row_stride]\n"
- "movi C_32.4s, #0\n"
- "ldr qB_1, [%x[bptr]]\n"
- "movi C_33.4s, #0\n"
- "ldr qB_2, [%x[bptr], #0x10]\n"
- "movi C_41.4s, #0\n"
- "prfm pldl1keep, [%x[bptr], #0x00]\n"
- "movi C_42.4s, #0\n"
- "prfm pldl1keep, [%x[bptr], #0x10]\n"
- "movi C_43.4s, #0\n"
- "prfm pldl1keep, [%x[bptr], #0x20]\n"
- "movi C_51.4s, #0\n"
- "prfm pldl1keep, [%x[aptr], #0x00]\n"
- "movi C_52.4s, #0\n"
- "prfm pldl1keep, [ aptr1, #0x00]\n"
- "movi C_53.4s, #0\n"
- "prfm pldl1keep, [ aptr2, #0x00]\n"
- "movi C_61.4s, #0\n"
- "prfm pldl1keep, [ aptr3, #0x00]\n"
- "movi C_62.4s, #0\n"
- "prfm pldl1keep, [ aptr4, #0x00]\n"
- "movi C_63.4s, #0\n"
- "prfm pldl1keep, [ aptr5, #0x00]\n"
- "movi C_71.4s, #0\n"
- "prfm pldl1keep, [ aptr6, #0x00]\n"
- "movi C_72.4s, #0\n"
- "prfm pldl1keep, [ aptr7, #0x00]\n"
- "movi C_73.4s, #0\n"
- "ldr sA_1, [%x[aptr]], #0x4\n"
- "movi C_81.4s, #0\n"
- "ldr sA_2, [ aptr1], #0x4\n"
- "movi C_82.4s, #0\n"
- "ldr sA_3, [ aptr2], #0x4\n"
- "movi C_83.4s, #0\n"
- "subs %x[k], %x[k], #1\n"
- "beq 2f\n"
-
- "1:"
- "fmla C_11.4s, B_1.4s, A_1.s[0]\n"
- "ldr qB_3, [%x[bptr], #0x20]\n"
- "fmla C_12.4s, B_2.4s, A_1.s[0]\n"
- "ldr sA_4, [ aptr3], #0x4\n"
- "fmla C_13.4s, B_3.4s, A_1.s[0]\n"
- "ldr sA_1, [ aptr4], #0x04\n"
-
- "fmla C_21.4s, B_1.4s, A_2.s[0]\n"
- "add %x[bptr], %x[bptr], %x[b_row_stride]\n"
- "fmla C_22.4s, B_2.4s, A_2.s[0]\n"
- "prfm pldl1keep, [ aptr3, #0x10]\n"
- "fmla C_23.4s, B_3.4s, A_2.s[0]\n"
- "ldr sA_2, [ aptr5], #0x04\n"
-
- "fmla C_31.4s, B_1.4s, A_3.s[0]\n"
- "prfm pldl1keep, [%x[bptr], #0x00]\n"
- "fmla C_32.4s, B_2.4s, A_3.s[0]\n"
- "prfm pldl1keep, [%x[bptr], #0x10]\n"
- "fmla C_33.4s, B_3.4s, A_3.s[0]\n"
- "ldr sA_3, [ aptr6], #0x04\n"
-
- "fmla C_41.4s, B_1.4s, A_4.s[0]\n"
- "prfm pldl1keep, [%x[bptr], #0x20]\n"
- "fmla C_42.4s, B_2.4s, A_4.s[0]\n"
- "prfm pldl1keep, [ aptr4, #0x10]\n"
- "fmla C_43.4s, B_3.4s, A_4.s[0]\n"
- "ldr sA_4, [ aptr7], #0x04\n"
-
- "fmla C_51.4s, B_1.4s, A_1.s[0]\n"
- "prfm pldl1keep, [ aptr5, #0x10]\n"
- "fmla C_52.4s, B_2.4s, A_1.s[0]\n"
- "prfm pldl1keep, [ aptr6, #0x10]\n"
- "fmla C_53.4s, B_3.4s, A_1.s[0]\n"
- "ldr sA_1, [%x[aptr]], #0x04\n"
-
- "fmla C_61.4s, B_1.4s, A_2.s[0]\n"
- "prfm pldl1keep, [ aptr7, #0x10]\n"
- "fmla C_62.4s, B_2.4s, A_2.s[0]\n"
- "subs %x[k], %x[k], #1\n"
- "fmla C_63.4s, B_3.4s, A_2.s[0]\n"
- "ldr sA_2, [ aptr1], #0x04\n"
-
- "fmla C_71.4s, B_1.4s, A_3.s[0]\n"
- "prfm pldl1keep, [%x[aptr], #0x10]\n"
- "fmla C_72.4s, B_2.4s, A_3.s[0]\n"
- "prfm pldl1keep, [ aptr1, #0x10]\n"
- "fmla C_73.4s, B_3.4s, A_3.s[0]\n"
- "ldr sA_3, [ aptr2], #0x04\n"
-
- "fmla C_81.4s, B_1.4s, A_4.s[0]\n"
- "prfm pldl1keep, [ aptr2, #0x10]\n"
- "fmla C_82.4s, B_2.4s, A_4.s[0]\n"
- "ldp qB_1, qB_2, [%x[bptr]]\n"
- "fmla C_83.4s, B_3.4s, A_4.s[0]\n"
- "bne 1b\n"
-
- "2:"
- "fmla C_11.4s, B_1.4s, A_1.s[0]\n"
- "ldr qB_3, [%x[bptr], #0x20]\n"
- "fmla C_12.4s, B_2.4s, A_1.s[0]\n"
- "stp qC_11, qC_12, [%x[cptr]]\n"
- "fmla C_13.4s, B_3.4s, A_1.s[0]\n"
- "str qC_13, [%x[cptr], #0x20]\n"
- "add %x[cptr], %x[cptr], %x[c_row_stride]\n"
- "ldr sA_1, [ aptr4], #0x04\n"
-
- "fmla C_21.4s, B_1.4s, A_2.s[0]\n"
- "ldr sA_4, [ aptr3], #0x4\n"
- "fmla C_22.4s, B_2.4s, A_2.s[0]\n"
- "stp qC_21, qC_22, [%x[cptr]]\n"
- "fmla C_23.4s, B_3.4s, A_2.s[0]\n"
- "str qC_23, [%x[cptr], #0x20]\n"
- "add %x[cptr], %x[cptr], %x[c_row_stride]\n"
- "ldr sA_2, [ aptr5], #0x04\n"
-
- "fmla C_31.4s, B_1.4s, A_3.s[0]\n"
- "fmla C_32.4s, B_2.4s, A_3.s[0]\n"
- "stp qC_31, qC_32, [%x[cptr]]\n"
- "fmla C_33.4s, B_3.4s, A_3.s[0]\n"
- "str qC_33, [%x[cptr], #0x20]\n"
- "add %x[cptr], %x[cptr], %x[c_row_stride]\n"
- "ldr sA_3, [ aptr6], #0x04\n"
-
- "fmla C_41.4s, B_1.4s, A_4.s[0]\n"
- "fmla C_42.4s, B_2.4s, A_4.s[0]\n"
- "stp qC_41, qC_42, [%x[cptr]]\n"
- "fmla C_43.4s, B_3.4s, A_4.s[0]\n"
- "str qC_43, [%x[cptr], #0x20]\n"
- "add %x[cptr], %x[cptr], %x[c_row_stride]\n"
- "ldr sA_4, [ aptr7], #0x04\n"
-
- "fmla C_51.4s, B_1.4s, A_1.s[0]\n"
- "fmla C_52.4s, B_2.4s, A_1.s[0]\n"
- "stp qC_51, qC_52, [%x[cptr]]\n"
- "fmla C_53.4s, B_3.4s, A_1.s[0]\n"
- "str qC_53, [%x[cptr], #0x20]\n"
- "add %x[cptr], %x[cptr], %x[c_row_stride]\n"
-
- "fmla C_61.4s, B_1.4s, A_2.s[0]\n"
- "fmla C_62.4s, B_2.4s, A_2.s[0]\n"
- "stp qC_61, qC_62, [%x[cptr]]\n"
- "fmla C_63.4s, B_3.4s, A_2.s[0]\n"
- "str qC_63, [%x[cptr], #0x20]\n"
- "add %x[cptr], %x[cptr], %x[c_row_stride]\n"
-
- "fmla C_71.4s, B_1.4s, A_3.s[0]\n"
- "fmla C_72.4s, B_2.4s, A_3.s[0]\n"
- "stp qC_71, qC_72, [%x[cptr]]\n"
- "fmla C_73.4s, B_3.4s, A_3.s[0]\n"
- "str qC_73, [%x[cptr], #0x20]\n"
- "add %x[cptr], %x[cptr], %x[c_row_stride]\n"
-
- "fmla C_81.4s, B_1.4s, A_4.s[0]\n"
- "fmla C_82.4s, B_2.4s, A_4.s[0]\n"
- "stp qC_81, qC_82, [%x[cptr]]\n"
- "fmla C_83.4s, B_3.4s, A_4.s[0]\n"
- "str qC_83, [%x[cptr], #0x20]\n"
- "add %x[cptr], %x[cptr], %x[c_row_stride]\n"
-
- // Clear aliases
- ".unreq aptr1\n"
- ".unreq aptr2\n"
- ".unreq aptr3\n"
- ".unreq aptr4\n"
- ".unreq aptr5\n"
- ".unreq aptr6\n"
- ".unreq aptr7\n"
-
- ".unreq A_1\n" ".unreq A_2\n" ".unreq A_3\n" ".unreq A_4\n"
- ".unreq sA_1\n" ".unreq sA_2\n" ".unreq sA_3\n" ".unreq sA_4\n"
-
- ".unreq B_1\n" ".unreq B_2\n" ".unreq B_3\n"
- ".unreq qB_1\n" ".unreq qB_2\n" ".unreq qB_3\n"
-
- ".unreq C_11\n" ".unreq C_12\n" ".unreq C_13\n"
- ".unreq C_21\n" ".unreq C_22\n" ".unreq C_23\n"
- ".unreq C_31\n" ".unreq C_32\n" ".unreq C_33\n"
- ".unreq C_41\n" ".unreq C_42\n" ".unreq C_43\n"
- ".unreq C_51\n" ".unreq C_52\n" ".unreq C_53\n"
- ".unreq C_61\n" ".unreq C_62\n" ".unreq C_63\n"
- ".unreq C_71\n" ".unreq C_72\n" ".unreq C_73\n"
- ".unreq C_81\n" ".unreq C_82\n" ".unreq C_83\n"
-
- ".unreq qC_11\n" ".unreq qC_12\n" ".unreq qC_13\n"
- ".unreq qC_21\n" ".unreq qC_22\n" ".unreq qC_23\n"
- ".unreq qC_31\n" ".unreq qC_32\n" ".unreq qC_33\n"
- ".unreq qC_41\n" ".unreq qC_42\n" ".unreq qC_43\n"
- ".unreq qC_51\n" ".unreq qC_52\n" ".unreq qC_53\n"
- ".unreq qC_61\n" ".unreq qC_62\n" ".unreq qC_63\n"
- ".unreq qC_71\n" ".unreq qC_72\n" ".unreq qC_73\n"
- ".unreq qC_81\n" ".unreq qC_82\n" ".unreq qC_83\n"
- : [aptr] "+r" (aptr),
- [bptr] "+r" (bptr),
- [cptr] "+r" (cptr),
- [k] "+r" (k)
- : [a_row_stride] "r" (a_row_stride * sizeof(float)),
- [b_row_stride] "r" (b_row_stride * sizeof(float)),
- [c_row_stride] "r" (c_row_stride * sizeof(float))
- : "cc", "memory",
- "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10",
- "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19",
- "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28",
- "v29", "v30", "x17", "x18", "x19", "x20", "x21", "x22", "x23"
- );
- }
- }
-}
-
-/*****************************************************************************/
-/* 4x16 blocked GEMM with specialised tails
- */
-#include "a64_sgemm_4x16.hpp"
-
-template <>
-inline void BlockedGemm<4, 16, float, float>(
- const float* const a, const float* const b, float *c,
- const int M, const int K, const int N,
- const int a_row_stride,
- const int b_row_stride,
- const int c_row_stride
-) {
- // Despatch based on tail of K
- switch (K % 4) {
- case 3:
- sgemm_4x16_impl<3>(
- a, b, c, M, K, N, a_row_stride, b_row_stride, c_row_stride
- );
- break;
- case 2:
- sgemm_4x16_impl<2>(
- a, b, c, M, K, N, a_row_stride, b_row_stride, c_row_stride
- );
- break;
- case 1:
- sgemm_4x16_impl<1>(
- a, b, c, M, K, N, a_row_stride, b_row_stride, c_row_stride
- );
- break;
- case 0:
- sgemm_4x16_impl<0>(
- a, b, c, M, K, N, a_row_stride, b_row_stride, c_row_stride
- );
- break;
- default:
- assert(false);
- }
-}
-
-#endif // __aarch64__
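A detail worth noting in the deleted 4x16 kernel: the K remainder is handled by a run-time switch into one of four template instantiations (sgemm_4x16_impl<0..3>), so inside each instantiation the tail length is a compile-time constant. The same pattern in miniature, with hypothetical names:

// Sketch of the dispatch-to-specialisation pattern: one branch per call,
// and each instantiation sees a fixed tail length that the compiler can
// fully unroll.
template <unsigned int Tail>
void kernel_impl(const float *x, float *y, int n); // specialised elsewhere

inline void kernel(const float *x, float *y, int n)
{
    switch (n % 4)
    {
        case 0: kernel_impl<0>(x, y, n); break;
        case 1: kernel_impl<1>(x, y, n); break;
        case 2: kernel_impl<2>(x, y, n); break;
        default: kernel_impl<3>(x, y, n); break;
    }
}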
diff --git a/arm_compute/core/NEON/kernels/convolution/winograd/gemm/a64_sgemm_4x16.hpp b/arm_compute/core/NEON/kernels/convolution/winograd/gemm/a64_sgemm_4x16.hpp
deleted file mode 100644
index 5cd37de7a0..0000000000
--- a/arm_compute/core/NEON/kernels/convolution/winograd/gemm/a64_sgemm_4x16.hpp
+++ /dev/null
@@ -1,1446 +0,0 @@
-/*
- * Copyright (c) 2017 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-template <const unsigned int tail>
-inline void sgemm_4x16_impl(
- const float* const a, const float* const b, float *c,
- const int M, const int K, const int N,
- const int a_row_stride,
- const int b_row_stride,
- const int c_row_stride
-);
-
-template <>
-inline void sgemm_4x16_impl<0>(
- const float* const a, const float* const b, float *c,
- const int M, const int K, const int N,
- const int a_row_stride,
- const int b_row_stride,
- const int c_row_stride
-) {
- const int TAIL_SIZE = 0;
- const int M_BLOCK = 4;
- const int N_BLOCK = 16;
-
- const int m_blocks = iceildiv(M, M_BLOCK);
- const int n_blocks = iceildiv(N, N_BLOCK);
-
- // For each block of output rows
- for (int mblock = 0; mblock < m_blocks; mblock++) {
- // For each block of output columns
- for (int nblock = 0; nblock < n_blocks; nblock++) {
- const float *aptr = a + mblock*M_BLOCK*a_row_stride;
- const float *bptr = b + nblock*N_BLOCK;
- float *cptr = c + mblock*M_BLOCK*c_row_stride + nblock*N_BLOCK;
- int k = (K - TAIL_SIZE) / 4;
-
- asm volatile(
- "aptr2 .req X20\n"
- "aptr3 .req X21\n"
- "aptr4 .req X22\n"
- "vC11 .req v0\n" "vC12 .req v1\n" "vC13 .req v2\n" "vC14 .req v3\n"
- "qC11 .req q0\n" "qC12 .req q1\n" "qC13 .req q2\n" "qC14 .req q3\n"
- "vC21 .req v4\n" "vC22 .req v5\n" "vC23 .req v6\n" "vC24 .req v7\n"
- "qC21 .req q4\n" "qC22 .req q5\n" "qC23 .req q6\n" "qC24 .req q7\n"
- "vC31 .req v8\n" "vC32 .req v9\n" "vC33 .req v10\n" "vC34 .req v11\n"
- "qC31 .req q8\n" "qC32 .req q9\n" "qC33 .req q10\n" "qC34 .req q11\n"
- "vC41 .req v12\n" "vC42 .req v13\n" "vC43 .req v14\n" "vC44 .req v15\n"
- "qC41 .req q12\n" "qC42 .req q13\n" "qC43 .req q14\n" "qC44 .req q15\n"
- "vA1 .req v16\n" "qA1 .req q16\n" "dA1 .req d16\n" "sA1 .req s16\n"
- "vA2 .req v17\n" "qA2 .req q17\n" "dA2 .req d17\n" "sA2 .req s17\n"
- "vA3 .req v18\n" "qA3 .req q18\n" "dA3 .req d18\n" "sA3 .req s18\n"
- "vA4 .req v19\n" "qA4 .req q19\n" "dA4 .req d19\n" "sA4 .req s19\n"
- "vB1 .req v20\n" "qB1 .req q20\n"
- "vB2 .req v21\n" "qB2 .req q21\n"
- "vB3 .req v22\n" "qB3 .req q22\n"
- "vB4 .req v23\n" "qB4 .req q23\n"
-
- // Clear accumulators, initialise pointers
- "movi vC11.4s, #0\n"
- "add aptr2, %x[aptr], %x[a_row_stride_bytes]\n"
- "movi vC12.4s, #0\n"
- "add aptr3, aptr2, %x[a_row_stride_bytes]\n"
- "movi vC13.4s, #0\n"
- "add aptr4, aptr3, %x[a_row_stride_bytes]\n"
- "movi vC14.4s, #0\n"
- "ldr qA1, [%x[aptr]], #0x10\n"
- "movi vC21.4s, #0\n"
- "ldr qA2, [ aptr2], #0x10\n"
- "movi vC22.4s, #0\n"
- "ldr qB1, [%x[bptr], #0x00]\n"
- "movi vC23.4s, #0\n"
- "ldr qB2, [%x[bptr], #0x10]\n"
- "movi vC24.4s, #0\n"
- "ldr qB3, [%x[bptr], #0x20]\n"
- "movi vC31.4s, #0\n"
- "movi vC32.4s, #0\n"
- "movi vC33.4s, #0\n"
- "movi vC34.4s, #0\n"
- "movi vC41.4s, #0\n"
- "movi vC42.4s, #0\n"
- "movi vC43.4s, #0\n"
- "movi vC44.4s, #0\n"
- "subs %x[k], %x[k], #1\n"
- "beq 2f\n"
-
- "1:" // Loop proper
- "fmla vC11.4s, vB1.4s, vA1.s[0]\n"
- "ldr qA3, [ aptr3], #0x10\n"
- "fmla vC21.4s, vB1.4s, vA2.s[0]\n"
- "ldr qA4, [ aptr4], #0x10\n"
- "fmla vC31.4s, vB1.4s, vA3.s[0]\n"
- "ldr qB4, [%x[bptr], #0x30]\n"
- "fmla vC41.4s, vB1.4s, vA4.s[0]\n"
- "add %x[bptr], %x[bptr], %x[b_row_stride_bytes]\n"
- "fmla vC12.4s, vB2.4s, vA1.s[0]\n"
- "fmla vC22.4s, vB2.4s, vA2.s[0]\n"
- "fmla vC32.4s, vB2.4s, vA3.s[0]\n"
- "ldr qB1, [%x[bptr], #0x00]\n"
- "fmla vC42.4s, vB2.4s, vA4.s[0]\n"
- "fmla vC13.4s, vB3.4s, vA1.s[0]\n"
- "fmla vC23.4s, vB3.4s, vA2.s[0]\n"
- "fmla vC33.4s, vB3.4s, vA3.s[0]\n"
- "ldr qB2, [%x[bptr], #0x10]\n"
- "fmla vC43.4s, vB3.4s, vA4.s[0]\n"
- "fmla vC14.4s, vB4.4s, vA1.s[0]\n"
- "fmla vC24.4s, vB4.4s, vA2.s[0]\n"
- "fmla vC34.4s, vB4.4s, vA3.s[0]\n"
- "ldr qB3, [%x[bptr], #0x20]\n"
- "fmla vC44.4s, vB4.4s, vA4.s[0]\n"
-
- "fmla vC11.4s, vB1.4s, vA1.s[1]\n"
- "fmla vC21.4s, vB1.4s, vA2.s[1]\n"
- "fmla vC31.4s, vB1.4s, vA3.s[1]\n"
- "ldr qB4, [%x[bptr], #0x30]\n"
- "fmla vC41.4s, vB1.4s, vA4.s[1]\n"
- "add %x[bptr], %x[bptr], %x[b_row_stride_bytes]\n"
- "fmla vC12.4s, vB2.4s, vA1.s[1]\n"
- "fmla vC22.4s, vB2.4s, vA2.s[1]\n"
- "fmla vC32.4s, vB2.4s, vA3.s[1]\n"
- "ldr qB1, [%x[bptr], #0x00]\n"
- "fmla vC42.4s, vB2.4s, vA4.s[1]\n"
- "fmla vC13.4s, vB3.4s, vA1.s[1]\n"
- "fmla vC23.4s, vB3.4s, vA2.s[1]\n"
- "fmla vC33.4s, vB3.4s, vA3.s[1]\n"
- "ldr qB2, [%x[bptr], #0x10]\n"
- "fmla vC43.4s, vB3.4s, vA4.s[1]\n"
- "fmla vC14.4s, vB4.4s, vA1.s[1]\n"
- "fmla vC24.4s, vB4.4s, vA2.s[1]\n"
- "fmla vC34.4s, vB4.4s, vA3.s[1]\n"
- "ldr qB3, [%x[bptr], #0x20]\n"
- "fmla vC44.4s, vB4.4s, vA4.s[1]\n"
-
- "fmla vC11.4s, vB1.4s, vA1.s[2]\n"
- "fmla vC21.4s, vB1.4s, vA2.s[2]\n"
- "fmla vC31.4s, vB1.4s, vA3.s[2]\n"
- "ldr qB4, [%x[bptr], #0x30]\n"
- "fmla vC41.4s, vB1.4s, vA4.s[2]\n"
- "add %x[bptr], %x[bptr], %x[b_row_stride_bytes]\n"
- "fmla vC12.4s, vB2.4s, vA1.s[2]\n"
- "fmla vC22.4s, vB2.4s, vA2.s[2]\n"
- "fmla vC32.4s, vB2.4s, vA3.s[2]\n"
- "ldr qB1, [%x[bptr], #0x00]\n"
- "fmla vC42.4s, vB2.4s, vA4.s[2]\n"
- "fmla vC13.4s, vB3.4s, vA1.s[2]\n"
- "fmla vC23.4s, vB3.4s, vA2.s[2]\n"
- "fmla vC33.4s, vB3.4s, vA3.s[2]\n"
- "ldr qB2, [%x[bptr], #0x10]\n"
- "fmla vC43.4s, vB3.4s, vA4.s[2]\n"
- "fmla vC14.4s, vB4.4s, vA1.s[2]\n"
- "fmla vC24.4s, vB4.4s, vA2.s[2]\n"
- "fmla vC34.4s, vB4.4s, vA3.s[2]\n"
- "ldr qB3, [%x[bptr], #0x20]\n"
- "fmla vC44.4s, vB4.4s, vA4.s[2]\n"
-
- "fmla vC11.4s, vB1.4s, vA1.s[3]\n"
- "fmla vC21.4s, vB1.4s, vA2.s[3]\n"
- "fmla vC31.4s, vB1.4s, vA3.s[3]\n"
- "ldr qB4, [%x[bptr], #0x30]\n"
- "fmla vC41.4s, vB1.4s, vA4.s[3]\n"
- "add %x[bptr], %x[bptr], %x[b_row_stride_bytes]\n"
- "fmla vC12.4s, vB2.4s, vA1.s[3]\n"
- "fmla vC22.4s, vB2.4s, vA2.s[3]\n"
- "fmla vC32.4s, vB2.4s, vA3.s[3]\n"
- "ldr qB1, [%x[bptr], #0x00]\n"
- "fmla vC42.4s, vB2.4s, vA4.s[3]\n"
- "fmla vC13.4s, vB3.4s, vA1.s[3]\n"
- "fmla vC23.4s, vB3.4s, vA2.s[3]\n"
- "fmla vC33.4s, vB3.4s, vA3.s[3]\n"
- "ldr qB2, [%x[bptr], #0x10]\n"
- "fmla vC43.4s, vB3.4s, vA4.s[3]\n"
- "subs %x[k], %x[k], #1\n"
- "fmla vC14.4s, vB4.4s, vA1.s[3]\n"
- "ldr qA1, [%x[aptr]], #0x10\n"
- "fmla vC24.4s, vB4.4s, vA2.s[3]\n"
- "ldr qA2, [ aptr2], #0x10\n"
- "fmla vC34.4s, vB4.4s, vA3.s[3]\n"
- "ldr qB3, [%x[bptr], #0x20]\n"
- "fmla vC44.4s, vB4.4s, vA4.s[3]\n"
- "bne 1b\n"
-
- "2:" // Tail
- "fmla vC11.4s, vB1.4s, vA1.s[0]\n"
- "ldr qA3, [ aptr3], #0x10\n"
- "fmla vC21.4s, vB1.4s, vA2.s[0]\n"
- "ldr qA4, [ aptr4], #0x10\n"
- "fmla vC31.4s, vB1.4s, vA3.s[0]\n"
- "ldr qB4, [%x[bptr], #0x30]\n"
- "fmla vC41.4s, vB1.4s, vA4.s[0]\n"
- "add %x[bptr], %x[bptr], %x[b_row_stride_bytes]\n"
- "fmla vC12.4s, vB2.4s, vA1.s[0]\n"
- "fmla vC22.4s, vB2.4s, vA2.s[0]\n"
- "fmla vC32.4s, vB2.4s, vA3.s[0]\n"
- "ldr qB1, [%x[bptr], #0x00]\n"
- "fmla vC42.4s, vB2.4s, vA4.s[0]\n"
- "fmla vC13.4s, vB3.4s, vA1.s[0]\n"
- "fmla vC23.4s, vB3.4s, vA2.s[0]\n"
- "fmla vC33.4s, vB3.4s, vA3.s[0]\n"
- "ldr qB2, [%x[bptr], #0x10]\n"
- "fmla vC43.4s, vB3.4s, vA4.s[0]\n"
- "fmla vC14.4s, vB4.4s, vA1.s[0]\n"
- "fmla vC24.4s, vB4.4s, vA2.s[0]\n"
- "fmla vC34.4s, vB4.4s, vA3.s[0]\n"
- "ldr qB3, [%x[bptr], #0x20]\n"
- "fmla vC44.4s, vB4.4s, vA4.s[0]\n"
-
- "fmla vC11.4s, vB1.4s, vA1.s[1]\n"
- "fmla vC21.4s, vB1.4s, vA2.s[1]\n"
- "fmla vC31.4s, vB1.4s, vA3.s[1]\n"
- "ldr qB4, [%x[bptr], #0x30]\n"
- "fmla vC41.4s, vB1.4s, vA4.s[1]\n"
- "add %x[bptr], %x[bptr], %x[b_row_stride_bytes]\n"
- "fmla vC12.4s, vB2.4s, vA1.s[1]\n"
- "fmla vC22.4s, vB2.4s, vA2.s[1]\n"
- "fmla vC32.4s, vB2.4s, vA3.s[1]\n"
- "ldr qB1, [%x[bptr], #0x00]\n"
- "fmla vC42.4s, vB2.4s, vA4.s[1]\n"
- "fmla vC13.4s, vB3.4s, vA1.s[1]\n"
- "fmla vC23.4s, vB3.4s, vA2.s[1]\n"
- "fmla vC33.4s, vB3.4s, vA3.s[1]\n"
- "ldr qB2, [%x[bptr], #0x10]\n"
- "fmla vC43.4s, vB3.4s, vA4.s[1]\n"
- "fmla vC14.4s, vB4.4s, vA1.s[1]\n"
- "fmla vC24.4s, vB4.4s, vA2.s[1]\n"
- "fmla vC34.4s, vB4.4s, vA3.s[1]\n"
- "ldr qB3, [%x[bptr], #0x20]\n"
- "fmla vC44.4s, vB4.4s, vA4.s[1]\n"
-
- "fmla vC11.4s, vB1.4s, vA1.s[2]\n"
- "fmla vC21.4s, vB1.4s, vA2.s[2]\n"
- "fmla vC31.4s, vB1.4s, vA3.s[2]\n"
- "ldr qB4, [%x[bptr], #0x30]\n"
- "fmla vC41.4s, vB1.4s, vA4.s[2]\n"
- "add %x[bptr], %x[bptr], %x[b_row_stride_bytes]\n"
- "fmla vC12.4s, vB2.4s, vA1.s[2]\n"
- "fmla vC22.4s, vB2.4s, vA2.s[2]\n"
- "fmla vC32.4s, vB2.4s, vA3.s[2]\n"
- "ldr qB1, [%x[bptr], #0x00]\n"
- "fmla vC42.4s, vB2.4s, vA4.s[2]\n"
- "fmla vC13.4s, vB3.4s, vA1.s[2]\n"
- "fmla vC23.4s, vB3.4s, vA2.s[2]\n"
- "fmla vC33.4s, vB3.4s, vA3.s[2]\n"
- "ldr qB2, [%x[bptr], #0x10]\n"
- "fmla vC43.4s, vB3.4s, vA4.s[2]\n"
- "fmla vC14.4s, vB4.4s, vA1.s[2]\n"
- "fmla vC24.4s, vB4.4s, vA2.s[2]\n"
- "fmla vC34.4s, vB4.4s, vA3.s[2]\n"
- "ldr qB3, [%x[bptr], #0x20]\n"
- "fmla vC44.4s, vB4.4s, vA4.s[2]\n"
-
- "fmla vC11.4s, vB1.4s, vA1.s[3]\n"
- "ldr qB4, [%x[bptr], #0x30]\n"
- "fmla vC12.4s, vB2.4s, vA1.s[3]\n"
- "stp qC11, qC12, [%x[cptr], #0x00]\n"
- "fmla vC13.4s, vB3.4s, vA1.s[3]\n"
- "fmla vC14.4s, vB4.4s, vA1.s[3]\n"
- "stp qC13, qC14, [%x[cptr], #0x20]\n"
- "fmla vC21.4s, vB1.4s, vA2.s[3]\n"
- "add %x[cptr], %x[cptr], %x[c_row_stride_bytes]\n"
- "fmla vC22.4s, vB2.4s, vA2.s[3]\n"
- "stp qC21, qC22, [%x[cptr], #0x00]\n"
- "fmla vC23.4s, vB3.4s, vA2.s[3]\n"
- "fmla vC24.4s, vB4.4s, vA2.s[3]\n"
- "stp qC23, qC24, [%x[cptr], #0x20]\n"
- "fmla vC31.4s, vB1.4s, vA3.s[3]\n"
- "add %x[cptr], %x[cptr], %x[c_row_stride_bytes]\n"
- "fmla vC32.4s, vB2.4s, vA3.s[3]\n"
- "stp qC31, qC32, [%x[cptr], #0x00]\n"
- "fmla vC33.4s, vB3.4s, vA3.s[3]\n"
- "fmla vC34.4s, vB4.4s, vA3.s[3]\n"
- "stp qC33, qC34, [%x[cptr], #0x20]\n"
- "fmla vC41.4s, vB1.4s, vA4.s[3]\n"
- "add %x[cptr], %x[cptr], %x[c_row_stride_bytes]\n"
- "fmla vC42.4s, vB2.4s, vA4.s[3]\n"
- "stp qC41, qC42, [%x[cptr], #0x00]\n"
- "fmla vC43.4s, vB3.4s, vA4.s[3]\n"
- "fmla vC44.4s, vB4.4s, vA4.s[3]\n"
- "stp qC43, qC44, [%x[cptr], #0x20]\n"
- "add %x[cptr], %x[cptr], %x[c_row_stride_bytes]\n"
-
- ".unreq vB4\n" ".unreq qB4\n"
- ".unreq vB3\n" ".unreq qB3\n"
- ".unreq vB2\n" ".unreq qB2\n"
- ".unreq vB1\n" ".unreq qB1\n"
- ".unreq vA4\n" ".unreq qA4\n" ".unreq dA4\n" ".unreq sA4\n"
- ".unreq vA3\n" ".unreq qA3\n" ".unreq dA3\n" ".unreq sA3\n"
- ".unreq vA2\n" ".unreq qA2\n" ".unreq dA2\n" ".unreq sA2\n"
- ".unreq vA1\n" ".unreq qA1\n" ".unreq dA1\n" ".unreq sA1\n"
- ".unreq qC41\n" ".unreq qC42\n" ".unreq qC43\n" ".unreq qC44\n"
- ".unreq vC41\n" ".unreq vC42\n" ".unreq vC43\n" ".unreq vC44\n"
- ".unreq qC31\n" ".unreq qC32\n" ".unreq qC33\n" ".unreq qC34\n"
- ".unreq vC31\n" ".unreq vC32\n" ".unreq vC33\n" ".unreq vC34\n"
- ".unreq qC21\n" ".unreq qC22\n" ".unreq qC23\n" ".unreq qC24\n"
- ".unreq vC21\n" ".unreq vC22\n" ".unreq vC23\n" ".unreq vC24\n"
- ".unreq qC11\n" ".unreq qC12\n" ".unreq qC13\n" ".unreq qC14\n"
- ".unreq vC11\n" ".unreq vC12\n" ".unreq vC13\n" ".unreq vC14\n"
- ".unreq aptr2\n"
- ".unreq aptr3\n"
- ".unreq aptr4\n"
-
- : [aptr] "+r" (aptr),
- [bptr] "+r" (bptr),
- [cptr] "+r" (cptr),
- [k] "+r" (k)
- : [a_row_stride_bytes] "r" (a_row_stride * sizeof(float)),
- [b_row_stride_bytes] "r" (b_row_stride * sizeof(float)),
- [c_row_stride_bytes] "r" (c_row_stride * sizeof(float))
- : "cc", "memory", "x20", "x21", "x22",
- "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10",
- "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20",
- "v21", "v22", "v23"
- );
- }
- }
-}
-
-template <>
-inline void sgemm_4x16_impl<1>(
- const float* const a, const float* const b, float *c,
- const int M, const int K, const int N,
- const int a_row_stride,
- const int b_row_stride,
- const int c_row_stride
-) {
- const int TAIL_SIZE = 1;
- const int M_BLOCK = 4;
- const int N_BLOCK = 16;
-
- const int m_blocks = iceildiv(M, M_BLOCK);
- const int n_blocks = iceildiv(N, N_BLOCK);
-
- // For each block of output rows
- for (int mblock = 0; mblock < m_blocks; mblock++) {
- // For each block of output columns
- for (int nblock = 0; nblock < n_blocks; nblock++) {
- const float *aptr = a + mblock*M_BLOCK*a_row_stride;
- const float *bptr = b + nblock*N_BLOCK;
- float *cptr = c + mblock*M_BLOCK*c_row_stride + nblock*N_BLOCK;
- int k = (K - TAIL_SIZE) / 4;
-
- asm volatile(
- "aptr2 .req X20\n"
- "aptr3 .req X21\n"
- "aptr4 .req X22\n"
- "vC11 .req v0\n" "vC12 .req v1\n" "vC13 .req v2\n" "vC14 .req v3\n"
- "qC11 .req q0\n" "qC12 .req q1\n" "qC13 .req q2\n" "qC14 .req q3\n"
- "vC21 .req v4\n" "vC22 .req v5\n" "vC23 .req v6\n" "vC24 .req v7\n"
- "qC21 .req q4\n" "qC22 .req q5\n" "qC23 .req q6\n" "qC24 .req q7\n"
- "vC31 .req v8\n" "vC32 .req v9\n" "vC33 .req v10\n" "vC34 .req v11\n"
- "qC31 .req q8\n" "qC32 .req q9\n" "qC33 .req q10\n" "qC34 .req q11\n"
- "vC41 .req v12\n" "vC42 .req v13\n" "vC43 .req v14\n" "vC44 .req v15\n"
- "qC41 .req q12\n" "qC42 .req q13\n" "qC43 .req q14\n" "qC44 .req q15\n"
- "vA1 .req v16\n" "qA1 .req q16\n" "dA1 .req d16\n" "sA1 .req s16\n"
- "vA2 .req v17\n" "qA2 .req q17\n" "dA2 .req d17\n" "sA2 .req s17\n"
- "vA3 .req v18\n" "qA3 .req q18\n" "dA3 .req d18\n" "sA3 .req s18\n"
- "vA4 .req v19\n" "qA4 .req q19\n" "dA4 .req d19\n" "sA4 .req s19\n"
- "vB1 .req v20\n" "qB1 .req q20\n"
- "vB2 .req v21\n" "qB2 .req q21\n"
- "vB3 .req v22\n" "qB3 .req q22\n"
- "vB4 .req v23\n" "qB4 .req q23\n"
-
- // Clear accumulators, initialise pointers
- "movi vC11.4s, #0\n"
- "ldr qB1, [%x[bptr], #0x00]\n"
- "movi vC12.4s, #0\n"
- "ldr qB2, [%x[bptr], #0x10]\n"
- "movi vC13.4s, #0\n"
- "ldr qB3, [%x[bptr], #0x20]\n"
- "movi vC14.4s, #0\n"
- "add aptr2, %x[aptr], %x[a_row_stride_bytes]\n"
- "movi vC21.4s, #0\n"
- "add aptr3, aptr2, %x[a_row_stride_bytes]\n"
- "movi vC22.4s, #0\n"
- "add aptr4, aptr3, %x[a_row_stride_bytes]\n"
- "movi vC23.4s, #0\n"
- "cbnz %x[k], 3f\n"
-
- // Prepare for tail in K
- "movi vC24.4s, #0\n"
- "ldr sA1, [%x[aptr]], #0x04\n"
- "movi vC31.4s, #0\n"
- "ldr sA2, [ aptr2], #0x04\n"
- "movi vC32.4s, #0\n"
- "movi vC33.4s, #0\n"
- "movi vC34.4s, #0\n"
- "movi vC41.4s, #0\n"
- "movi vC42.4s, #0\n"
- "movi vC43.4s, #0\n"
- "movi vC44.4s, #0\n"
- "b 2f\n" // Jump to tail
-
- "3:" // Prepare for loop over K
- "movi vC24.4s, #0\n"
- "ldr qA1, [%x[aptr]], #0x10\n"
- "movi vC31.4s, #0\n"
- "ldr qA2, [ aptr2], #0x10\n"
- "movi vC32.4s, #0\n"
- "movi vC33.4s, #0\n"
- "movi vC34.4s, #0\n"
- "movi vC41.4s, #0\n"
- "movi vC42.4s, #0\n"
- "movi vC43.4s, #0\n"
- "movi vC44.4s, #0\n"
- "subs %x[k], %x[k], #1\n"
- "beq 4f\n"
-
- "1:" // Loop proper
- "fmla vC11.4s, vB1.4s, vA1.s[0]\n"
- "ldr qA3, [ aptr3], #0x10\n"
- "fmla vC21.4s, vB1.4s, vA2.s[0]\n"
- "ldr qA4, [ aptr4], #0x10\n"
- "fmla vC31.4s, vB1.4s, vA3.s[0]\n"
- "ldr qB4, [%x[bptr], #0x30]\n"
- "fmla vC41.4s, vB1.4s, vA4.s[0]\n"
- "add %x[bptr], %x[bptr], %x[b_row_stride_bytes]\n"
- "fmla vC12.4s, vB2.4s, vA1.s[0]\n"
- "fmla vC22.4s, vB2.4s, vA2.s[0]\n"
- "fmla vC32.4s, vB2.4s, vA3.s[0]\n"
- "ldr qB1, [%x[bptr], #0x00]\n"
- "fmla vC42.4s, vB2.4s, vA4.s[0]\n"
- "fmla vC13.4s, vB3.4s, vA1.s[0]\n"
- "fmla vC23.4s, vB3.4s, vA2.s[0]\n"
- "fmla vC33.4s, vB3.4s, vA3.s[0]\n"
- "ldr qB2, [%x[bptr], #0x10]\n"
- "fmla vC43.4s, vB3.4s, vA4.s[0]\n"
- "fmla vC14.4s, vB4.4s, vA1.s[0]\n"
- "fmla vC24.4s, vB4.4s, vA2.s[0]\n"
- "fmla vC34.4s, vB4.4s, vA3.s[0]\n"
- "ldr qB3, [%x[bptr], #0x20]\n"
- "fmla vC44.4s, vB4.4s, vA4.s[0]\n"
-
- "fmla vC11.4s, vB1.4s, vA1.s[1]\n"
- "fmla vC21.4s, vB1.4s, vA2.s[1]\n"
- "fmla vC31.4s, vB1.4s, vA3.s[1]\n"
- "ldr qB4, [%x[bptr], #0x30]\n"
- "fmla vC41.4s, vB1.4s, vA4.s[1]\n"
- "add %x[bptr], %x[bptr], %x[b_row_stride_bytes]\n"
- "fmla vC12.4s, vB2.4s, vA1.s[1]\n"
- "fmla vC22.4s, vB2.4s, vA2.s[1]\n"
- "fmla vC32.4s, vB2.4s, vA3.s[1]\n"
- "ldr qB1, [%x[bptr], #0x00]\n"
- "fmla vC42.4s, vB2.4s, vA4.s[1]\n"
- "fmla vC13.4s, vB3.4s, vA1.s[1]\n"
- "fmla vC23.4s, vB3.4s, vA2.s[1]\n"
- "fmla vC33.4s, vB3.4s, vA3.s[1]\n"
- "ldr qB2, [%x[bptr], #0x10]\n"
- "fmla vC43.4s, vB3.4s, vA4.s[1]\n"
- "fmla vC14.4s, vB4.4s, vA1.s[1]\n"
- "fmla vC24.4s, vB4.4s, vA2.s[1]\n"
- "fmla vC34.4s, vB4.4s, vA3.s[1]\n"
- "ldr qB3, [%x[bptr], #0x20]\n"
- "fmla vC44.4s, vB4.4s, vA4.s[1]\n"
-
- "fmla vC11.4s, vB1.4s, vA1.s[2]\n"
- "fmla vC21.4s, vB1.4s, vA2.s[2]\n"
- "fmla vC31.4s, vB1.4s, vA3.s[2]\n"
- "ldr qB4, [%x[bptr], #0x30]\n"
- "fmla vC41.4s, vB1.4s, vA4.s[2]\n"
- "add %x[bptr], %x[bptr], %x[b_row_stride_bytes]\n"
- "fmla vC12.4s, vB2.4s, vA1.s[2]\n"
- "fmla vC22.4s, vB2.4s, vA2.s[2]\n"
- "fmla vC32.4s, vB2.4s, vA3.s[2]\n"
- "ldr qB1, [%x[bptr], #0x00]\n"
- "fmla vC42.4s, vB2.4s, vA4.s[2]\n"
- "fmla vC13.4s, vB3.4s, vA1.s[2]\n"
- "fmla vC23.4s, vB3.4s, vA2.s[2]\n"
- "fmla vC33.4s, vB3.4s, vA3.s[2]\n"
- "ldr qB2, [%x[bptr], #0x10]\n"
- "fmla vC43.4s, vB3.4s, vA4.s[2]\n"
- "fmla vC14.4s, vB4.4s, vA1.s[2]\n"
- "fmla vC24.4s, vB4.4s, vA2.s[2]\n"
- "fmla vC34.4s, vB4.4s, vA3.s[2]\n"
- "ldr qB3, [%x[bptr], #0x20]\n"
- "fmla vC44.4s, vB4.4s, vA4.s[2]\n"
-
- "fmla vC11.4s, vB1.4s, vA1.s[3]\n"
- "fmla vC21.4s, vB1.4s, vA2.s[3]\n"
- "fmla vC31.4s, vB1.4s, vA3.s[3]\n"
- "ldr qB4, [%x[bptr], #0x30]\n"
- "fmla vC41.4s, vB1.4s, vA4.s[3]\n"
- "add %x[bptr], %x[bptr], %x[b_row_stride_bytes]\n"
- "fmla vC12.4s, vB2.4s, vA1.s[3]\n"
- "fmla vC22.4s, vB2.4s, vA2.s[3]\n"
- "fmla vC32.4s, vB2.4s, vA3.s[3]\n"
- "ldr qB1, [%x[bptr], #0x00]\n"
- "fmla vC42.4s, vB2.4s, vA4.s[3]\n"
- "fmla vC13.4s, vB3.4s, vA1.s[3]\n"
- "fmla vC23.4s, vB3.4s, vA2.s[3]\n"
- "fmla vC33.4s, vB3.4s, vA3.s[3]\n"
- "ldr qB2, [%x[bptr], #0x10]\n"
- "fmla vC43.4s, vB3.4s, vA4.s[3]\n"
- "subs %x[k], %x[k], #1\n"
- "fmla vC14.4s, vB4.4s, vA1.s[3]\n"
- "ldr qA1, [%x[aptr]], #0x10\n"
- "fmla vC24.4s, vB4.4s, vA2.s[3]\n"
- "ldr qA2, [ aptr2], #0x10\n"
- "fmla vC34.4s, vB4.4s, vA3.s[3]\n"
- "ldr qB3, [%x[bptr], #0x20]\n"
- "fmla vC44.4s, vB4.4s, vA4.s[3]\n"
- "bne 1b\n"
-
- "4:" // Tail iteration
- "fmla vC11.4s, vB1.4s, vA1.s[0]\n"
- "ldr qA3, [ aptr3], #0x10\n"
- "fmla vC21.4s, vB1.4s, vA2.s[0]\n"
- "ldr qA4, [ aptr4], #0x10\n"
- "fmla vC31.4s, vB1.4s, vA3.s[0]\n"
- "ldr qB4, [%x[bptr], #0x30]\n"
- "fmla vC41.4s, vB1.4s, vA4.s[0]\n"
- "add %x[bptr], %x[bptr], %x[b_row_stride_bytes]\n"
- "fmla vC12.4s, vB2.4s, vA1.s[0]\n"
- "fmla vC22.4s, vB2.4s, vA2.s[0]\n"
- "fmla vC32.4s, vB2.4s, vA3.s[0]\n"
- "ldr qB1, [%x[bptr], #0x00]\n"
- "fmla vC42.4s, vB2.4s, vA4.s[0]\n"
- "fmla vC13.4s, vB3.4s, vA1.s[0]\n"
- "fmla vC23.4s, vB3.4s, vA2.s[0]\n"
- "fmla vC33.4s, vB3.4s, vA3.s[0]\n"
- "ldr qB2, [%x[bptr], #0x10]\n"
- "fmla vC43.4s, vB3.4s, vA4.s[0]\n"
- "fmla vC14.4s, vB4.4s, vA1.s[0]\n"
- "fmla vC24.4s, vB4.4s, vA2.s[0]\n"
- "fmla vC34.4s, vB4.4s, vA3.s[0]\n"
- "ldr qB3, [%x[bptr], #0x20]\n"
- "fmla vC44.4s, vB4.4s, vA4.s[0]\n"
-
- "fmla vC11.4s, vB1.4s, vA1.s[1]\n"
- "fmla vC21.4s, vB1.4s, vA2.s[1]\n"
- "fmla vC31.4s, vB1.4s, vA3.s[1]\n"
- "ldr qB4, [%x[bptr], #0x30]\n"
- "fmla vC41.4s, vB1.4s, vA4.s[1]\n"
- "add %x[bptr], %x[bptr], %x[b_row_stride_bytes]\n"
- "fmla vC12.4s, vB2.4s, vA1.s[1]\n"
- "fmla vC22.4s, vB2.4s, vA2.s[1]\n"
- "fmla vC32.4s, vB2.4s, vA3.s[1]\n"
- "ldr qB1, [%x[bptr], #0x00]\n"
- "fmla vC42.4s, vB2.4s, vA4.s[1]\n"
- "fmla vC13.4s, vB3.4s, vA1.s[1]\n"
- "fmla vC23.4s, vB3.4s, vA2.s[1]\n"
- "fmla vC33.4s, vB3.4s, vA3.s[1]\n"
- "ldr qB2, [%x[bptr], #0x10]\n"
- "fmla vC43.4s, vB3.4s, vA4.s[1]\n"
- "fmla vC14.4s, vB4.4s, vA1.s[1]\n"
- "fmla vC24.4s, vB4.4s, vA2.s[1]\n"
- "fmla vC34.4s, vB4.4s, vA3.s[1]\n"
- "ldr qB3, [%x[bptr], #0x20]\n"
- "fmla vC44.4s, vB4.4s, vA4.s[1]\n"
-
- "fmla vC11.4s, vB1.4s, vA1.s[2]\n"
- "fmla vC21.4s, vB1.4s, vA2.s[2]\n"
- "fmla vC31.4s, vB1.4s, vA3.s[2]\n"
- "ldr qB4, [%x[bptr], #0x30]\n"
- "fmla vC41.4s, vB1.4s, vA4.s[2]\n"
- "add %x[bptr], %x[bptr], %x[b_row_stride_bytes]\n"
- "fmla vC12.4s, vB2.4s, vA1.s[2]\n"
- "fmla vC22.4s, vB2.4s, vA2.s[2]\n"
- "fmla vC32.4s, vB2.4s, vA3.s[2]\n"
- "ldr qB1, [%x[bptr], #0x00]\n"
- "fmla vC42.4s, vB2.4s, vA4.s[2]\n"
- "fmla vC13.4s, vB3.4s, vA1.s[2]\n"
- "fmla vC23.4s, vB3.4s, vA2.s[2]\n"
- "fmla vC33.4s, vB3.4s, vA3.s[2]\n"
- "ldr qB2, [%x[bptr], #0x10]\n"
- "fmla vC43.4s, vB3.4s, vA4.s[2]\n"
- "fmla vC14.4s, vB4.4s, vA1.s[2]\n"
- "fmla vC24.4s, vB4.4s, vA2.s[2]\n"
- "fmla vC34.4s, vB4.4s, vA3.s[2]\n"
- "ldr qB3, [%x[bptr], #0x20]\n"
- "fmla vC44.4s, vB4.4s, vA4.s[2]\n"
-
- "fmla vC11.4s, vB1.4s, vA1.s[3]\n"
- "fmla vC21.4s, vB1.4s, vA2.s[3]\n"
- "fmla vC31.4s, vB1.4s, vA3.s[3]\n"
- "ldr qB4, [%x[bptr], #0x30]\n"
- "fmla vC41.4s, vB1.4s, vA4.s[3]\n"
- "add %x[bptr], %x[bptr], %x[b_row_stride_bytes]\n"
- "fmla vC12.4s, vB2.4s, vA1.s[3]\n"
- "fmla vC22.4s, vB2.4s, vA2.s[3]\n"
- "fmla vC32.4s, vB2.4s, vA3.s[3]\n"
- "ldr qB1, [%x[bptr], #0x00]\n"
- "fmla vC42.4s, vB2.4s, vA4.s[3]\n"
- "fmla vC13.4s, vB3.4s, vA1.s[3]\n"
- "fmla vC23.4s, vB3.4s, vA2.s[3]\n"
- "fmla vC33.4s, vB3.4s, vA3.s[3]\n"
- "ldr qB2, [%x[bptr], #0x10]\n"
- "fmla vC43.4s, vB3.4s, vA4.s[3]\n"
- "fmla vC14.4s, vB4.4s, vA1.s[3]\n"
- "ldr sA1, [%x[aptr]], #0x04\n"
- "fmla vC24.4s, vB4.4s, vA2.s[3]\n"
- "ldr sA2, [ aptr2], #0x04\n"
- "fmla vC34.4s, vB4.4s, vA3.s[3]\n"
- "ldr qB3, [%x[bptr], #0x20]\n"
- "fmla vC44.4s, vB4.4s, vA4.s[3]\n"
-
- "2:" // Common tail
- "fmla vC11.4s, vB1.4s, vA1.s[0]\n"
- "ldr qB4, [%x[bptr], #0x30]\n"
- "fmla vC12.4s, vB2.4s, vA1.s[0]\n"
- "stp qC11, qC12, [%x[cptr], #0x00]\n"
- "fmla vC13.4s, vB3.4s, vA1.s[0]\n"
- "ldr sA3, [ aptr3], #0x04\n"
- "fmla vC14.4s, vB4.4s, vA1.s[0]\n"
- "stp qC13, qC14, [%x[cptr], #0x20]\n"
- "fmla vC21.4s, vB1.4s, vA2.s[0]\n"
- "add %x[cptr], %x[cptr], %x[c_row_stride_bytes]\n"
- "fmla vC22.4s, vB2.4s, vA2.s[0]\n"
- "stp qC21, qC22, [%x[cptr], #0x00]\n"
- "fmla vC23.4s, vB3.4s, vA2.s[0]\n"
- "ldr sA4, [ aptr4], #0x04\n"
- "fmla vC24.4s, vB4.4s, vA2.s[0]\n"
- "stp qC23, qC24, [%x[cptr], #0x20]\n"
- "fmla vC31.4s, vB1.4s, vA3.s[0]\n"
- "add %x[cptr], %x[cptr], %x[c_row_stride_bytes]\n"
- "fmla vC32.4s, vB2.4s, vA3.s[0]\n"
- "stp qC31, qC32, [%x[cptr], #0x00]\n"
- "fmla vC33.4s, vB3.4s, vA3.s[0]\n"
- "fmla vC34.4s, vB4.4s, vA3.s[0]\n"
- "stp qC33, qC34, [%x[cptr], #0x20]\n"
- "fmla vC41.4s, vB1.4s, vA4.s[0]\n"
- "add %x[cptr], %x[cptr], %x[c_row_stride_bytes]\n"
- "fmla vC42.4s, vB2.4s, vA4.s[0]\n"
- "stp qC41, qC42, [%x[cptr], #0x00]\n"
- "fmla vC43.4s, vB3.4s, vA4.s[0]\n"
- "fmla vC44.4s, vB4.4s, vA4.s[0]\n"
- "stp qC43, qC44, [%x[cptr], #0x20]\n"
- "add %x[cptr], %x[cptr], %x[c_row_stride_bytes]\n"
-
- ".unreq vB4\n" ".unreq qB4\n"
- ".unreq vB3\n" ".unreq qB3\n"
- ".unreq vB2\n" ".unreq qB2\n"
- ".unreq vB1\n" ".unreq qB1\n"
- ".unreq vA4\n" ".unreq qA4\n" ".unreq dA4\n" ".unreq sA4\n"
- ".unreq vA3\n" ".unreq qA3\n" ".unreq dA3\n" ".unreq sA3\n"
- ".unreq vA2\n" ".unreq qA2\n" ".unreq dA2\n" ".unreq sA2\n"
- ".unreq vA1\n" ".unreq qA1\n" ".unreq dA1\n" ".unreq sA1\n"
- ".unreq qC41\n" ".unreq qC42\n" ".unreq qC43\n" ".unreq qC44\n"
- ".unreq vC41\n" ".unreq vC42\n" ".unreq vC43\n" ".unreq vC44\n"
- ".unreq qC31\n" ".unreq qC32\n" ".unreq qC33\n" ".unreq qC34\n"
- ".unreq vC31\n" ".unreq vC32\n" ".unreq vC33\n" ".unreq vC34\n"
- ".unreq qC21\n" ".unreq qC22\n" ".unreq qC23\n" ".unreq qC24\n"
- ".unreq vC21\n" ".unreq vC22\n" ".unreq vC23\n" ".unreq vC24\n"
- ".unreq qC11\n" ".unreq qC12\n" ".unreq qC13\n" ".unreq qC14\n"
- ".unreq vC11\n" ".unreq vC12\n" ".unreq vC13\n" ".unreq vC14\n"
- ".unreq aptr2\n"
- ".unreq aptr3\n"
- ".unreq aptr4\n"
-
- : [aptr] "+r" (aptr),
- [bptr] "+r" (bptr),
- [cptr] "+r" (cptr),
- [k] "+r" (k)
- : [a_row_stride_bytes] "r" (a_row_stride * sizeof(float)),
- [b_row_stride_bytes] "r" (b_row_stride * sizeof(float)),
- [c_row_stride_bytes] "r" (c_row_stride * sizeof(float))
- : "cc", "memory", "x20", "x21", "x22",
- "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10",
- "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20",
- "v21", "v22", "v23"
- );
- }
- }
-}
-
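The sgemm_4x16_impl specialisations in this file differ only in how the final K iterations are retired: the main loop is unrolled four deep, and the last TAIL_SIZE accumulation steps (one, two or three) use narrower s-/d-register loads. A minimal scalar model of that shared control flow, using our own naming and assuming K = 4*n + TAIL_SIZE for some n >= 0:

    #include <cassert>

    // Scalar model of the K-loop layout in sgemm_4x16_impl<TAIL> (sketch only).
    // The "Loop proper" (label 1) and the "Tail iteration" (label 4) each
    // retire four K-steps; the "Common tail" (label 2) retires the last TAIL.
    void k_loop_model(const int K, const int TAIL)
    {
      int k = (K - TAIL) / 4;  // matches 'int k = (K - TAIL_SIZE) / 4' above
      int steps = 0;
      if (k > 0)
      {
        for (int i = 0; i < k - 1; i++) { steps += 4; }  // loop proper
        steps += 4;                                      // tail iteration
      }
      steps += TAIL;                                     // common tail
      assert(steps == K);
    }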
-template <>
-inline void sgemm_4x16_impl<2>(
- const float* const a, const float* const b, float *c,
- const int M, const int K, const int N,
- const int a_row_stride,
- const int b_row_stride,
- const int c_row_stride
-) {
- const int TAIL_SIZE = 2;
- const int M_BLOCK = 4;
- const int N_BLOCK = 16;
-
- const int m_blocks = iceildiv(M, M_BLOCK);
- const int n_blocks = iceildiv(N, N_BLOCK);
-
- // For each block of output rows
- for (int mblock = 0; mblock < m_blocks; mblock++) {
- // For each block of output columns
- for (int nblock = 0; nblock < n_blocks; nblock++) {
- const float *aptr = a + mblock*M_BLOCK*a_row_stride;
- const float *bptr = b + nblock*N_BLOCK;
- float *cptr = c + mblock*M_BLOCK*c_row_stride + nblock*N_BLOCK;
- int k = (K - TAIL_SIZE) / 4;
-
- asm volatile(
- "aptr2 .req X20\n"
- "aptr3 .req X21\n"
- "aptr4 .req X22\n"
- "vC11 .req v0\n" "vC12 .req v1\n" "vC13 .req v2\n" "vC14 .req v3\n"
- "qC11 .req q0\n" "qC12 .req q1\n" "qC13 .req q2\n" "qC14 .req q3\n"
- "vC21 .req v4\n" "vC22 .req v5\n" "vC23 .req v6\n" "vC24 .req v7\n"
- "qC21 .req q4\n" "qC22 .req q5\n" "qC23 .req q6\n" "qC24 .req q7\n"
- "vC31 .req v8\n" "vC32 .req v9\n" "vC33 .req v10\n" "vC34 .req v11\n"
- "qC31 .req q8\n" "qC32 .req q9\n" "qC33 .req q10\n" "qC34 .req q11\n"
- "vC41 .req v12\n" "vC42 .req v13\n" "vC43 .req v14\n" "vC44 .req v15\n"
- "qC41 .req q12\n" "qC42 .req q13\n" "qC43 .req q14\n" "qC44 .req q15\n"
- "vA1 .req v16\n" "qA1 .req q16\n" "dA1 .req d16\n" "sA1 .req s16\n"
- "vA2 .req v17\n" "qA2 .req q17\n" "dA2 .req d17\n" "sA2 .req s17\n"
- "vA3 .req v18\n" "qA3 .req q18\n" "dA3 .req d18\n" "sA3 .req s18\n"
- "vA4 .req v19\n" "qA4 .req q19\n" "dA4 .req d19\n" "sA4 .req s19\n"
- "vB1 .req v20\n" "qB1 .req q20\n"
- "vB2 .req v21\n" "qB2 .req q21\n"
- "vB3 .req v22\n" "qB3 .req q22\n"
- "vB4 .req v23\n" "qB4 .req q23\n"
-
- // Clear accumulators, initialise pointers
- "movi vC11.4s, #0\n"
- "ldr qB1, [%x[bptr], #0x00]\n"
- "movi vC12.4s, #0\n"
- "ldr qB2, [%x[bptr], #0x10]\n"
- "movi vC13.4s, #0\n"
- "ldr qB3, [%x[bptr], #0x20]\n"
- "movi vC14.4s, #0\n"
- "add aptr2, %x[aptr], %x[a_row_stride_bytes]\n"
- "movi vC21.4s, #0\n"
- "add aptr3, aptr2, %x[a_row_stride_bytes]\n"
- "movi vC22.4s, #0\n"
- "add aptr4, aptr3, %x[a_row_stride_bytes]\n"
- "movi vC23.4s, #0\n"
- "cbnz %x[k], 3f\n"
-
- // Prepare for tail in K
- "movi vC24.4s, #0\n"
- "ldr dA1, [%x[aptr]], #0x08\n"
- "movi vC31.4s, #0\n"
- "ldr dA2, [ aptr2], #0x08\n"
- "movi vC32.4s, #0\n"
- "movi vC33.4s, #0\n"
- "movi vC34.4s, #0\n"
- "movi vC41.4s, #0\n"
- "movi vC42.4s, #0\n"
- "movi vC43.4s, #0\n"
- "movi vC44.4s, #0\n"
- "b 2f\n" // Jump to tail
-
- "3:" // Prepare for loop over K
- "movi vC24.4s, #0\n"
- "ldr qA1, [%x[aptr]], #0x10\n"
- "movi vC31.4s, #0\n"
- "ldr qA2, [ aptr2], #0x10\n"
- "movi vC32.4s, #0\n"
- "movi vC33.4s, #0\n"
- "movi vC34.4s, #0\n"
- "movi vC41.4s, #0\n"
- "movi vC42.4s, #0\n"
- "movi vC43.4s, #0\n"
- "movi vC44.4s, #0\n"
- "subs %x[k], %x[k], #1\n"
- "beq 4f\n"
-
- "1:" // Loop proper
- "fmla vC11.4s, vB1.4s, vA1.s[0]\n"
- "ldr qA3, [ aptr3], #0x10\n"
- "fmla vC21.4s, vB1.4s, vA2.s[0]\n"
- "ldr qA4, [ aptr4], #0x10\n"
- "fmla vC31.4s, vB1.4s, vA3.s[0]\n"
- "ldr qB4, [%x[bptr], #0x30]\n"
- "fmla vC41.4s, vB1.4s, vA4.s[0]\n"
- "add %x[bptr], %x[bptr], %x[b_row_stride_bytes]\n"
- "fmla vC12.4s, vB2.4s, vA1.s[0]\n"
- "fmla vC22.4s, vB2.4s, vA2.s[0]\n"
- "fmla vC32.4s, vB2.4s, vA3.s[0]\n"
- "ldr qB1, [%x[bptr], #0x00]\n"
- "fmla vC42.4s, vB2.4s, vA4.s[0]\n"
- "fmla vC13.4s, vB3.4s, vA1.s[0]\n"
- "fmla vC23.4s, vB3.4s, vA2.s[0]\n"
- "fmla vC33.4s, vB3.4s, vA3.s[0]\n"
- "ldr qB2, [%x[bptr], #0x10]\n"
- "fmla vC43.4s, vB3.4s, vA4.s[0]\n"
- "fmla vC14.4s, vB4.4s, vA1.s[0]\n"
- "fmla vC24.4s, vB4.4s, vA2.s[0]\n"
- "fmla vC34.4s, vB4.4s, vA3.s[0]\n"
- "ldr qB3, [%x[bptr], #0x20]\n"
- "fmla vC44.4s, vB4.4s, vA4.s[0]\n"
-
- "fmla vC11.4s, vB1.4s, vA1.s[1]\n"
- "fmla vC21.4s, vB1.4s, vA2.s[1]\n"
- "fmla vC31.4s, vB1.4s, vA3.s[1]\n"
- "ldr qB4, [%x[bptr], #0x30]\n"
- "fmla vC41.4s, vB1.4s, vA4.s[1]\n"
- "add %x[bptr], %x[bptr], %x[b_row_stride_bytes]\n"
- "fmla vC12.4s, vB2.4s, vA1.s[1]\n"
- "fmla vC22.4s, vB2.4s, vA2.s[1]\n"
- "fmla vC32.4s, vB2.4s, vA3.s[1]\n"
- "ldr qB1, [%x[bptr], #0x00]\n"
- "fmla vC42.4s, vB2.4s, vA4.s[1]\n"
- "fmla vC13.4s, vB3.4s, vA1.s[1]\n"
- "fmla vC23.4s, vB3.4s, vA2.s[1]\n"
- "fmla vC33.4s, vB3.4s, vA3.s[1]\n"
- "ldr qB2, [%x[bptr], #0x10]\n"
- "fmla vC43.4s, vB3.4s, vA4.s[1]\n"
- "fmla vC14.4s, vB4.4s, vA1.s[1]\n"
- "fmla vC24.4s, vB4.4s, vA2.s[1]\n"
- "fmla vC34.4s, vB4.4s, vA3.s[1]\n"
- "ldr qB3, [%x[bptr], #0x20]\n"
- "fmla vC44.4s, vB4.4s, vA4.s[1]\n"
-
- "fmla vC11.4s, vB1.4s, vA1.s[2]\n"
- "fmla vC21.4s, vB1.4s, vA2.s[2]\n"
- "fmla vC31.4s, vB1.4s, vA3.s[2]\n"
- "ldr qB4, [%x[bptr], #0x30]\n"
- "fmla vC41.4s, vB1.4s, vA4.s[2]\n"
- "add %x[bptr], %x[bptr], %x[b_row_stride_bytes]\n"
- "fmla vC12.4s, vB2.4s, vA1.s[2]\n"
- "fmla vC22.4s, vB2.4s, vA2.s[2]\n"
- "fmla vC32.4s, vB2.4s, vA3.s[2]\n"
- "ldr qB1, [%x[bptr], #0x00]\n"
- "fmla vC42.4s, vB2.4s, vA4.s[2]\n"
- "fmla vC13.4s, vB3.4s, vA1.s[2]\n"
- "fmla vC23.4s, vB3.4s, vA2.s[2]\n"
- "fmla vC33.4s, vB3.4s, vA3.s[2]\n"
- "ldr qB2, [%x[bptr], #0x10]\n"
- "fmla vC43.4s, vB3.4s, vA4.s[2]\n"
- "fmla vC14.4s, vB4.4s, vA1.s[2]\n"
- "fmla vC24.4s, vB4.4s, vA2.s[2]\n"
- "fmla vC34.4s, vB4.4s, vA3.s[2]\n"
- "ldr qB3, [%x[bptr], #0x20]\n"
- "fmla vC44.4s, vB4.4s, vA4.s[2]\n"
-
- "fmla vC11.4s, vB1.4s, vA1.s[3]\n"
- "fmla vC21.4s, vB1.4s, vA2.s[3]\n"
- "fmla vC31.4s, vB1.4s, vA3.s[3]\n"
- "ldr qB4, [%x[bptr], #0x30]\n"
- "fmla vC41.4s, vB1.4s, vA4.s[3]\n"
- "add %x[bptr], %x[bptr], %x[b_row_stride_bytes]\n"
- "fmla vC12.4s, vB2.4s, vA1.s[3]\n"
- "fmla vC22.4s, vB2.4s, vA2.s[3]\n"
- "fmla vC32.4s, vB2.4s, vA3.s[3]\n"
- "ldr qB1, [%x[bptr], #0x00]\n"
- "fmla vC42.4s, vB2.4s, vA4.s[3]\n"
- "fmla vC13.4s, vB3.4s, vA1.s[3]\n"
- "fmla vC23.4s, vB3.4s, vA2.s[3]\n"
- "fmla vC33.4s, vB3.4s, vA3.s[3]\n"
- "ldr qB2, [%x[bptr], #0x10]\n"
- "fmla vC43.4s, vB3.4s, vA4.s[3]\n"
- "subs %x[k], %x[k], #1\n"
- "fmla vC14.4s, vB4.4s, vA1.s[3]\n"
- "ldr qA1, [%x[aptr]], #0x10\n"
- "fmla vC24.4s, vB4.4s, vA2.s[3]\n"
- "ldr qA2, [ aptr2], #0x10\n"
- "fmla vC34.4s, vB4.4s, vA3.s[3]\n"
- "ldr qB3, [%x[bptr], #0x20]\n"
- "fmla vC44.4s, vB4.4s, vA4.s[3]\n"
- "bne 1b\n"
-
- "4:" // Tail iteration
- "fmla vC11.4s, vB1.4s, vA1.s[0]\n"
- "ldr qA3, [ aptr3], #0x10\n"
- "fmla vC21.4s, vB1.4s, vA2.s[0]\n"
- "ldr qA4, [ aptr4], #0x10\n"
- "fmla vC31.4s, vB1.4s, vA3.s[0]\n"
- "ldr qB4, [%x[bptr], #0x30]\n"
- "fmla vC41.4s, vB1.4s, vA4.s[0]\n"
- "add %x[bptr], %x[bptr], %x[b_row_stride_bytes]\n"
- "fmla vC12.4s, vB2.4s, vA1.s[0]\n"
- "fmla vC22.4s, vB2.4s, vA2.s[0]\n"
- "fmla vC32.4s, vB2.4s, vA3.s[0]\n"
- "ldr qB1, [%x[bptr], #0x00]\n"
- "fmla vC42.4s, vB2.4s, vA4.s[0]\n"
- "fmla vC13.4s, vB3.4s, vA1.s[0]\n"
- "fmla vC23.4s, vB3.4s, vA2.s[0]\n"
- "fmla vC33.4s, vB3.4s, vA3.s[0]\n"
- "ldr qB2, [%x[bptr], #0x10]\n"
- "fmla vC43.4s, vB3.4s, vA4.s[0]\n"
- "fmla vC14.4s, vB4.4s, vA1.s[0]\n"
- "fmla vC24.4s, vB4.4s, vA2.s[0]\n"
- "fmla vC34.4s, vB4.4s, vA3.s[0]\n"
- "ldr qB3, [%x[bptr], #0x20]\n"
- "fmla vC44.4s, vB4.4s, vA4.s[0]\n"
-
- "fmla vC11.4s, vB1.4s, vA1.s[1]\n"
- "fmla vC21.4s, vB1.4s, vA2.s[1]\n"
- "fmla vC31.4s, vB1.4s, vA3.s[1]\n"
- "ldr qB4, [%x[bptr], #0x30]\n"
- "fmla vC41.4s, vB1.4s, vA4.s[1]\n"
- "add %x[bptr], %x[bptr], %x[b_row_stride_bytes]\n"
- "fmla vC12.4s, vB2.4s, vA1.s[1]\n"
- "fmla vC22.4s, vB2.4s, vA2.s[1]\n"
- "fmla vC32.4s, vB2.4s, vA3.s[1]\n"
- "ldr qB1, [%x[bptr], #0x00]\n"
- "fmla vC42.4s, vB2.4s, vA4.s[1]\n"
- "fmla vC13.4s, vB3.4s, vA1.s[1]\n"
- "fmla vC23.4s, vB3.4s, vA2.s[1]\n"
- "fmla vC33.4s, vB3.4s, vA3.s[1]\n"
- "ldr qB2, [%x[bptr], #0x10]\n"
- "fmla vC43.4s, vB3.4s, vA4.s[1]\n"
- "fmla vC14.4s, vB4.4s, vA1.s[1]\n"
- "fmla vC24.4s, vB4.4s, vA2.s[1]\n"
- "fmla vC34.4s, vB4.4s, vA3.s[1]\n"
- "ldr qB3, [%x[bptr], #0x20]\n"
- "fmla vC44.4s, vB4.4s, vA4.s[1]\n"
-
- "fmla vC11.4s, vB1.4s, vA1.s[2]\n"
- "fmla vC21.4s, vB1.4s, vA2.s[2]\n"
- "fmla vC31.4s, vB1.4s, vA3.s[2]\n"
- "ldr qB4, [%x[bptr], #0x30]\n"
- "fmla vC41.4s, vB1.4s, vA4.s[2]\n"
- "add %x[bptr], %x[bptr], %x[b_row_stride_bytes]\n"
- "fmla vC12.4s, vB2.4s, vA1.s[2]\n"
- "fmla vC22.4s, vB2.4s, vA2.s[2]\n"
- "fmla vC32.4s, vB2.4s, vA3.s[2]\n"
- "ldr qB1, [%x[bptr], #0x00]\n"
- "fmla vC42.4s, vB2.4s, vA4.s[2]\n"
- "fmla vC13.4s, vB3.4s, vA1.s[2]\n"
- "fmla vC23.4s, vB3.4s, vA2.s[2]\n"
- "fmla vC33.4s, vB3.4s, vA3.s[2]\n"
- "ldr qB2, [%x[bptr], #0x10]\n"
- "fmla vC43.4s, vB3.4s, vA4.s[2]\n"
- "fmla vC14.4s, vB4.4s, vA1.s[2]\n"
- "fmla vC24.4s, vB4.4s, vA2.s[2]\n"
- "fmla vC34.4s, vB4.4s, vA3.s[2]\n"
- "ldr qB3, [%x[bptr], #0x20]\n"
- "fmla vC44.4s, vB4.4s, vA4.s[2]\n"
-
- "fmla vC11.4s, vB1.4s, vA1.s[3]\n"
- "fmla vC21.4s, vB1.4s, vA2.s[3]\n"
- "fmla vC31.4s, vB1.4s, vA3.s[3]\n"
- "ldr qB4, [%x[bptr], #0x30]\n"
- "fmla vC41.4s, vB1.4s, vA4.s[3]\n"
- "add %x[bptr], %x[bptr], %x[b_row_stride_bytes]\n"
- "fmla vC12.4s, vB2.4s, vA1.s[3]\n"
- "fmla vC22.4s, vB2.4s, vA2.s[3]\n"
- "fmla vC32.4s, vB2.4s, vA3.s[3]\n"
- "ldr qB1, [%x[bptr], #0x00]\n"
- "fmla vC42.4s, vB2.4s, vA4.s[3]\n"
- "fmla vC13.4s, vB3.4s, vA1.s[3]\n"
- "fmla vC23.4s, vB3.4s, vA2.s[3]\n"
- "fmla vC33.4s, vB3.4s, vA3.s[3]\n"
- "ldr qB2, [%x[bptr], #0x10]\n"
- "fmla vC43.4s, vB3.4s, vA4.s[3]\n"
- "fmla vC14.4s, vB4.4s, vA1.s[3]\n"
- "ldr dA1, [%x[aptr]], #0x08\n"
- "fmla vC24.4s, vB4.4s, vA2.s[3]\n"
- "ldr dA2, [ aptr2], #0x08\n"
- "fmla vC34.4s, vB4.4s, vA3.s[3]\n"
- "ldr qB3, [%x[bptr], #0x20]\n"
- "fmla vC44.4s, vB4.4s, vA4.s[3]\n"
-
- "2:" // Common tail
- "fmla vC11.4s, vB1.4s, vA1.s[0]\n"
- "ldr dA3, [ aptr3], #0x08\n"
- "fmla vC21.4s, vB1.4s, vA2.s[0]\n"
- "ldr dA4, [ aptr4], #0x08\n"
- "fmla vC31.4s, vB1.4s, vA3.s[0]\n"
- "ldr qB4, [%x[bptr], #0x30]\n"
- "fmla vC41.4s, vB1.4s, vA4.s[0]\n"
- "add %x[bptr], %x[bptr], %x[b_row_stride_bytes]\n"
- "fmla vC12.4s, vB2.4s, vA1.s[0]\n"
- "fmla vC22.4s, vB2.4s, vA2.s[0]\n"
- "fmla vC32.4s, vB2.4s, vA3.s[0]\n"
- "ldr qB1, [%x[bptr], #0x00]\n"
- "fmla vC42.4s, vB2.4s, vA4.s[0]\n"
- "fmla vC13.4s, vB3.4s, vA1.s[0]\n"
- "fmla vC23.4s, vB3.4s, vA2.s[0]\n"
- "fmla vC33.4s, vB3.4s, vA3.s[0]\n"
- "ldr qB2, [%x[bptr], #0x10]\n"
- "fmla vC43.4s, vB3.4s, vA4.s[0]\n"
- "fmla vC14.4s, vB4.4s, vA1.s[0]\n"
- "fmla vC24.4s, vB4.4s, vA2.s[0]\n"
- "fmla vC34.4s, vB4.4s, vA3.s[0]\n"
- "ldr qB3, [%x[bptr], #0x20]\n"
- "fmla vC44.4s, vB4.4s, vA4.s[0]\n"
-
- "fmla vC11.4s, vB1.4s, vA1.s[1]\n"
- "ldr qB4, [%x[bptr], #0x30]\n"
- "fmla vC12.4s, vB2.4s, vA1.s[1]\n"
- "stp qC11, qC12, [%x[cptr], #0x00]\n"
- "fmla vC13.4s, vB3.4s, vA1.s[1]\n"
- "fmla vC14.4s, vB4.4s, vA1.s[1]\n"
- "stp qC13, qC14, [%x[cptr], #0x20]\n"
- "fmla vC21.4s, vB1.4s, vA2.s[1]\n"
- "add %x[cptr], %x[cptr], %x[c_row_stride_bytes]\n"
- "fmla vC22.4s, vB2.4s, vA2.s[1]\n"
- "stp qC21, qC22, [%x[cptr], #0x00]\n"
- "fmla vC23.4s, vB3.4s, vA2.s[1]\n"
- "fmla vC24.4s, vB4.4s, vA2.s[1]\n"
- "stp qC23, qC24, [%x[cptr], #0x20]\n"
- "fmla vC31.4s, vB1.4s, vA3.s[1]\n"
- "add %x[cptr], %x[cptr], %x[c_row_stride_bytes]\n"
- "fmla vC32.4s, vB2.4s, vA3.s[1]\n"
- "stp qC31, qC32, [%x[cptr], #0x00]\n"
- "fmla vC33.4s, vB3.4s, vA3.s[1]\n"
- "fmla vC34.4s, vB4.4s, vA3.s[1]\n"
- "stp qC33, qC34, [%x[cptr], #0x20]\n"
- "fmla vC41.4s, vB1.4s, vA4.s[1]\n"
- "add %x[cptr], %x[cptr], %x[c_row_stride_bytes]\n"
- "fmla vC42.4s, vB2.4s, vA4.s[1]\n"
- "stp qC41, qC42, [%x[cptr], #0x00]\n"
- "fmla vC43.4s, vB3.4s, vA4.s[1]\n"
- "fmla vC44.4s, vB4.4s, vA4.s[1]\n"
- "stp qC43, qC44, [%x[cptr], #0x20]\n"
- "add %x[cptr], %x[cptr], %x[c_row_stride_bytes]\n"
-
- ".unreq vB4\n" ".unreq qB4\n"
- ".unreq vB3\n" ".unreq qB3\n"
- ".unreq vB2\n" ".unreq qB2\n"
- ".unreq vB1\n" ".unreq qB1\n"
- ".unreq vA4\n" ".unreq qA4\n" ".unreq dA4\n" ".unreq sA4\n"
- ".unreq vA3\n" ".unreq qA3\n" ".unreq dA3\n" ".unreq sA3\n"
- ".unreq vA2\n" ".unreq qA2\n" ".unreq dA2\n" ".unreq sA2\n"
- ".unreq vA1\n" ".unreq qA1\n" ".unreq dA1\n" ".unreq sA1\n"
- ".unreq qC41\n" ".unreq qC42\n" ".unreq qC43\n" ".unreq qC44\n"
- ".unreq vC41\n" ".unreq vC42\n" ".unreq vC43\n" ".unreq vC44\n"
- ".unreq qC31\n" ".unreq qC32\n" ".unreq qC33\n" ".unreq qC34\n"
- ".unreq vC31\n" ".unreq vC32\n" ".unreq vC33\n" ".unreq vC34\n"
- ".unreq qC21\n" ".unreq qC22\n" ".unreq qC23\n" ".unreq qC24\n"
- ".unreq vC21\n" ".unreq vC22\n" ".unreq vC23\n" ".unreq vC24\n"
- ".unreq qC11\n" ".unreq qC12\n" ".unreq qC13\n" ".unreq qC14\n"
- ".unreq vC11\n" ".unreq vC12\n" ".unreq vC13\n" ".unreq vC14\n"
- ".unreq aptr2\n"
- ".unreq aptr3\n"
- ".unreq aptr4\n"
-
- : [aptr] "+r" (aptr),
- [bptr] "+r" (bptr),
- [cptr] "+r" (cptr),
- [k] "+r" (k)
- : [a_row_stride_bytes] "r" (a_row_stride * sizeof(float)),
- [b_row_stride_bytes] "r" (b_row_stride * sizeof(float)),
- [c_row_stride_bytes] "r" (c_row_stride * sizeof(float))
- : "cc", "memory", "x20", "x21", "x22",
- "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10",
- "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20",
- "v21", "v22", "v23"
- );
- }
- }
-}
-
-template <>
-inline void sgemm_4x16_impl<3>(
- const float* const a, const float* const b, float *c,
- const int M, const int K, const int N,
- const int a_row_stride,
- const int b_row_stride,
- const int c_row_stride
-) {
- const int TAIL_SIZE = 3;
- const int M_BLOCK = 4;
- const int N_BLOCK = 16;
-
- const int m_blocks = iceildiv(M, M_BLOCK);
- const int n_blocks = iceildiv(N, N_BLOCK);
-
- // For each block of output rows
- for (int mblock = 0; mblock < m_blocks; mblock++) {
- // For each block of output columns
- for (int nblock = 0; nblock < n_blocks; nblock++) {
- const float *aptr = a + mblock*M_BLOCK*a_row_stride;
- const float *bptr = b + nblock*N_BLOCK;
- float *cptr = c + mblock*M_BLOCK*c_row_stride + nblock*N_BLOCK;
- int k = (K - TAIL_SIZE) / 4;
-
- asm volatile(
- "aptr2 .req X20\n"
- "aptr3 .req X21\n"
- "aptr4 .req X22\n"
- "vC11 .req v0\n" "vC12 .req v1\n" "vC13 .req v2\n" "vC14 .req v3\n"
- "qC11 .req q0\n" "qC12 .req q1\n" "qC13 .req q2\n" "qC14 .req q3\n"
- "vC21 .req v4\n" "vC22 .req v5\n" "vC23 .req v6\n" "vC24 .req v7\n"
- "qC21 .req q4\n" "qC22 .req q5\n" "qC23 .req q6\n" "qC24 .req q7\n"
- "vC31 .req v8\n" "vC32 .req v9\n" "vC33 .req v10\n" "vC34 .req v11\n"
- "qC31 .req q8\n" "qC32 .req q9\n" "qC33 .req q10\n" "qC34 .req q11\n"
- "vC41 .req v12\n" "vC42 .req v13\n" "vC43 .req v14\n" "vC44 .req v15\n"
- "qC41 .req q12\n" "qC42 .req q13\n" "qC43 .req q14\n" "qC44 .req q15\n"
- "vA1 .req v16\n" "qA1 .req q16\n" "dA1 .req d16\n" "sA1 .req s16\n"
- "vA2 .req v17\n" "qA2 .req q17\n" "dA2 .req d17\n" "sA2 .req s17\n"
- "vA3 .req v18\n" "qA3 .req q18\n" "dA3 .req d18\n" "sA3 .req s18\n"
- "vA4 .req v19\n" "qA4 .req q19\n" "dA4 .req d19\n" "sA4 .req s19\n"
- "vB1 .req v20\n" "qB1 .req q20\n"
- "vB2 .req v21\n" "qB2 .req q21\n"
- "vB3 .req v22\n" "qB3 .req q22\n"
- "vB4 .req v23\n" "qB4 .req q23\n"
-
- // Clear accumulators, initialise pointers
- "movi vC11.4s, #0\n"
- "ldr qB1, [%x[bptr], #0x00]\n"
- "movi vC12.4s, #0\n"
- "ldr qB2, [%x[bptr], #0x10]\n"
- "movi vC13.4s, #0\n"
- "ldr qB3, [%x[bptr], #0x20]\n"
- "movi vC14.4s, #0\n"
- "add aptr2, %x[aptr], %x[a_row_stride_bytes]\n"
- "movi vC21.4s, #0\n"
- "add aptr3, aptr2, %x[a_row_stride_bytes]\n"
- "movi vC22.4s, #0\n"
- "add aptr4, aptr3, %x[a_row_stride_bytes]\n"
- "movi vC23.4s, #0\n"
- "cbnz %x[k], 3f\n"
-
- // Prepare for tail in K
- "movi vC24.4s, #0\n"
- "ldr dA1, [%x[aptr]], #0x08\n"
- "movi vC31.4s, #0\n"
- "ldr dA2, [ aptr2], #0x08\n"
- "movi vC32.4s, #0\n"
- "movi vC33.4s, #0\n"
- "movi vC34.4s, #0\n"
- "movi vC41.4s, #0\n"
- "movi vC42.4s, #0\n"
- "movi vC43.4s, #0\n"
- "movi vC44.4s, #0\n"
- "b 2f\n" // Jump to tail
-
- "3:" // Prepare for loop over K
- "movi vC24.4s, #0\n"
- "ldr qA1, [%x[aptr]], #0x10\n"
- "movi vC31.4s, #0\n"
- "ldr qA2, [ aptr2], #0x10\n"
- "movi vC32.4s, #0\n"
- "movi vC33.4s, #0\n"
- "movi vC34.4s, #0\n"
- "movi vC41.4s, #0\n"
- "movi vC42.4s, #0\n"
- "movi vC43.4s, #0\n"
- "movi vC44.4s, #0\n"
- "subs %x[k], %x[k], #1\n"
- "beq 4f\n"
-
- "1:" // Loop proper
- "fmla vC11.4s, vB1.4s, vA1.s[0]\n"
- "ldr qA3, [ aptr3], #0x10\n"
- "fmla vC21.4s, vB1.4s, vA2.s[0]\n"
- "ldr qA4, [ aptr4], #0x10\n"
- "fmla vC31.4s, vB1.4s, vA3.s[0]\n"
- "ldr qB4, [%x[bptr], #0x30]\n"
- "fmla vC41.4s, vB1.4s, vA4.s[0]\n"
- "add %x[bptr], %x[bptr], %x[b_row_stride_bytes]\n"
- "fmla vC12.4s, vB2.4s, vA1.s[0]\n"
- "fmla vC22.4s, vB2.4s, vA2.s[0]\n"
- "fmla vC32.4s, vB2.4s, vA3.s[0]\n"
- "ldr qB1, [%x[bptr], #0x00]\n"
- "fmla vC42.4s, vB2.4s, vA4.s[0]\n"
- "fmla vC13.4s, vB3.4s, vA1.s[0]\n"
- "fmla vC23.4s, vB3.4s, vA2.s[0]\n"
- "fmla vC33.4s, vB3.4s, vA3.s[0]\n"
- "ldr qB2, [%x[bptr], #0x10]\n"
- "fmla vC43.4s, vB3.4s, vA4.s[0]\n"
- "fmla vC14.4s, vB4.4s, vA1.s[0]\n"
- "fmla vC24.4s, vB4.4s, vA2.s[0]\n"
- "fmla vC34.4s, vB4.4s, vA3.s[0]\n"
- "ldr qB3, [%x[bptr], #0x20]\n"
- "fmla vC44.4s, vB4.4s, vA4.s[0]\n"
-
- "fmla vC11.4s, vB1.4s, vA1.s[1]\n"
- "fmla vC21.4s, vB1.4s, vA2.s[1]\n"
- "fmla vC31.4s, vB1.4s, vA3.s[1]\n"
- "ldr qB4, [%x[bptr], #0x30]\n"
- "fmla vC41.4s, vB1.4s, vA4.s[1]\n"
- "add %x[bptr], %x[bptr], %x[b_row_stride_bytes]\n"
- "fmla vC12.4s, vB2.4s, vA1.s[1]\n"
- "fmla vC22.4s, vB2.4s, vA2.s[1]\n"
- "fmla vC32.4s, vB2.4s, vA3.s[1]\n"
- "ldr qB1, [%x[bptr], #0x00]\n"
- "fmla vC42.4s, vB2.4s, vA4.s[1]\n"
- "fmla vC13.4s, vB3.4s, vA1.s[1]\n"
- "fmla vC23.4s, vB3.4s, vA2.s[1]\n"
- "fmla vC33.4s, vB3.4s, vA3.s[1]\n"
- "ldr qB2, [%x[bptr], #0x10]\n"
- "fmla vC43.4s, vB3.4s, vA4.s[1]\n"
- "fmla vC14.4s, vB4.4s, vA1.s[1]\n"
- "fmla vC24.4s, vB4.4s, vA2.s[1]\n"
- "fmla vC34.4s, vB4.4s, vA3.s[1]\n"
- "ldr qB3, [%x[bptr], #0x20]\n"
- "fmla vC44.4s, vB4.4s, vA4.s[1]\n"
-
- "fmla vC11.4s, vB1.4s, vA1.s[2]\n"
- "fmla vC21.4s, vB1.4s, vA2.s[2]\n"
- "fmla vC31.4s, vB1.4s, vA3.s[2]\n"
- "ldr qB4, [%x[bptr], #0x30]\n"
- "fmla vC41.4s, vB1.4s, vA4.s[2]\n"
- "add %x[bptr], %x[bptr], %x[b_row_stride_bytes]\n"
- "fmla vC12.4s, vB2.4s, vA1.s[2]\n"
- "fmla vC22.4s, vB2.4s, vA2.s[2]\n"
- "fmla vC32.4s, vB2.4s, vA3.s[2]\n"
- "ldr qB1, [%x[bptr], #0x00]\n"
- "fmla vC42.4s, vB2.4s, vA4.s[2]\n"
- "fmla vC13.4s, vB3.4s, vA1.s[2]\n"
- "fmla vC23.4s, vB3.4s, vA2.s[2]\n"
- "fmla vC33.4s, vB3.4s, vA3.s[2]\n"
- "ldr qB2, [%x[bptr], #0x10]\n"
- "fmla vC43.4s, vB3.4s, vA4.s[2]\n"
- "fmla vC14.4s, vB4.4s, vA1.s[2]\n"
- "fmla vC24.4s, vB4.4s, vA2.s[2]\n"
- "fmla vC34.4s, vB4.4s, vA3.s[2]\n"
- "ldr qB3, [%x[bptr], #0x20]\n"
- "fmla vC44.4s, vB4.4s, vA4.s[2]\n"
-
- "fmla vC11.4s, vB1.4s, vA1.s[3]\n"
- "fmla vC21.4s, vB1.4s, vA2.s[3]\n"
- "fmla vC31.4s, vB1.4s, vA3.s[3]\n"
- "ldr qB4, [%x[bptr], #0x30]\n"
- "fmla vC41.4s, vB1.4s, vA4.s[3]\n"
- "add %x[bptr], %x[bptr], %x[b_row_stride_bytes]\n"
- "fmla vC12.4s, vB2.4s, vA1.s[3]\n"
- "fmla vC22.4s, vB2.4s, vA2.s[3]\n"
- "fmla vC32.4s, vB2.4s, vA3.s[3]\n"
- "ldr qB1, [%x[bptr], #0x00]\n"
- "fmla vC42.4s, vB2.4s, vA4.s[3]\n"
- "fmla vC13.4s, vB3.4s, vA1.s[3]\n"
- "fmla vC23.4s, vB3.4s, vA2.s[3]\n"
- "fmla vC33.4s, vB3.4s, vA3.s[3]\n"
- "ldr qB2, [%x[bptr], #0x10]\n"
- "fmla vC43.4s, vB3.4s, vA4.s[3]\n"
- "subs %x[k], %x[k], #1\n"
- "fmla vC14.4s, vB4.4s, vA1.s[3]\n"
- "ldr qA1, [%x[aptr]], #0x10\n"
- "fmla vC24.4s, vB4.4s, vA2.s[3]\n"
- "ldr qA2, [ aptr2], #0x10\n"
- "fmla vC34.4s, vB4.4s, vA3.s[3]\n"
- "ldr qB3, [%x[bptr], #0x20]\n"
- "fmla vC44.4s, vB4.4s, vA4.s[3]\n"
- "bne 1b\n"
-
- "4:" // Tail iteration
- "fmla vC11.4s, vB1.4s, vA1.s[0]\n"
- "ldr qA3, [ aptr3], #0x10\n"
- "fmla vC21.4s, vB1.4s, vA2.s[0]\n"
- "ldr qA4, [ aptr4], #0x10\n"
- "fmla vC31.4s, vB1.4s, vA3.s[0]\n"
- "ldr qB4, [%x[bptr], #0x30]\n"
- "fmla vC41.4s, vB1.4s, vA4.s[0]\n"
- "add %x[bptr], %x[bptr], %x[b_row_stride_bytes]\n"
- "fmla vC12.4s, vB2.4s, vA1.s[0]\n"
- "fmla vC22.4s, vB2.4s, vA2.s[0]\n"
- "fmla vC32.4s, vB2.4s, vA3.s[0]\n"
- "ldr qB1, [%x[bptr], #0x00]\n"
- "fmla vC42.4s, vB2.4s, vA4.s[0]\n"
- "fmla vC13.4s, vB3.4s, vA1.s[0]\n"
- "fmla vC23.4s, vB3.4s, vA2.s[0]\n"
- "fmla vC33.4s, vB3.4s, vA3.s[0]\n"
- "ldr qB2, [%x[bptr], #0x10]\n"
- "fmla vC43.4s, vB3.4s, vA4.s[0]\n"
- "fmla vC14.4s, vB4.4s, vA1.s[0]\n"
- "fmla vC24.4s, vB4.4s, vA2.s[0]\n"
- "fmla vC34.4s, vB4.4s, vA3.s[0]\n"
- "ldr qB3, [%x[bptr], #0x20]\n"
- "fmla vC44.4s, vB4.4s, vA4.s[0]\n"
-
- "fmla vC11.4s, vB1.4s, vA1.s[1]\n"
- "fmla vC21.4s, vB1.4s, vA2.s[1]\n"
- "fmla vC31.4s, vB1.4s, vA3.s[1]\n"
- "ldr qB4, [%x[bptr], #0x30]\n"
- "fmla vC41.4s, vB1.4s, vA4.s[1]\n"
- "add %x[bptr], %x[bptr], %x[b_row_stride_bytes]\n"
- "fmla vC12.4s, vB2.4s, vA1.s[1]\n"
- "fmla vC22.4s, vB2.4s, vA2.s[1]\n"
- "fmla vC32.4s, vB2.4s, vA3.s[1]\n"
- "ldr qB1, [%x[bptr], #0x00]\n"
- "fmla vC42.4s, vB2.4s, vA4.s[1]\n"
- "fmla vC13.4s, vB3.4s, vA1.s[1]\n"
- "fmla vC23.4s, vB3.4s, vA2.s[1]\n"
- "fmla vC33.4s, vB3.4s, vA3.s[1]\n"
- "ldr qB2, [%x[bptr], #0x10]\n"
- "fmla vC43.4s, vB3.4s, vA4.s[1]\n"
- "fmla vC14.4s, vB4.4s, vA1.s[1]\n"
- "fmla vC24.4s, vB4.4s, vA2.s[1]\n"
- "fmla vC34.4s, vB4.4s, vA3.s[1]\n"
- "ldr qB3, [%x[bptr], #0x20]\n"
- "fmla vC44.4s, vB4.4s, vA4.s[1]\n"
-
- "fmla vC11.4s, vB1.4s, vA1.s[2]\n"
- "fmla vC21.4s, vB1.4s, vA2.s[2]\n"
- "fmla vC31.4s, vB1.4s, vA3.s[2]\n"
- "ldr qB4, [%x[bptr], #0x30]\n"
- "fmla vC41.4s, vB1.4s, vA4.s[2]\n"
- "add %x[bptr], %x[bptr], %x[b_row_stride_bytes]\n"
- "fmla vC12.4s, vB2.4s, vA1.s[2]\n"
- "fmla vC22.4s, vB2.4s, vA2.s[2]\n"
- "fmla vC32.4s, vB2.4s, vA3.s[2]\n"
- "ldr qB1, [%x[bptr], #0x00]\n"
- "fmla vC42.4s, vB2.4s, vA4.s[2]\n"
- "fmla vC13.4s, vB3.4s, vA1.s[2]\n"
- "fmla vC23.4s, vB3.4s, vA2.s[2]\n"
- "fmla vC33.4s, vB3.4s, vA3.s[2]\n"
- "ldr qB2, [%x[bptr], #0x10]\n"
- "fmla vC43.4s, vB3.4s, vA4.s[2]\n"
- "fmla vC14.4s, vB4.4s, vA1.s[2]\n"
- "fmla vC24.4s, vB4.4s, vA2.s[2]\n"
- "fmla vC34.4s, vB4.4s, vA3.s[2]\n"
- "ldr qB3, [%x[bptr], #0x20]\n"
- "fmla vC44.4s, vB4.4s, vA4.s[2]\n"
-
- "fmla vC11.4s, vB1.4s, vA1.s[3]\n"
- "fmla vC21.4s, vB1.4s, vA2.s[3]\n"
- "fmla vC31.4s, vB1.4s, vA3.s[3]\n"
- "ldr qB4, [%x[bptr], #0x30]\n"
- "fmla vC41.4s, vB1.4s, vA4.s[3]\n"
- "add %x[bptr], %x[bptr], %x[b_row_stride_bytes]\n"
- "fmla vC12.4s, vB2.4s, vA1.s[3]\n"
- "fmla vC22.4s, vB2.4s, vA2.s[3]\n"
- "fmla vC32.4s, vB2.4s, vA3.s[3]\n"
- "ldr qB1, [%x[bptr], #0x00]\n"
- "fmla vC42.4s, vB2.4s, vA4.s[3]\n"
- "fmla vC13.4s, vB3.4s, vA1.s[3]\n"
- "fmla vC23.4s, vB3.4s, vA2.s[3]\n"
- "fmla vC33.4s, vB3.4s, vA3.s[3]\n"
- "ldr qB2, [%x[bptr], #0x10]\n"
- "fmla vC43.4s, vB3.4s, vA4.s[3]\n"
- "fmla vC14.4s, vB4.4s, vA1.s[3]\n"
- "ldr dA1, [%x[aptr]], #0x08\n"
- "fmla vC24.4s, vB4.4s, vA2.s[3]\n"
- "ldr dA2, [ aptr2], #0x08\n"
- "fmla vC34.4s, vB4.4s, vA3.s[3]\n"
- "ldr qB3, [%x[bptr], #0x20]\n"
- "fmla vC44.4s, vB4.4s, vA4.s[3]\n"
-
- "2:" // Common tail
- "fmla vC11.4s, vB1.4s, vA1.s[0]\n"
- "ldr dA3, [ aptr3], #0x08\n"
- "fmla vC21.4s, vB1.4s, vA2.s[0]\n"
- "ldr dA4, [ aptr4], #0x08\n"
- "fmla vC31.4s, vB1.4s, vA3.s[0]\n"
- "ldr qB4, [%x[bptr], #0x30]\n"
- "fmla vC41.4s, vB1.4s, vA4.s[0]\n"
- "add %x[bptr], %x[bptr], %x[b_row_stride_bytes]\n"
- "fmla vC12.4s, vB2.4s, vA1.s[0]\n"
- "fmla vC22.4s, vB2.4s, vA2.s[0]\n"
- "fmla vC32.4s, vB2.4s, vA3.s[0]\n"
- "ldr qB1, [%x[bptr], #0x00]\n"
- "fmla vC42.4s, vB2.4s, vA4.s[0]\n"
- "fmla vC13.4s, vB3.4s, vA1.s[0]\n"
- "fmla vC23.4s, vB3.4s, vA2.s[0]\n"
- "fmla vC33.4s, vB3.4s, vA3.s[0]\n"
- "ldr qB2, [%x[bptr], #0x10]\n"
- "fmla vC43.4s, vB3.4s, vA4.s[0]\n"
- "fmla vC14.4s, vB4.4s, vA1.s[0]\n"
- "fmla vC24.4s, vB4.4s, vA2.s[0]\n"
- "fmla vC34.4s, vB4.4s, vA3.s[0]\n"
- "ldr qB3, [%x[bptr], #0x20]\n"
- "fmla vC44.4s, vB4.4s, vA4.s[0]\n"
-
- "fmla vC11.4s, vB1.4s, vA1.s[1]\n"
- "fmla vC21.4s, vB1.4s, vA2.s[1]\n"
- "fmla vC31.4s, vB1.4s, vA3.s[1]\n"
- "ldr qB4, [%x[bptr], #0x30]\n"
- "fmla vC41.4s, vB1.4s, vA4.s[1]\n"
- "add %x[bptr], %x[bptr], %x[b_row_stride_bytes]\n"
- "fmla vC12.4s, vB2.4s, vA1.s[1]\n"
- "fmla vC22.4s, vB2.4s, vA2.s[1]\n"
- "fmla vC32.4s, vB2.4s, vA3.s[1]\n"
- "ldr qB1, [%x[bptr], #0x00]\n"
- "fmla vC42.4s, vB2.4s, vA4.s[1]\n"
- "fmla vC13.4s, vB3.4s, vA1.s[1]\n"
- "fmla vC23.4s, vB3.4s, vA2.s[1]\n"
- "fmla vC33.4s, vB3.4s, vA3.s[1]\n"
- "ldr qB2, [%x[bptr], #0x10]\n"
- "fmla vC43.4s, vB3.4s, vA4.s[1]\n"
- "fmla vC14.4s, vB4.4s, vA1.s[1]\n"
- "ldr sA1, [%x[aptr]], #0x04\n"
- "fmla vC24.4s, vB4.4s, vA2.s[1]\n"
- "ldr sA2, [ aptr2], #0x04\n"
- "fmla vC34.4s, vB4.4s, vA3.s[1]\n"
- "ldr qB3, [%x[bptr], #0x20]\n"
- "fmla vC44.4s, vB4.4s, vA4.s[1]\n"
-
- "fmla vC11.4s, vB1.4s, vA1.s[0]\n"
- "ldr qB4, [%x[bptr], #0x30]\n"
- "fmla vC12.4s, vB2.4s, vA1.s[0]\n"
- "stp qC11, qC12, [%x[cptr], #0x00]\n"
- "fmla vC13.4s, vB3.4s, vA1.s[0]\n"
- "ldr sA3, [ aptr3], #0x04\n"
- "fmla vC14.4s, vB4.4s, vA1.s[0]\n"
- "stp qC13, qC14, [%x[cptr], #0x20]\n"
- "fmla vC21.4s, vB1.4s, vA2.s[0]\n"
- "add %x[cptr], %x[cptr], %x[c_row_stride_bytes]\n"
- "fmla vC22.4s, vB2.4s, vA2.s[0]\n"
- "stp qC21, qC22, [%x[cptr], #0x00]\n"
- "fmla vC23.4s, vB3.4s, vA2.s[0]\n"
- "ldr sA4, [ aptr4], #0x04\n"
- "fmla vC24.4s, vB4.4s, vA2.s[0]\n"
- "stp qC23, qC24, [%x[cptr], #0x20]\n"
- "fmla vC31.4s, vB1.4s, vA3.s[0]\n"
- "add %x[cptr], %x[cptr], %x[c_row_stride_bytes]\n"
- "fmla vC32.4s, vB2.4s, vA3.s[0]\n"
- "stp qC31, qC32, [%x[cptr], #0x00]\n"
- "fmla vC33.4s, vB3.4s, vA3.s[0]\n"
- "fmla vC34.4s, vB4.4s, vA3.s[0]\n"
- "stp qC33, qC34, [%x[cptr], #0x20]\n"
- "fmla vC41.4s, vB1.4s, vA4.s[0]\n"
- "add %x[cptr], %x[cptr], %x[c_row_stride_bytes]\n"
- "fmla vC42.4s, vB2.4s, vA4.s[0]\n"
- "stp qC41, qC42, [%x[cptr], #0x00]\n"
- "fmla vC43.4s, vB3.4s, vA4.s[0]\n"
- "fmla vC44.4s, vB4.4s, vA4.s[0]\n"
- "stp qC43, qC44, [%x[cptr], #0x20]\n"
- "add %x[cptr], %x[cptr], %x[c_row_stride_bytes]\n"
-
- ".unreq vB4\n" ".unreq qB4\n"
- ".unreq vB3\n" ".unreq qB3\n"
- ".unreq vB2\n" ".unreq qB2\n"
- ".unreq vB1\n" ".unreq qB1\n"
- ".unreq vA4\n" ".unreq qA4\n" ".unreq dA4\n" ".unreq sA4\n"
- ".unreq vA3\n" ".unreq qA3\n" ".unreq dA3\n" ".unreq sA3\n"
- ".unreq vA2\n" ".unreq qA2\n" ".unreq dA2\n" ".unreq sA2\n"
- ".unreq vA1\n" ".unreq qA1\n" ".unreq dA1\n" ".unreq sA1\n"
- ".unreq qC41\n" ".unreq qC42\n" ".unreq qC43\n" ".unreq qC44\n"
- ".unreq vC41\n" ".unreq vC42\n" ".unreq vC43\n" ".unreq vC44\n"
- ".unreq qC31\n" ".unreq qC32\n" ".unreq qC33\n" ".unreq qC34\n"
- ".unreq vC31\n" ".unreq vC32\n" ".unreq vC33\n" ".unreq vC34\n"
- ".unreq qC21\n" ".unreq qC22\n" ".unreq qC23\n" ".unreq qC24\n"
- ".unreq vC21\n" ".unreq vC22\n" ".unreq vC23\n" ".unreq vC24\n"
- ".unreq qC11\n" ".unreq qC12\n" ".unreq qC13\n" ".unreq qC14\n"
- ".unreq vC11\n" ".unreq vC12\n" ".unreq vC13\n" ".unreq vC14\n"
- ".unreq aptr2\n"
- ".unreq aptr3\n"
- ".unreq aptr4\n"
-
- : [aptr] "+r" (aptr),
- [bptr] "+r" (bptr),
- [cptr] "+r" (cptr),
- [k] "+r" (k)
- : [a_row_stride_bytes] "r" (a_row_stride * sizeof(float)),
- [b_row_stride_bytes] "r" (b_row_stride * sizeof(float)),
- [c_row_stride_bytes] "r" (c_row_stride * sizeof(float))
- : "cc", "memory", "x20", "x21", "x22",
- "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10",
- "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20",
- "v21", "v22", "v23"
- );
- }
- }
-}
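The register aliases in these kernels encode a 4x16 accumulator tile: vCij holds row i of the output block, columns 4*(j-1) to 4*j-1, and every fmla broadcasts one element of A (vAi.s[t]) against a four-wide column group of B. As a cross-check, a scalar reference for what one (mblock, nblock) block computes; this is illustrative only and ignores the M/N remainder handling:

    // Scalar reference for a single 4x16 output block of sgemm_4x16_impl.
    void sgemm_4x16_block_ref(const float *a, const float *b, float *c,
                              const int K, const int a_row_stride,
                              const int b_row_stride, const int c_row_stride)
    {
      for (int i = 0; i < 4; i++)        // M_BLOCK rows of C
      {
        for (int j = 0; j < 16; j++)     // N_BLOCK columns of C
        {
          float acc = 0.0f;              // accumulators start at zero (movi ... #0)
          for (int k = 0; k < K; k++)
          {
            acc += a[i*a_row_stride + k] * b[k*b_row_stride + j];
          }
          c[i*c_row_stride + j] = acc;
        }
      }
    }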
diff --git a/arm_compute/core/NEON/kernels/convolution/winograd/transforms/input.hpp b/arm_compute/core/NEON/kernels/convolution/winograd/transforms/input.hpp
deleted file mode 100644
index b813bbb25c..0000000000
--- a/arm_compute/core/NEON/kernels/convolution/winograd/transforms/input.hpp
+++ /dev/null
@@ -1,349 +0,0 @@
-/*
- * Copyright (c) 2017 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#pragma once
-#include "../winograd_gemm.hpp"
-
-namespace winograd
-{
- /***************************************************************************/
- /* Instance-less API */
- template <int KernelRows, int KernelCols, int InnerTileRows, int InnerTileCols, typename T>
- void InputTransformImpl<KernelRows, KernelCols, InnerTileRows, InnerTileCols, T>::execute(
- const T* const input, /** Input tensor data */
- const int n_batches, /** Number of batches in input tensor. */
- const int in_batch_stride, /** Stride between batches of the input. */
- const int n_rows, /** Number of rows in input tensor. */
- const int in_row_stride, /** Stride between rows of the input. */
- const int n_cols, /** Number of columns in input tensor. */
- const int in_col_stride, /** Stride between columns of the input. */
- const int n_channels, /** Number of channels in input tensor. */
- const PaddingType padding, /** Padding type. */
- const int tile_M, /** Number of rows of tiles. */
- const int tile_N, /** Number of columns of tiles. */
- T* const output, /** Base of output matrices. */
- const int matrix_stride, /** Stride between output matrices. */
- const int matrix_batch_stride, /** Stride between batches within the matrix. */
- const int matrix_row_stride /** Stride within matrices. */
- )
- {
- // Compute the padding required on each edge of the image
- const int pad_top = (padding == PADDING_SAME) ? (KernelRows - 1) / 2 : 0;
- const int pad_left = (padding == PADDING_SAME) ? (KernelCols - 1) / 2 : 0;
-
- // Compute striding values (assuming NHWC ordered data)
- const int output_col_stride = matrix_row_stride;
- const int output_row_stride = tile_N * output_col_stride;
-
- // Loop over batches
- for (int batch = 0; batch < n_batches; batch++)
- {
- // Pointer to the batch
- const T* const input_base_batch = input + batch * in_batch_stride;
- T* const outptr_base_batch = output + batch * matrix_batch_stride;
-
- // Loop over rows of tiles
- for (int tile_i = 0; tile_i < tile_M; tile_i++)
- {
- // Padding (top + bottom) for the row
- const int row_top = tile_i*(InnerTileRows - overlap_rows) - pad_top;
- const int row_bottom = row_top + InnerTileRows;
- const int row_pad_top = std::max(0, pad_top - tile_i*(InnerTileRows - overlap_rows));
- const int row_pad_bottom = (row_bottom <= n_rows) ? 0 : row_bottom - n_rows;
-
- // Pointer to the row
- const int row_offset = std::min(0, row_pad_top - pad_top);
- const T* const input_base_row = (
- input_base_batch + ((InnerTileRows - overlap_rows)*tile_i + row_offset)*in_row_stride
- );
- T* const outptr_base_row = outptr_base_batch + tile_i*output_row_stride;
-
- // Process the row
- process_tile_row(
- tile_N, n_channels,
- input_base_row, in_row_stride, in_col_stride,
- outptr_base_row, matrix_stride, matrix_row_stride,
- row_pad_top, pad_left, row_pad_bottom, n_cols
- );
- }
- }
- }
-
-
- template <int KernelRows, int InnerTileRows, typename T>
- void InputTransformImpl<KernelRows, 1, InnerTileRows, 1, T>::execute(
- const T* const input, /** Input tensor data */
- const int n_batches, /** Number of batches in input tensor. */
- const int in_batch_stride, /** Stride between batches of the input. */
- const int n_rows, /** Number of rows in input tensor. */
- const int in_row_stride, /** Stride between rows of the input. */
- const int n_cols, /** Number of columns in input tensor. */
- const int in_col_stride, /** Stride between columns of the input. */
- const int n_channels, /** Number of channels in input tensor. */
- const PaddingType padding, /** Padding type. */
- const int tile_M, /** Number of rows of tiles. */
- const int tile_N, /** Number of columns of tiles. */
- T* const output, /** Base of output matrices. */
- const int matrix_stride, /** Stride between output matrices. */
- const int matrix_batch_stride, /** Stride between batches within the matrix. */
- const int matrix_row_stride /** Stride within matrices. */
- )
- {
- // If this is an Nx1 kernel, transpose and redirect to the 1xN implementation
- InputTransformImpl<1, KernelRows, 1, InnerTileRows, T>::execute(
- input,
- n_batches, in_batch_stride,
- n_cols, in_col_stride,
- n_rows, in_row_stride,
- n_channels, padding,
- tile_N, tile_M,
- output, matrix_stride, matrix_batch_stride, matrix_row_stride
- );
- }
-
- template <int KernelRows, int KernelCols, int InnerTileRows, int InnerTileCols, typename T>
- void InputTransformImpl<KernelRows, KernelCols, InnerTileRows, InnerTileCols, T>::process_tile_row(
- const int tile_N,
- int n_channels,
- const T* const input_base,
- const int input_row_stride,
- const int input_col_stride,
- T* const matrix_base,
- const int matrix_stride,
- const int matrix_row_stride,
- const int pad_top,
- const int row_pad_left,
- const int pad_bottom,
- const int n_cols
- )
- {
- // Loop over columns of tiles
- for (int tile_j = 0; tile_j < tile_N; tile_j++)
- {
- // Padding (left + right) for the tile
- const int t_start = tile_j*(InnerTileCols - overlap_cols) - row_pad_left;
- const int t_end = t_start + InnerTileCols;
- const int t_pad_left = std::max(0, row_pad_left - tile_j*(InnerTileCols - overlap_cols));
- const int t_pad_right = (t_end <= n_cols) ? 0 : t_end - n_cols;
-
- // Get pointers into the inputs and outputs
- const int col_offset = std::min(0, t_pad_left - row_pad_left);
- const T* const input_base_col = (
- input_base + ((InnerTileCols - overlap_cols)*tile_j + col_offset)*input_col_stride
- );
- T* const outptr = matrix_base + tile_j*matrix_row_stride;
-
- // Apply the specific tile processing function
- const typename Tiles::TileFn tilefn = Tiles::get_tile_specialization(
- pad_top, t_pad_left, pad_bottom, t_pad_right
- );
-
- tilefn(
- n_channels,
- input_base_col, input_row_stride, input_col_stride,
- outptr, matrix_stride,
- pad_top, t_pad_left, pad_bottom, t_pad_right
- );
- }
- }
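A worked example of the column-padding arithmetic above (the numbers are ours): for a 3x3 kernel with 4x4 inner tiles, overlap_cols = 2 and PADDING_SAME gives a left padding of one column. With n_cols = 7, tile_j = 0 yields t_start = -1, t_pad_left = 1 and t_pad_right = 0, while tile_j = 3 yields t_start = 5, t_end = 9 and t_pad_right = 9 - 7 = 2. The col_offset term then backs the input pointer up to the true tile start on interior tiles (t_pad_left = 0, so col_offset = -1) while clamping the padded leftmost tile at column 0 (col_offset = 0).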
-
- /***************************************************************************/
- template <int KernelRows, int KernelCols, int InnerTileRows, int InnerTileCols, typename T>
- InputTransform<KernelRows, KernelCols, InnerTileRows, InnerTileCols, T>::InputTransform(
- const T* const input, /** Input tensor data */
- const int n_batches, /** Number of batches in input tensor. */
- const int n_rows, /** Number of rows in input tensor. */
- const int n_cols, /** Number of columns in input tensor. */
- const int n_channels, /** Number of channels in input tensor. */
- const PaddingType padding, /** Padding type. */
- T* const output, /** Base of output matrices. */
- const int matrix_stride, /** Stride between output matrices. */
- const int matrix_row_stride, /** Stride within matrices. */
- const int in_batch_stride, /** Stride between input batches. */
- const int in_row_stride, /** Stride between input rows. */
- const int in_col_stride /** Stride between input columns. */
- ) : _inptr(input), _outptr(output),
- _n_batches(n_batches), _n_rows(n_rows), _n_cols(n_cols), _n_channels(n_channels),
- _matrix_stride(matrix_stride), _matrix_row_stride(matrix_row_stride),
- _tiles_M(iceildiv((padding == PADDING_SAME) ? n_rows : n_rows - KernelRows + 1,
- InnerTileRows - KernelRows + 1)),
- _tiles_N(iceildiv((padding == PADDING_SAME) ? n_cols : n_cols - KernelCols + 1,
- InnerTileCols - KernelCols + 1)),
- _in_col_stride(in_col_stride ? in_col_stride : n_channels),
- _in_row_stride(in_row_stride ? in_row_stride : n_cols * _in_col_stride),
- _in_batch_stride(in_batch_stride ? in_batch_stride : n_rows * _in_row_stride),
- _padding_type(padding)
- {
- }
-
- template <int KernelRows, int KernelCols, int InnerTileRows, int InnerTileCols, typename T>
- unsigned int InputTransform<KernelRows, KernelCols, InnerTileRows, InnerTileCols, T>::get_window() const
- {
- // The final window includes the tail; all other windows are a multiple of
- // the window block in size.
- return iceildiv(_n_channels, WINDOW_BLOCK);
- }
-
- template <int KernelRows, int KernelCols, int InnerTileRows, int InnerTileCols, typename T>
- void InputTransform<KernelRows, KernelCols, InnerTileRows, InnerTileCols, T>::run(
- const unsigned int start, const unsigned int stop
- )
- {
- if (start >= get_window())
- {
- return;
- }
-
- // Determine the window of work to perform
- const unsigned int start_channel = start * WINDOW_BLOCK;
- const unsigned int stop_channel = std::min<const unsigned int>(
- stop * WINDOW_BLOCK, _n_channels
- );
- const unsigned int n_channels = stop_channel - start_channel;
-
- // Perform the work
- execute(
- _inptr + start_channel,
- _n_batches, _in_batch_stride,
- _n_rows, _in_row_stride,
- _n_cols, _in_col_stride,
- n_channels,
- _padding_type,
- _tiles_M,
- _tiles_N,
- _outptr + start_channel,
- _matrix_stride,
- _matrix_row_stride * _tiles_M * _tiles_N,
- _matrix_row_stride
- );
- }
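get_window() and run() together expose channel-blocked parallelism: window index w covers channels [w*WINDOW_BLOCK, min((w+1)*WINDOW_BLOCK, n_channels)), with the final window absorbing the tail. A minimal sketch of splitting the window across two threads; the instantiation and the threading are our own illustration, only get_window() and run() come from the class above:

    #include <thread>

    // Sketch: drive one InputTransform from two threads (hypothetical usage).
    void run_on_two_threads(winograd::InputTransform<3, 3, 4, 4, float> &transform)
    {
      const unsigned int window = transform.get_window();  // iceildiv(n_channels, WINDOW_BLOCK)
      const unsigned int half   = window / 2;
      std::thread worker([&transform, half, window] {
        transform.run(half, window);  // upper half, including the channel tail
      });
      transform.run(0, half);         // lower half on the calling thread
      worker.join();
    }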
-
- template <int KernelRows, int KernelCols, int InnerTileRows, int InnerTileCols, typename T>
- void InputTransform<KernelRows, KernelCols, InnerTileRows, InnerTileCols, T>::execute(
- const T* const input, /** Input tensor data */
- const int n_batches, /** Number of batches in input tensor. */
- const int in_batch_stride, /** Stride between batches of the input. */
- const int n_rows, /** Number of rows in input tensor. */
- const int in_row_stride, /** Stride between rows of the input. */
- const int n_cols, /** Number of columns in input tensor. */
- const int in_col_stride, /** Stride between columns of the input. */
- const int n_channels, /** Number of channels in input tensor. */
- const PaddingType padding, /** Padding type. */
- const int tile_M, /** Number of rows of tiles. */
- const int tile_N, /** Number of columns of tiles. */
- T* const output, /** Base of output matrices. */
- const int matrix_stride, /** Stride between output matrices. */
- const int matrix_batch_stride, /** Stride between batches within the matrix. */
- const int matrix_row_stride /** Stride within matrices. */
- )
- {
- Transform::execute(
- input, n_batches, in_batch_stride, n_rows, in_row_stride, n_cols,
- in_col_stride, n_channels, padding, tile_M, tile_N, output,
- matrix_stride, matrix_batch_stride, matrix_row_stride
- );
- }
-
- template <int KernelRows, int KernelCols, int InnerTileRows, int InnerTileCols, typename T>
- typename InputTransformImplTiles<KernelRows, KernelCols, InnerTileRows, InnerTileCols, T>::TileFn
- InputTransformImplTiles<KernelRows, KernelCols, InnerTileRows, InnerTileCols, T>::
- get_tile_specialization(
- const int pad_top,
- const int pad_left,
- const int pad_bottom,
- const int pad_right
- )
- {
- if (!(pad_top || pad_left || pad_bottom || pad_right))
- {
- // No padding, return unpadded specialisation
- return tilefn_unpadded;
- }
- else if (pad_top && !(pad_left || pad_bottom || pad_right))
- {
- // Top padding only
- const int index = (pad_top - min_pad_top) / (InnerTileRows - overlap_rows);
- return tilefn_top_padded[index];
- }
- else if (!(pad_top) && pad_left && !(pad_bottom || pad_right))
- {
- // Left padding only
- const int index = (pad_left - min_pad_left) / (InnerTileCols - overlap_cols);
- return tilefn_left_padded[index];
- }
- else if (!(pad_top || pad_left) && pad_bottom && !(pad_right))
- {
- // Bottom padding only
- return tilefn_bottom_padded[pad_bottom - 1];
- }
- else if (!(pad_top || pad_left || pad_bottom) && pad_right)
- {
- // Right padding only
- return tilefn_right_padded[pad_right - 1];
- }
- else
- {
- // Combination of paddings, return an unspecialised method
- return tilefn_generic;
- }
- }
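The dispatch above prefers single-edge specialisations and reserves the generic path for mixed padding: a tile padded only on the bottom with pad_bottom = 2, for instance, selects tilefn_bottom_padded[1], whereas a tile padded on both the bottom and the right falls through to tilefn_generic. Top and left padding are additionally divided by the tile stride (InnerTileRows - overlap_rows, InnerTileCols - overlap_cols) before indexing, since successive tiles can only shed leading-edge padding in stride-sized steps.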
-
- template <int KernelCols, int InnerTileCols, typename T>
- typename InputTransformImplTiles<1, KernelCols, 1, InnerTileCols, T>::TileFn
- InputTransformImplTiles<1, KernelCols, 1, InnerTileCols, T>::
- get_tile_specialization(
- const int pad_top,
- const int pad_left,
- const int pad_bottom,
- const int pad_right
- )
- {
- (void) pad_top;
- (void) pad_bottom;
-
- if (!(pad_left || pad_right))
- {
- // No padding, return unpadded specialisation
- return tilefn_unpadded;
- }
- else if (pad_left && !pad_right)
- {
- // Left padding only
- const int index = (pad_left - min_pad_left) / (InnerTileCols - overlap_cols);
- return tilefn_left_padded[index];
- }
- else if (!pad_left && pad_right)
- {
- // Right padding only
- return tilefn_right_padded[pad_right - 1];
- }
- else
- {
- // Combination of paddings, return an unspecialised method
- return tilefn_generic;
- }
- }
-}
-
-
diff --git a/arm_compute/core/NEON/kernels/convolution/winograd/transforms/output.hpp b/arm_compute/core/NEON/kernels/convolution/winograd/transforms/output.hpp
deleted file mode 100644
index 77cd9de513..0000000000
--- a/arm_compute/core/NEON/kernels/convolution/winograd/transforms/output.hpp
+++ /dev/null
@@ -1,278 +0,0 @@
-/*
- * Copyright (c) 2017 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#pragma once
-#include "../winograd_gemm.hpp"
-
-namespace winograd
-{
- /***************************************************************************/
- /* Instance-less API */
- template <int KernelRows, int KernelCols, int InnerTileRows, int InnerTileCols, typename T>
- void OutputTransformImpl<KernelRows, KernelCols, InnerTileRows, InnerTileCols, T>::execute(
- const int n_batches,
- const int output_batch_stride,
- const int n_rows,
- const int output_row_stride,
- const int n_cols,
- const int output_col_stride,
- const int n_channels,
- const T* const matrix_base,
- const int matrix_stride,
- const int matrix_row_stride,
- const T* const biases,
- T* const output
- )
- {
- // Compute the number of tiles and hence the padding required on the bottom
- // and right of the image.
- const int tile_M = iceildiv(n_rows, OutputTileRows);
- const int tile_N = iceildiv(n_cols, OutputTileCols);
- const int pad_bottom = OutputTileRows*tile_M - n_rows;
- const int pad_right = OutputTileCols*tile_N - n_cols;
-
- const int matrix_tile_row_stride = tile_N * matrix_row_stride;
- const int matrix_batch_stride = tile_M * matrix_tile_row_stride;
-
- // Perform the output transformation for each batch
- for (int batch = 0; batch < n_batches; batch++)
- {
- // Get batch offset for input and outputs.
- const T* const matrix_batch = matrix_base + batch*matrix_batch_stride;
- T* const outptr_batch = output + batch*output_batch_stride;
-
- // Perform the output transformation for each row of the output tensor.
- for (int tile_i = 0; tile_i < tile_M; tile_i++)
- {
- // Compute properties of this row of output tiles
- const int row_pad_bottom = (tile_i < tile_M - 1) ? 0 : pad_bottom;
- const T* const matrix_tile_row = matrix_batch + tile_i * matrix_tile_row_stride;
- T* const outptr_row = outptr_batch + OutputTileRows*tile_i*output_row_stride;
-
- // Process the row
- process_tile_row(
- tile_N, n_channels, matrix_tile_row, matrix_stride,
- matrix_row_stride, biases,
- outptr_row, output_row_stride, output_col_stride, row_pad_bottom,
- pad_right
- );
- }
- }
- }
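A worked example of the tile and padding arithmetic above (our numbers): with 2x2 output tiles and n_rows = n_cols = 7, tile_M = tile_N = iceildiv(7, 2) = 4 and pad_bottom = pad_right = 2*4 - 7 = 1. Only the last row and column of tiles therefore write truncated patches, which is why row_pad_bottom is forwarded as non-zero solely for tile_i == tile_M - 1.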
-
- template <int KernelRows, int InnerTileRows, typename T>
- void OutputTransformImpl<KernelRows, 1, InnerTileRows, 1, T>::execute(
- const int n_batches,
- const int output_batch_stride,
- const int n_rows,
- const int output_row_stride,
- const int n_cols,
- const int output_col_stride,
- const int n_channels,
- const T* const matrix_base,
- const int matrix_stride,
- const int matrix_row_stride,
- const T* const biases,
- T* const output
- )
- {
- // If this is an Nx1 kernel, transpose and redirect to the 1xN implementation.
- OutputTransformImpl<1, KernelRows, 1, InnerTileRows, T>::execute(
- n_batches,
- output_batch_stride,
- n_cols, output_col_stride,
- n_rows, output_row_stride,
- n_channels,
- matrix_base, matrix_stride, matrix_row_stride,
- biases, output
- );
- }
-
- template <int KernelRows, int KernelCols, int InnerTileRows, int InnerTileCols, typename T>
- void OutputTransformImpl<KernelRows, KernelCols, InnerTileRows, InnerTileCols, T>::process_tile_row(
- const int tile_N,
- const int n_channels,
- const T* const matrix_base,
- const int matrix_stride,
- const int matrix_row_stride,
- const T* const biases,
- T* const output,
- const int output_row_stride,
- const int output_col_stride,
- const int row_pad_bottom,
- const int row_pad_right
- )
- {
- // Loop over columns of tiles
- for (int tile_j = 0; tile_j < tile_N; tile_j++)
- {
- // Properties of this tile
- const int tile_pad_right = (tile_j < tile_N - 1) ? 0 : row_pad_right;
- const T* const matrix_row = matrix_base + tile_j * matrix_row_stride;
- T* const outptr = output + OutputTileCols*tile_j*output_col_stride;
-
- // Perform the output transformation
- const typename Tiles::TileFn tilefn = Tiles::get_tile_specialization(row_pad_bottom, tile_pad_right);
- tilefn(
- n_channels, matrix_row, matrix_stride, biases,
- outptr, output_row_stride, output_col_stride,
- row_pad_bottom, tile_pad_right
- );
- }
- }
-
- /***************************************************************************/
- template <int KernelRows, int KernelCols, int InnerTileRows, int InnerTileCols, typename T>
- OutputTransform<KernelRows, KernelCols, InnerTileRows, InnerTileCols, T>::OutputTransform(
- const T* const matrix_base,
- const int matrix_stride,
- const int matrix_row_stride,
- const T* const biases,
- T* const output,
- const int n_batches,
- const int n_rows,
- const int n_cols,
- const int n_channels,
- const int out_batch_stride,
- const int out_row_stride,
- const int out_col_stride
- ) : _matrix_base(matrix_base), _biases(biases),
- _matrix_stride(matrix_stride), _matrix_row_stride(matrix_row_stride),
- _outptr(output), _n_batches(n_batches), _n_rows(n_rows), _n_cols(n_cols),
- _n_channels(n_channels), _tile_M(iceildiv(n_rows, OutputTileRows)),
- _tile_N(iceildiv(n_cols, OutputTileCols)),
- _out_col_stride(out_col_stride ? out_col_stride : n_channels),
- _out_row_stride(out_row_stride ? out_row_stride : n_cols * _out_col_stride),
- _out_batch_stride(out_batch_stride ? out_batch_stride : n_rows * _out_row_stride)
- {
- }
-
- template <int KernelRows, int KernelCols, int InnerTileRows, int InnerTileCols, typename T>
- unsigned int OutputTransform<KernelRows, KernelCols, InnerTileRows, InnerTileCols, T>::get_window() const
- {
- // The final window includes the tail; all other windows are a multiple of
- // the window block in size.
- return iceildiv(_n_channels, WINDOW_BLOCK);
- }
-
- template <int KernelRows, int KernelCols, int InnerTileRows, int InnerTileCols, typename T>
- void OutputTransform<KernelRows, KernelCols, InnerTileRows, InnerTileCols, T>::run(
- const unsigned int start, const unsigned int stop
- )
- {
- if (start >= get_window())
- {
- return;
- }
-
- // Determine the window of work to perform
- const unsigned int start_channel = start * WINDOW_BLOCK;
- const unsigned int stop_channel = std::min<const unsigned int>(
- stop * WINDOW_BLOCK, _n_channels
- );
- const unsigned int n_channels = stop_channel - start_channel;
-
- execute(
- _n_batches,
- _out_batch_stride,
- _n_rows,
- _out_row_stride,
- _n_cols,
- _out_col_stride,
- n_channels,
- _matrix_base + start_channel,
- _matrix_stride,
- _matrix_row_stride,
- (_biases != nullptr) ? _biases + start_channel : nullptr,
- _outptr + start_channel
- );
- }
-
- template <int KernelRows, int KernelCols, int InnerTileRows, int InnerTileCols, typename T>
- void OutputTransform<KernelRows, KernelCols, InnerTileRows, InnerTileCols, T>::execute(
- const int n_batches,
- const int out_batch_stride,
- const int n_rows,
- const int out_row_stride,
- const int n_cols,
- const int out_col_stride,
- const int n_channels,
- const T* const matrix_base,
- const int matrix_stride,
- const int matrix_row_stride,
- const T* const biases,
- T* const output
- )
- {
- Transform::execute(
- n_batches, out_batch_stride,
- n_rows, out_row_stride,
- n_cols, out_col_stride, n_channels,
- matrix_base, matrix_stride, matrix_row_stride,
- biases, output
- );
- }
-
- template <int KernelCols, int InnerTileCols, typename T>
- typename OutputTransformImplTiles<1, KernelCols, 1, InnerTileCols, T>::TileFn
- OutputTransformImplTiles<1, KernelCols, 1, InnerTileCols, T>::
- get_tile_specialization(const int pad_bottom, const int pad_right)
- {
- (void) pad_bottom;
-
- if (!pad_right)
- {
- // No padding, return unpadded specialisation
- return tilefn_unpadded;
- }
- else
- {
- return tilefn_right_padded[pad_right - 1];
- }
- }
-
- template <int KernelRows, int KernelCols, int InnerTileRows, int InnerTileCols, typename T>
- typename OutputTransformImplTiles<KernelRows, KernelCols, InnerTileRows, InnerTileCols, T>::TileFn
- OutputTransformImplTiles<KernelRows, KernelCols, InnerTileRows, InnerTileCols, T>::
- get_tile_specialization(const int pad_bottom, const int pad_right)
- {
- if (!(pad_bottom || pad_right))
- {
- // No padding, return unpadded specialisation
- return tilefn_unpadded;
- }
- else if (pad_bottom && !pad_right)
- {
- return tilefn_bottom_padded[pad_bottom - 1];
- }
- else if (!pad_bottom && pad_right)
- {
- return tilefn_right_padded[pad_right - 1];
- }
- else
- {
- return tilefn_generic;
- }
- }
-} // namespace winograd
diff --git a/arm_compute/core/NEON/kernels/convolution/winograd/winograd.hpp b/arm_compute/core/NEON/kernels/convolution/winograd/winograd.hpp
new file mode 100644
index 0000000000..183c9c1061
--- /dev/null
+++ b/arm_compute/core/NEON/kernels/convolution/winograd/winograd.hpp
@@ -0,0 +1,610 @@
+/*
+ * Copyright (c) 2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#pragma once
+
+#include "convolution.hpp"
+#include "tensor.hpp"
+#include "utils.hpp"
+
+namespace winograd
+{
+
+class ITransform
+{
+ public:
+ virtual ~ITransform() = default;
+
+ /**
+ * Get the working space required to perform the transformation.
+ *
+     * Note that the working space is only required while the transformation
+     * is being performed; it can be reused whenever the transformation is
+     * not running.
+ *
+ * @param nthreads The greatest number of threads that will be used to execute the transform.
+ * @return Size of working space required in bytes.
+ */
+ virtual size_t get_working_space_size(unsigned int nthreads=1) const = 0;
+
+ /**
+ * Set the working space to be used by the transformation.
+ *
+     * Note that the working space is only required while the transformation
+     * is being performed; it can be reused whenever the transformation is
+     * not running.
+ *
+     * @param buffer Pointer to the working space.
+ */
+ virtual void set_working_space(void *buffer) = 0;
+
+ /**
+ * Get the window of work a given operator can perform.
+ */
+ virtual unsigned int get_window() const = 0;
+
+ /**
+ * Perform work upon a window of the transform.
+ */
+ virtual void run(unsigned int start, unsigned int stop, unsigned int threadid=0) = 0;
+};
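+
+// A minimal sketch of how a caller is expected to drive any ITransform
+// (illustrative only: `transform`, `n_threads` and the surrounding threading
+// machinery are assumed to exist; each worker receives its own
+// (start, stop, threadid) triple):
+//
+//   std::vector<uint8_t> scratch(transform.get_working_space_size(n_threads));
+//   transform.set_working_space(scratch.data());
+//   const unsigned int window = transform.get_window();
+//   const unsigned int step = (window + n_threads - 1) / n_threads;
+//   for (unsigned int t = 0; t < n_threads; t++)
+//   {
+//     const unsigned int start = t * step;
+//     if (start >= window) break;  // nothing left to assign
+//     transform.run(start, std::min(start + step, window), t);
+//   }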
+
+class IInputTransform : public ITransform
+{
+ public:
+ virtual ~IInputTransform() = default;
+
+ /**
+ * Set the pointer to the (NHWC-ordered) tensor to be transformed.
+ */
+ virtual void set_input_tensor(const void *input) = 0;
+
+ /**
+ * Set the pointer to the (NHWC-ordered) tensor to be transformed.
+ * @param col_stride Stride between columns of the tensor, measured in elements (not bytes).
+ */
+ virtual void set_input_tensor(const void *input, int col_stride) = 0;
+
+ /**
+ * Set the pointer to the (NHWC-ordered) tensor to be transformed.
+ * @param row_stride Stride between rows of the tensor, measured in elements (not bytes).
+ * @param col_stride Stride between columns of the tensor, measured in elements (not bytes).
+ */
+ virtual void set_input_tensor(const void *input, int row_stride, int col_stride) = 0;
+
+ /**
+ * Set the pointer to the (NHWC-ordered) tensor to be transformed.
+ * @param batch_stride Stride between batches of the tensor, measured in elements (not bytes).
+ * @param row_stride Stride between rows of the tensor, measured in elements (not bytes).
+ * @param col_stride Stride between columns of the tensor, measured in elements (not bytes).
+ */
+ virtual void set_input_tensor(const void *input, int batch_stride, int row_stride, int col_stride) = 0;
+
+ /**
+ * Set pointers to the matrices written by the transform.
+ * @param matrices Pointer to the start of the first matrix representing the transformed input.
+ * @param inter_matrix_stride Stride (in elements) between matrices.
+ * @param matrix_row_stride Stride (in elements) between the rows within a single matrix.
+ */
+ virtual void set_output_matrices(void *matrices, int inter_matrix_stride, int matrix_row_stride) = 0;
+};
+
+class IOutputTransform : public ITransform
+{
+ public:
+ virtual ~IOutputTransform() = default;
+
+ /**
+ * Set pointers to the matrices written by the transform.
+ * @param matrices Pointer to the start of the first matrix representing the input to the transform.
+ * @param inter_matrix_stride Stride (in elements) between matrices.
+ * @param matrix_row_stride Stride (in elements) between the rows within a single matrix.
+ */
+ virtual void set_input_matrices(const void *matrices, int inter_matrix_stride, int matrix_row_stride) = 0;
+
+ /**
+     * Set pointer to the bias tensor (can be ignored or called with nullptr for no bias).
+ */
+ virtual void set_bias(const void *bias=nullptr) = 0;
+
+ /**
+ * Set pointer to the output tensor produced by the transform.
+ */
+ virtual void set_output_tensor(void *output) = 0;
+
+ /**
+ * Set pointer to the output tensor produced by the transform.
+ * @param col_stride Stride between columns of the tensor, measured in elements (not bytes).
+ */
+ virtual void set_output_tensor(void *output, int col_stride) = 0;
+
+ /**
+ * Set pointer to the output tensor produced by the transform.
+ * @param row_stride Stride between rows of the tensor, measured in elements (not bytes).
+ * @param col_stride Stride between columns of the tensor, measured in elements (not bytes).
+ */
+ virtual void set_output_tensor(void *output, int row_stride, int col_stride) = 0;
+
+ /**
+ * Set pointer to the output tensor produced by the transform.
+ * @param batch_stride Stride between batches of the tensor, measured in elements (not bytes).
+ * @param row_stride Stride between rows of the tensor, measured in elements (not bytes).
+ * @param col_stride Stride between columns of the tensor, measured in elements (not bytes).
+ */
+ virtual void set_output_tensor(void *output, int batch_stride, int row_stride, int col_stride) = 0;
+};
+
+class IWeightTransform : public ITransform
+{
+ public:
+ virtual ~IWeightTransform() = default;
+
+ /** Set pointer to the weight tensor read by the transform. */
+ virtual void set_weight_tensor(const void *weights) = 0;
+
+ /**
+ * Set pointers to the matrices written by the transform.
+ * @param matrices Pointer to the start of the first matrix representing the transformed input.
+ * @param inter_matrix_stride Stride (in elements) between matrices.
+ * @param matrix_row_stride Stride (in elements) between the rows within a single matrix.
+ */
+ virtual void set_output_matrices(void *matrices, int inter_matrix_stride, int matrix_row_stride) = 0;
+};
+
+enum class WinogradRoots
+{
+ Integers,
+};
+
+template <int InnerTileRows, int InnerTileCols, typename TIn, typename TOut, WinogradRoots Roots>
+class InputTransform : public IInputTransform
+{
+ public:
+    /** Create an InputTransform operator fixed on a given problem; the
+     * tensor and matrix pointers are supplied later via the `set_...`
+     * methods.
+     */
+ InputTransform(
+ int kernel_rows, /**< Number of rows in the kernel */
+ int kernel_cols, /**< Number of columns in the kernel */
+ int n_batches, /**< Number of batches in input tensor. */
+ int n_rows, /**< Number of rows in input tensor. */
+ int n_cols, /**< Number of columns in input tensor. */
+ int n_channels, /**< Number of channels in input tensor. */
+ int padding_top, /**< Padding to apply to the top of the image. */
+ int padding_left, /**< Padding to apply to the left of the image. */
+ int padding_bottom, /**< Padding to apply to the bottom of the image. */
+ int padding_right /**< Padding to apply to the right of the image. */
+ );
+
+ InputTransform(InputTransform&) = delete;
+ InputTransform operator=(InputTransform&) = delete;
+
+ /** Set pointers to the input tensor read by the transform. */
+ void set_input_tensor(const void *input) override;
+ void set_input_tensor(const void *input, int col_stride) override;
+ void set_input_tensor(const void *input, int row_stride, int col_stride) override;
+ void set_input_tensor(const void *input, int batch_stride, int row_stride, int col_stride) override;
+
+ /** Set pointers to the matrices written by the transform. */
+    void set_output_matrices(void *matrices, int inter_matrix_stride, int matrix_row_stride) override;
+
+ /** Get the working space required to perform the transformation. */
+ size_t get_working_space_size(unsigned int nthreads=1) const override;
+ void set_working_space(void *buffer) override;
+
+ /** Get the window of work a given operator can perform. */
+ unsigned int get_window() const override;
+ static constexpr unsigned int WINDOW_BLOCK = 16; // Base size of window
+
+ /** Perform work upon a window of the input. */
+ void run(unsigned int start, unsigned int stop, unsigned int threadid=0) override;
+
+ protected:
+ const int _n_batches, _n_rows, _n_cols, _n_channels;
+
+ private:
+ void transform_unpadded_tile(
+ unsigned int threadid,
+ int n_channels,
+ TOut *outptr,
+ const TIn *inptr
+ );
+
+ void transform_padded_tile(
+ unsigned int threadid,
+ int n_channels,
+ TOut *outptr,
+ const TIn *inptr,
+ int padding_top,
+ int padding_left,
+ int padding_bottom,
+ int padding_right
+ );
+
+ /* Tile implementation */
+ static void transform_tile(
+ int n_channels, /** @param[in] Number of channels in the tensor. */
+ const TIn* inptr_base, /** @param[in] Pointer to the base of the input tile. */
+ int input_row_stride, /** @param[in] Stride between rows of the input tensor. */
+ int input_col_stride, /** @param[in] Stride between columns of the input tensor. */
+ TOut* mptr_base, /** @param[out] Base pointer to transformed input matrices. */
+ int matrix_stride /** @param[in] Stride between matrices in the input space. */
+ );
+
+ /** Get the working space for a thread. */
+ void * get_working_space(unsigned int threadid) const;
+
+ const TIn* _inptr;
+ TOut* _outptr;
+
+ const int _overlap_rows, _overlap_cols;
+ const int _padding_top, _padding_left, _padding_bottom, _padding_right;
+ const int _tiles_M, _tiles_N;
+ int _matrix_stride, _matrix_row_stride, _matrix_batch_stride;
+ int _in_col_stride, _in_row_stride, _in_batch_stride;
+
+ const int _working_space_col_stride, _working_space_row_stride;
+ TIn *_working_space;
+};
+
+template <int InnerTileRows, typename TIn, typename TOut, WinogradRoots Roots>
+class InputTransform<InnerTileRows, 1, TIn, TOut, Roots> :
+ public InputTransform<1, InnerTileRows, TIn, TOut, Roots>
+{
+ using Base = InputTransform<1, InnerTileRows, TIn, TOut, Roots>;
+
+ public:
+ InputTransform(
+ int kernel_rows, /**< Number of rows in the kernel. */
+ int kernel_cols, /**< Number of columns in the kernel. */
+ int n_batches, /**< Number of batches in input tensor. */
+ int n_rows, /**< Number of rows in input tensor. */
+ int n_cols, /**< Number of columns in input tensor. */
+ int n_channels, /**< Number of channels in input tensor. */
+ int padding_top, /**< Padding to apply to the top of the image. */
+ int padding_left, /**< Padding to apply to the left of the image. */
+ int padding_bottom, /**< Padding to apply to the bottom of the image. */
+ int padding_right /**< Padding to apply to the right of the image. */
+ );
+
+ /** Set pointers to the input tensor read by the transform. */
+ void set_input_tensor(const void *input) override;
+ void set_input_tensor(const void *input, int col_stride) override;
+ void set_input_tensor(const void *input, int row_stride, int col_stride) override;
+ void set_input_tensor(const void *input, int batch_stride, int row_stride, int col_stride) override;
+};
+
+template <
+ int KernelRows, int KernelCols,
+ int InnerTileRows, int InnerTileCols,
+ typename TIn, typename TOut,
+ WinogradRoots Roots
+>
+class OutputTransform : public IOutputTransform
+{
+ public:
+ OutputTransform(
+ int n_batches, /**< Number of batches in output tensor. */
+ int n_rows, /**< Number of rows in output tensor. */
+ int n_cols, /**< Number of columns in output tensor. */
+ int n_channels /**< Number of channels in output tensor. */
+ );
+
+ OutputTransform(OutputTransform&) = delete;
+ OutputTransform operator=(OutputTransform&) = delete;
+
+ /** Set pointers to the matrices read by the transform. */
+    void set_input_matrices(const void *matrices, int inter_matrix_stride, int matrix_row_stride) override;
+
+    /** Set pointer to the bias tensor (can be ignored or called with nullptr for no bias). */
+ void set_bias(const void *bias=nullptr) override;
+
+ /** Set pointers to the output tensor written by the transform. */
+ void set_output_tensor(void *output) override;
+ void set_output_tensor(void *output, int col_stride) override;
+ void set_output_tensor(void *output, int row_stride, int col_stride) override;
+ void set_output_tensor(void *output, int batch_stride, int row_stride, int col_stride) override;
+
+ /** Get the working space required to perform the transformation. */
+ size_t get_working_space_size(unsigned int nthreads=1) const override;
+ void set_working_space(void *buffer) override;
+
+ /** Get the window of work a given operator can perform. */
+ unsigned int get_window() const override;
+ static constexpr unsigned int WINDOW_BLOCK = 16; // Base size of window
+
+    /** Perform work upon a window of the transform. */
+ void run(unsigned int start, unsigned int stop, unsigned int threadid=0) override;
+
+ protected:
+ static constexpr int inner_tile_rows = InnerTileRows;
+ static constexpr int inner_tile_cols = InnerTileCols;
+ static constexpr int output_tile_rows = InnerTileRows - KernelRows + 1;
+ static constexpr int output_tile_cols = InnerTileCols - KernelCols + 1;
+
+ const int _n_batches, _n_rows, _n_cols, _n_channels;
+
+ private:
+ void transform_uncropped_tile(
+ unsigned int threadid,
+ int n_channels,
+ TOut *outptr,
+ const TIn *inptr,
+ const TOut *biases
+ );
+
+ void transform_cropped_tile(
+ unsigned int threadid,
+ int n_channels,
+ TOut *outptr,
+ const TIn *inptr,
+ const TOut *biases,
+ int pad_bottom,
+ int pad_right
+ );
+
+ /** Implementation of the tile transformation method. */
+ static void transform_tile(
+ int n_channels,
+ const TIn* matrix_base,
+ int matrix_stride,
+ const TOut* biases,
+ TOut* output,
+ int output_row_stride,
+ int output_col_stride
+ );
+
+ /** Get the working space for a thread. */
+ void * get_working_space(unsigned int threadid) const;
+
+ const TIn* _matrix_base;
+ const TOut* _biases;
+ int _matrix_stride, _matrix_row_stride, _matrix_batch_stride;
+ TOut* _outptr;
+ const int _tiles_M, _tiles_N;
+ int _out_col_stride, _out_row_stride, _out_batch_stride;
+
+ const int _working_space_col_stride, _working_space_row_stride;
+ TOut *_working_space;
+};
+
+template <
+ int KernelRows,
+ int InnerTileRows,
+ typename TIn, typename TOut,
+ WinogradRoots Roots
+>
+class OutputTransform<KernelRows, 1, InnerTileRows, 1, TIn, TOut, Roots> :
+ public OutputTransform<1, KernelRows, 1, InnerTileRows, TIn, TOut, Roots>
+{
+ using Base = OutputTransform<1, KernelRows, 1, InnerTileRows, TIn, TOut, Roots>;
+
+ public:
+ OutputTransform(
+ int n_batches, /**< Number of batches in output tensor. */
+ int n_rows, /**< Number of rows in output tensor. */
+ int n_cols, /**< Number of columns in output tensor. */
+ int n_channels /**< Number of channels in output tensor. */
+ );
+
+ /** Set pointers to the output tensor written by the transform. */
+ void set_output_tensor(void *output) override;
+ void set_output_tensor(void *output, int col_stride) override;
+ void set_output_tensor(void *output, int row_stride, int col_stride) override;
+ void set_output_tensor(void *output, int batch_stride, int row_stride, int col_stride) override;
+};
+
+template <
+ int KernelRows, int KernelCols,
+ int InnerTileRows, int InnerTileCols,
+ typename TIn, typename TOut,
+ WinogradRoots Roots
+>
+class WeightTransform : public IWeightTransform
+{
+ public:
+ WeightTransform(
+ int n_output_channels, /**< Number of output channels in the kernel. */
+ int n_input_channels /**< Number of input channels in the kernel. */
+ );
+
+ WeightTransform(WeightTransform&) = delete;
+ WeightTransform operator=(WeightTransform&) = delete;
+
+ /** Set pointer to the weight tensor read by the transform. */
+ void set_weight_tensor(const void *weights) override;
+
+    /** Set pointers to the matrices written by the transform. */
+ void set_output_matrices(void *matrices, int inter_matrix_stride, int matrix_row_stride) override;
+
+ /** Get the working space required to perform the transformation. */
+ size_t get_working_space_size(unsigned int nthreads=1) const override;
+ void set_working_space(void *buffer) override;
+
+ /** Get the window of work a given operator can perform. */
+ unsigned int get_window() const override;
+ static constexpr unsigned int WINDOW_BLOCK = 16; // Base size of window
+
+    /** Perform work upon a window of the transform. */
+ void run(unsigned int start, unsigned int stop, unsigned int threadid=0) override;
+
+ protected:
+ static const int kernel_rows = KernelRows;
+ static const int kernel_cols = KernelCols;
+ static const int inner_tile_rows = InnerTileRows;
+ static const int inner_tile_cols = InnerTileCols;
+
+ private:
+ /** Apply the transform to a tensor. */
+ static void execute(
+ int n_output_channels,
+ int n_input_channels,
+ const TIn* input,
+ TOut* output,
+ int matrix_stride,
+ int matrix_row_stride
+ );
+
+ const int _n_output_channels, _n_input_channels;
+ TOut *_matrices;
+ int _matrix_stride, _matrix_row_stride;
+ const TIn *_weights;
+};
+
+template <int KernelRows, int InnerTileRows, typename TIn, typename TOut, WinogradRoots Roots>
+class WeightTransform<KernelRows, 1, InnerTileRows, 1, TIn, TOut, Roots> :
+ public WeightTransform<1, KernelRows, 1, InnerTileRows, TIn, TOut, Roots>
+{
+ public:
+ using WeightTransform<1, KernelRows, 1, InnerTileRows, TIn, TOut, Roots>::WeightTransform;
+};
+
+template <int OutputTileRows, int OutputTileCols, int KernelRows, int KernelCols, WinogradRoots Roots>
+class WinogradGEMM
+{
+ public:
+ // Information about the specific Winograd instance
+ static constexpr int output_tile_rows = OutputTileRows;
+ static constexpr int output_tile_cols = OutputTileCols;
+ static constexpr int kernel_rows = KernelRows;
+ static constexpr int kernel_cols = KernelCols;
+ static constexpr int inner_tile_rows = output_tile_rows + kernel_rows - 1;
+ static constexpr int inner_tile_cols = output_tile_cols + kernel_cols - 1;
+ static constexpr int N_GEMMS = inner_tile_rows * inner_tile_cols;
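+
+    // Worked example of the constants above: for the popular F(2x2, 3x3)
+    // configuration (2x2 output tile, 3x3 kernel) the inner tile measures
+    // (2 + 3 - 1) x (2 + 3 - 1) = 4 x 4, and N_GEMMS = 16 independent GEMMs
+    // are performed in the Winograd domain.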
+
+ /** Transform weights from the spatial to the Winograd domain. */
+ template <typename TIn, typename TOut>
+ using WeightsTransform = WeightTransform<
+ KernelRows, KernelCols, inner_tile_rows, inner_tile_cols,
+ TIn, TOut, Roots
+ >;
+
+ /** Transform input feature maps from the spatial to the Winograd domain.
+ */
+ template <typename TIn, typename TOut>
+ using InputTransform = InputTransform<
+ inner_tile_rows, inner_tile_cols, TIn, TOut, Roots
+ >;
+
+ /** Transform output feature maps from the Winograd to the spatial domain.
+ */
+ template <typename TIn, typename TOut>
+ using OutputTransform = OutputTransform<
+ KernelRows, KernelCols, inner_tile_rows, inner_tile_cols,
+ TIn, TOut, Roots
+ >;
+
+ /** Perform a convolution.
+ */
+ template <typename TOut, typename TIn, typename TInGEMM=TIn, typename TOutGEMM=TOut>
+ class Convolution
+ {
+ public:
+ // Information about the typed Winograd instance
+ typedef TOut OutputType;
+ typedef TOutGEMM GemmOutputType;
+ typedef TInGEMM GemmInputType;
+ typedef TIn InputType;
+
+ /** Get the output shape of a convolution. */
+ static Tensor4DShape get_output_shape(
+ const KernelShape &kernel_shape,
+ const Tensor4DShape &in_shape,
+ const PaddingType padding
+ );
+
+    /** Get the memory required to transform the kernel.
+ */
+ static size_t get_kernel_transform_working_size(const KernelShape &shape);
+
+ /** Get the memory required to store the kernel transformed into the
+ * Winograd domain.
+ */
+ static size_t get_kernel_storage_size(const KernelShape &shape);
+
+ /** Get the memory required to store the input tensor transformed into
+ * the Winograd domain.
+ */
+ static size_t get_input_storage_size(
+ const KernelShape &kernel_shape,
+ const Tensor4DShape &input_shape,
+ const PaddingType padding_type
+ );
+
+ /** Get the memory required to store the output tensor in the Winograd
+ * domain.
+ */
+ static size_t get_output_storage_size(
+ const KernelShape &kernel_shape,
+ const Tensor4DShape &input_shape,
+ const PaddingType padding_type
+ );
+
+ /** Get the memory required to apply a Winograd operator to some input.
+ */
+ static size_t get_working_space_size(
+ const KernelShape &kernel_shape,
+ const Tensor4DShape &input_shape,
+ const PaddingType padding_type
+ );
+
+    /** Get the memory required by a single "input" matrix.
+ */
+ static size_t get_input_matrix_size(
+ const KernelShape &kernel_shape,
+ const Tensor4DShape &input_shape,
+ const PaddingType padding_type
+ );
+
+ static int get_input_matrix_stride(
+ const KernelShape &kernel_shape,
+ const Tensor4DShape &input_shape,
+ const PaddingType padding_type
+ );
+
+    /** Get the memory required by a single "output" matrix.
+ */
+ static size_t get_output_matrix_size(
+ const KernelShape &kernel_shape,
+ const Tensor4DShape &input_shape,
+ const PaddingType padding_type
+ );
+
+ static int get_output_matrix_stride(
+ const KernelShape &kernel_shape,
+ const Tensor4DShape &input_shape,
+ const PaddingType padding_type
+ );
+
+    /** Get the memory required by a single "kernel" matrix.
+ */
+ static size_t get_kernel_matrix_size(const KernelShape &shape);
+ static int get_kernel_matrix_stride(const KernelShape &shape);
+
+ static constexpr int M_BLOCK = 4; /** Size of block used by GEMM. */
+ static constexpr int N_BLOCK = 16; /** Size of block used by GEMM. */
+ };
+};
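+
+// Illustrative compile-time check of the derived shapes (a sketch; any
+// concrete instantiation could be substituted):
+//
+//   using WG = WinogradGEMM<4, 4, 3, 3, WinogradRoots::Integers>;
+//   static_assert(WG::inner_tile_rows == 6, "4 + 3 - 1");
+//   static_assert(WG::inner_tile_cols == 6, "4 + 3 - 1");
+//   static_assert(WG::N_GEMMS == 36, "one GEMM per inner-tile element");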
+
+} // namespace winograd
diff --git a/arm_compute/core/NEON/kernels/convolution/winograd/winograd_gemm.hpp b/arm_compute/core/NEON/kernels/convolution/winograd/winograd_gemm.hpp
deleted file mode 100644
index 71b5fd516f..0000000000
--- a/arm_compute/core/NEON/kernels/convolution/winograd/winograd_gemm.hpp
+++ /dev/null
@@ -1,226 +0,0 @@
-/*
- * Copyright (c) 2017 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#pragma once
-
-#include "arm_compute/core/NEON/kernels/convolution/common/alloc.hpp"
-#include "arm_compute/core/NEON/kernels/convolution/common/convolution.hpp"
-#include "gemm.hpp"
-#include "arm_compute/core/NEON/kernels/convolution/common/shims.hpp"
-#include "arm_compute/core/NEON/kernels/convolution/common/tensor.hpp"
-#include "arm_compute/core/NEON/kernels/convolution/common/utils.hpp"
-#include "winograd_input_transform.hpp"
-#include "winograd_output_transform.hpp"
-
-#include <thread>
-#include <utility>
-#include <vector>
-
-// Generic Winograd implementation using GEMM
-namespace winograd
-{
-
-template <int OutputTileRows, int OutputTileCols, int KernelRows, int KernelCols>
-class WinogradGEMM
-{
- public:
- // Information about the specific Winograd instance
- static constexpr int output_tile_rows = OutputTileRows;
- static constexpr int output_tile_cols = OutputTileCols;
- static constexpr int kernel_rows = KernelRows;
- static constexpr int kernel_cols = KernelCols;
- static constexpr int inner_tile_rows = output_tile_rows + kernel_rows - 1;
- static constexpr int inner_tile_cols = output_tile_cols + kernel_cols - 1;
- static constexpr int N_GEMMS = inner_tile_rows * inner_tile_cols;
-
- /** Transform weights from the spatial to the Winograd domain. */
- template <typename T>
- struct WeightsTransform
- {
- /** Get the bytes read during the transform. */
- static inline size_t bytes_read(const KernelShape &shape)
- {
- return shape.size() * sizeof(T);
- }
-
- /** Get the bytes written during the transform. */
- static inline size_t bytes_written(const KernelShape &shape)
- {
- const int inner_tile_size = inner_tile_rows * inner_tile_cols;
- return (inner_tile_size * shape.n_input_channels *
- shape.n_output_channels * sizeof(T));
- }
-
- /** Get the count of operations performed by the transform. */
- static int ops_performed(const KernelShape &shape);
-
- /** Apply the transform to a tensor. */
- static void execute(
- const int n_output_channels,
- const int n_input_channels,
- const T* const input,
- T* const output,
- const int matrix_stride,
- const int matrix_row_stride
- );
-
- /** Create a WeightsTransform operator fixed on a given problem and set
- * of pointers.
- */
- WeightsTransform(
- const T* const input,
- T* const output,
- const int matrix_stride, /** Stride across matrices in the output. */
- const int matrix_row_stride, /** Stride across rows of the matrix. */
- const int n_output_channels, /** Number of filters. */
- const int n_input_channels /** Number of channels in each filter. */
- );
-
- /** Get the window of work a given operator can perform. */
- unsigned int get_window() const;
-
- /** Perform work upon a window of the input. */
- void run(const unsigned int start, const unsigned int stop);
-
- private:
- const T* const inptr; /** Fixed pointer to input data. */
- T* const outptr; /** Fixed pointer to output memory. */
- const int matrix_stride; /** Stride between output matrices. */
- const int matrix_row_stride; /** Stride within output matrices. */
- const int n_output_channels; /** Number of filters. */
- const int n_input_channels; /** Number of channels in each filter. */
- };
-
- /** Transform input feature maps from the spatial to the Winograd domain.
- */
- template <typename T>
- using InputTransform = InputTransform<
- KernelRows, KernelCols,
- (OutputTileRows + KernelRows - 1),
- (OutputTileCols + KernelCols - 1),
- T
- >;
-
- /** Transform output feature maps from the Winograd to the spatial domain.
- */
- template <typename T>
- using OutputTransform = OutputTransform<
- KernelRows, KernelCols,
- (OutputTileRows + KernelRows - 1),
- (OutputTileCols + KernelCols - 1),
- T
- >;
-
-
- /** Perform a convolution.
- */
- template <typename TOut, typename TIn>
- class Convolution
- {
- public:
- // Information about the typed Winograd instance
- typedef TOut OutputType;
- typedef TIn InputType;
-
- /** Get the output shape of a convolution. */
- static Tensor4DShape get_output_shape(
- const KernelShape &kernel_shape,
- const Tensor4DShape &in_shape,
- const PaddingType padding
- );
-
- /* Get the memory required to transform the kernel.
- */
- static size_t get_kernel_transform_working_size(const KernelShape &shape);
-
- /** Get the memory required to store the kernel transformed into the
- * Winograd domain.
- */
- static size_t get_kernel_storage_size(const KernelShape &shape);
-
- /** Get the memory required to store the input tensor transformed into
- * the Winograd domain.
- */
- static size_t get_input_storage_size(
- const KernelShape &kernel_shape,
- const Tensor4DShape &input_shape,
- const PaddingType padding_type
- );
-
- /** Get the memory required to store the output tensor in the Winograd
- * domain.
- */
- static size_t get_output_storage_size(
- const KernelShape &kernel_shape,
- const Tensor4DShape &input_shape,
- const PaddingType padding_type
- );
-
- /** Get the memory required to apply a Winograd operator to some input.
- */
- static size_t get_working_space_size(
- const KernelShape &kernel_shape,
- const Tensor4DShape &input_shape,
- const PaddingType padding_type
- );
-
- /* Get the memory required by a single "input" matrix.
- */
- static size_t get_input_matrix_size(
- const KernelShape &kernel_shape,
- const Tensor4DShape &input_shape,
- const PaddingType padding_type
- );
-
- static int get_input_matrix_stride(
- const KernelShape &kernel_shape,
- const Tensor4DShape &input_shape,
- const PaddingType padding_type
- );
-
- /* Get the memory required by a single "output" matrix.
- */
- static size_t get_output_matrix_size(
- const KernelShape &kernel_shape,
- const Tensor4DShape &input_shape,
- const PaddingType padding_type
- );
-
- static int get_output_matrix_stride(
- const KernelShape &kernel_shape,
- const Tensor4DShape &input_shape,
- const PaddingType padding_type
- );
-
- /* Get the memory required by a single "kernel" matrix.
- */
- static size_t get_kernel_matrix_size(const KernelShape &shape);
- static int get_kernel_matrix_stride(const KernelShape &shape);
-
- static constexpr int M_BLOCK = 4; /** Size of block used by GEMM. */
- static constexpr int N_BLOCK = 16; /** Size of block used by GEMM. */
- };
-};
-
-} // namespace winograd
diff --git a/arm_compute/core/NEON/kernels/convolution/winograd/winograd_input_transform.hpp b/arm_compute/core/NEON/kernels/convolution/winograd/winograd_input_transform.hpp
deleted file mode 100644
index 995554d7f2..0000000000
--- a/arm_compute/core/NEON/kernels/convolution/winograd/winograd_input_transform.hpp
+++ /dev/null
@@ -1,271 +0,0 @@
-/*
- * Copyright (c) 2018 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#pragma once
-
-namespace winograd
-{
-
-namespace
-{
-
-template <int KernelRows, int KernelCols, int InnerTileRows, int InnerTileCols, typename T>
-class InputTransformImplTiles
-{
- public:
- /** Method to transform a tile of the input tensor into the Winograd domain. */
- typedef void (*TileFn)(
- const int n_channels, /** @param[in] Number of channels in the tensor. */
- const T* const inptr_base, /** @param[in] Pointer to the base of the input tile. */
- const int input_row_stride, /** @param[in] Stride between rows of the input tensor. */
- const int input_col_stride, /** @param[in] Stride between columns of the input tensor. */
- T* const mptr_base, /** @param[out] Base pointer to transformed input matrices. */
- const int matrix_stride, /** @param[in] Stride between matrices in the input space. */
- const int _pad_top, /** @param[in] Top padding for unspecialised tiles. */
- const int _pad_left, /** @param[in] Left padding for unspecialised tiles. */
- const int _pad_bottom, /** @param[in] Bottom padding for unspecialised tiles. */
- const int _pad_right /** @param[in] Right padding for unspecialised tiles. */
- );
-
- static TileFn get_tile_specialization(
- const int pad_top,
- const int pad_left,
- const int pad_bottom,
- const int pad_right
- );
-
- // Tile overlaps
- static constexpr int overlap_rows = KernelRows - 1;
- static constexpr int overlap_cols = KernelCols - 1;
-
- private:
-
- // Maximum padding and number of distinct paddings
- static constexpr int max_pad_top = KernelRows / 2;
- static constexpr int min_pad_top = KernelRows % (InnerTileRows - overlap_rows);
- static constexpr int n_pad_top = iceildiv(max_pad_top, InnerTileRows - overlap_rows);
-
- static constexpr int max_pad_left = KernelCols / 2;
- static constexpr int min_pad_left = KernelCols % (InnerTileCols - overlap_cols);
- static constexpr int n_pad_left = iceildiv(max_pad_left, InnerTileCols - overlap_cols);
-
- static constexpr int n_pad_bottom = InnerTileRows;
- static constexpr int n_pad_right = InnerTileCols;
-
- // Pointers to methods implementing a generically padded tile and a totally unpadded tile.
- static const TileFn tilefn_generic; /** Generic tile processing function. */
- static const TileFn tilefn_unpadded; /** Tile processor for unpadded tiles. */
-
- // Arrays of methods covering tiles which are padded only on a single side.
- static const TileFn tilefn_top_padded[n_pad_top];
- static const TileFn tilefn_left_padded[n_pad_left];
- static const TileFn tilefn_bottom_padded[n_pad_bottom];
- static const TileFn tilefn_right_padded[n_pad_right];
-};
-
-
-template < int KernelCols, int InnerTileCols, typename T>
-class InputTransformImplTiles<1, KernelCols, 1, InnerTileCols, T>
-{
- public:
- /** Method to transform a tile of the input tensor into the Winograd domain. */
- typedef void (*TileFn)(
- const int n_channels, /** @param[in] Number of channels in the tensor. */
- const T* const inptr_base, /** @param[in] Pointer to the base of the input tile. */
- const int input_row_stride, /** @param[in] Stride between rows of the input tensor. */
- const int input_col_stride, /** @param[in] Stride between columns of the input tensor. */
- T* const mptr_base, /** @param[out] Base pointer to transformed input matrices. */
- const int matrix_stride, /** @param[in] Stride between matrices in the input space. */
- const int _pad_top, /** @param[in] Top padding for unspecialised tiles. */
- const int _pad_left, /** @param[in] Left padding for unspecialised tiles. */
- const int _pad_bottom, /** @param[in] Bottom padding for unspecialised tiles. */
- const int _pad_right /** @param[in] Right padding for unspecialised tiles. */
- );
-
- static TileFn get_tile_specialization(
- const int pad_top,
- const int pad_left,
- const int pad_bottom,
- const int pad_right
- );
-
- // Tile overlaps
- static constexpr int overlap_rows = 0;
- static constexpr int overlap_cols = KernelCols - 1;
-
- private:
- // Maximum padding and number of distinct paddings
- static constexpr int max_pad_left = KernelCols / 2;
- static constexpr int min_pad_left = KernelCols % (InnerTileCols - overlap_cols);
- static constexpr int n_pad_left = iceildiv(max_pad_left, InnerTileCols - overlap_cols);
-
- static constexpr int n_pad_right = InnerTileCols;
-
- // Pointers to methods implementing a generically padded tile and a totally unpadded tile.
- static const TileFn tilefn_generic; /** Generic tile processing function. */
- static const TileFn tilefn_unpadded; /** Tile processor for unpadded tiles. */
-
- // Arrays of methods covering tiles which are padded only on a single side.
- static const TileFn tilefn_left_padded[n_pad_left];
- static const TileFn tilefn_right_padded[n_pad_right];
-};
-
-
-
-template <int KernelRows, int KernelCols, int InnerTileRows, int InnerTileCols, typename T>
-class InputTransformImpl
-{
- public:
- /** Apply the transform to a tensor. */
- static void execute(
- const T* const input, /** Input tensor data */
- const int n_batches, /** Number of batches in input tensor. */
- const int in_batch_stride, /** Stride between batches of the input. */
- const int n_rows, /** Number of rows in input tensor. */
- const int in_row_stride, /** Stride between rows of the input. */
- const int n_cols, /** Number of columns in input tensor. */
- const int in_col_stride, /** Stride between columns of the input. */
- const int n_channels, /** Number of channels in input tensor. */
- const PaddingType padding, /** Padding type. */
- const int tile_M,
- const int tile_N,
- T* const output, /** Base of output matrices. */
- const int matrix_stride, /** Stride between output matrices. */
- const int matrix_batch_stride, /** Stride between batches within the matrix. */
- const int matrix_row_stride /** Stride within matrices. */
- );
-
- private:
- static void process_tile_row(
- const int tile_N,
- int n_channels,
- const T* const input_base,
- const int input_row_stride,
- const int input_col_stride,
- T* const matrix_base,
- const int matrix_stride,
- const int matrix_row_stride,
- const int row_pad_top,
- const int row_pad_left,
- const int row_pad_bottom,
- const int n_cols
- );
-
- using Tiles = InputTransformImplTiles<KernelRows, KernelCols, InnerTileRows, InnerTileCols, T>;
-
- static constexpr int overlap_rows = Tiles::overlap_rows;
- static constexpr int overlap_cols = Tiles::overlap_cols;
-
-
- };
-
-
-template <int KernelRows, int InnerTileRows, typename T>
-class InputTransformImpl<KernelRows, 1, InnerTileRows, 1, T>
-{
- public:
- /** Apply the transform to a tensor. */
- static void execute(
- const T* const input, /** Input tensor data */
- const int n_batches, /** Number of batches in input tensor. */
- const int in_batch_stride, /** Stride between batches of the input. */
- const int n_rows, /** Number of rows in input tensor. */
- const int in_row_stride, /** Stride between rows of the input. */
- const int n_cols, /** Number of columns in input tensor. */
- const int in_col_stride, /** Stride between columns of the input. */
- const int n_channels, /** Number of channels in input tensor. */
- const PaddingType padding, /** Padding type. */
- const int tile_M,
- const int tile_N,
- T* const output, /** Base of output matrices. */
- const int matrix_stride, /** Stride between output matrices. */
- const int matrix_batch_stride, /** Stride between batches within the matrix. */
- const int matrix_row_stride /** Stride within matrices. */
- );
-};
-
-} // namespace (anonymous)
-
-template <int KernelRows, int KernelCols, int InnerTileRows, int InnerTileCols, typename T>
-class InputTransform
-{
- public:
- /***********************************************************************/
- /** Create an InputTransform operator fixed on a given problem and set of
- * pointers.
- */
- InputTransform(
- const T* const input, /** Input tensor data */
- const int n_batches, /** Number of batches in input tensor. */
- const int n_rows, /** Number of rows in input tensor. */
- const int n_cols, /** Number of columns in input tensor. */
- const int n_channels, /** Number of channels in input tensor. */
- const PaddingType padding, /** Padding type. */
- T* const output, /** Base of output matrices. */
- const int matrix_stride, /** Stride between output matrices. */
- const int matrix_row_stride, /** Stride within matrices. */
- const int in_batch_stride=0, /** Stride between input batches. */
- const int in_row_stride=0, /** Stride between input rows. */
- const int in_col_stride=0 /** Stride between input columns. */
- );
-
- /** Get the window of work a given operator can perform. */
- unsigned int get_window() const;
- static constexpr unsigned int WINDOW_BLOCK = 16; // Base size of window
-
- /** Perform work upon a window of the input. */
- void run(const unsigned int start, const unsigned int stop);
-
- /** Apply the transform to a tensor. */
- static void execute(
- const T* const input, /** Input tensor data */
- const int n_batches, /** Number of batches in input tensor. */
- const int in_batch_stride, /** Stride between batches of the input. */
- const int n_rows, /** Number of rows in input tensor. */
- const int in_row_stride, /** Stride between rows of the input. */
- const int n_cols, /** Number of columns in input tensor. */
- const int in_col_stride, /** Stride between columns of the input. */
- const int n_channels, /** Number of channels in input tensor. */
- const PaddingType padding, /** Padding type. */
- const int tile_M,
- const int tile_N,
- T* const output, /** Base of output matrices. */
- const int matrix_stride, /** Stride between output matrices. */
- const int matrix_batch_stride, /** Stride between batches within the matrix. */
- const int matrix_row_stride /** Stride within matrices. */
- );
-
- protected:
- using Transform = InputTransformImpl<KernelRows, KernelCols, InnerTileRows, InnerTileCols, T>;
-
- /* Member values for instance-based API. */
- const T* const _inptr;
- T* const _outptr;
- const int _n_batches, _n_rows, _n_cols, _n_channels, _matrix_stride,
- _matrix_row_stride, _tiles_M, _tiles_N;
- const int _in_col_stride, _in_row_stride, _in_batch_stride;
- const PaddingType _padding_type;
-};
-
-} // namespace winograd
diff --git a/arm_compute/core/NEON/kernels/convolution/winograd/winograd_layer.hpp b/arm_compute/core/NEON/kernels/convolution/winograd/winograd_layer.hpp
new file mode 100644
index 0000000000..9d418bebb4
--- /dev/null
+++ b/arm_compute/core/NEON/kernels/convolution/winograd/winograd_layer.hpp
@@ -0,0 +1,211 @@
+/*
+ * Copyright (c) 2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#pragma once
+
+#include <utility>
+
+#include "arm_gemm_local.hpp"
+#include "arm_gemm.hpp"
+#include "winograd.hpp"
+
+namespace winograd
+{
+
+
+class IWinogradConvolutionLayer
+{
+ public:
+ virtual ~IWinogradConvolutionLayer() = default;
+
+ virtual unsigned int weight_transform_get_window(void) const = 0;
+ virtual void weight_transform_run(unsigned int start, unsigned int stop) = 0;
+
+ virtual ITransform& input_transform(void) = 0; // Expose the input transform
+ virtual ITransform& output_transform(void) = 0; // Expose the output transform
+ virtual arm_gemm::IGemmCommon *gemm(void) = 0; // Expose the underlying GEMM
+};
+
+/** Example of how to construct an ACL-like interface.
+ *
+ * Use `get_weight_storage_size`, `get_input_storage_size` and
+ * `get_output_storage_size` to allocate memory for the convolution engine.
+ * Then create a `WinogradConvolutionLayer`.
+ *
+ * Initialise the weights using `weights_transform.run(...)`.
+ *
+ * For each inference:
+ * 1. Transform the inputs to the Winograd domain using `input_transform.run(...)`
+ * 2. Perform a number of GEMMs using `gemms.run(...)`
+ * 3. Transform the output to the spatial domain using `output_transform.run(...)`
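+ *
+ * A commented usage sketch of this sequence is given after the class
+ * definition below.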
+ */
+template <int OutputTileRows, int OutputTileCols, int KernelRows, int KernelCols,
+ typename TIn, typename TInGEMM, typename TOutGEMM, typename TOut,
+ WinogradRoots Roots>
+class WinogradConvolutionLayer : public IWinogradConvolutionLayer
+{
+ private:
+ static constexpr int InnerTileRows = OutputTileRows + KernelRows - 1;
+ static constexpr int InnerTileCols = OutputTileCols + KernelCols - 1;
+ static constexpr int N_GEMMS = InnerTileRows * InnerTileCols;
+
+ const KernelShape _kernel_shape;
+ const Tensor4DShape _input_shape;
+ const PaddingType _padding;
+ const Tensor4DShape _output_shape;
+ const int _n_output_rows, _n_output_cols;
+ const int _kernel_matrix_stride, _kernel_matrix_row_stride;
+ const int _input_matrix_stride, _input_matrix_row_stride;
+ const int _output_matrix_stride, _output_matrix_row_stride;
+ const int _tile_rows, _tile_cols;
+ const int _m, _k, _n;
+
+ public:
+ using WinogradBase = winograd::WinogradGEMM<OutputTileRows, OutputTileCols, KernelRows, KernelCols, Roots>;
+ using WeightsTransform = typename WinogradBase::template WeightsTransform<TIn, TInGEMM>;
+ using InputTransform = typename WinogradBase::template InputTransform<TIn, TInGEMM>;
+ using WinogradConv = typename WinogradBase::template Convolution<TOut, TIn, TInGEMM, TOutGEMM>;
+ using OutputTransform = typename WinogradBase::template OutputTransform<TOutGEMM, TOut>;
+
+ /* Public member variables. */
+  WeightsTransform weights_transform;  /** Operator to transform weights to the Winograd domain. */
+  InputTransform _input_transform;  /** Operator to transform input to the Winograd domain. */
+  arm_gemm::UniqueGemmCommon<TInGEMM, TOutGEMM> gemms;  /** Operator to perform multiple GEMMs. */
+  OutputTransform _output_transform;  /** Operator to transform output from the Winograd domain. */
+
+ /** Determine how much memory (in units of TIn) to allocate for the
+ * transformed weights.
+ */
+ static unsigned int get_weight_storage_size(
+ const int n_output_channels, /** Number of output feature maps. */
+ const int n_input_channels /** Number of input feature maps. */
+ );
+
+ static unsigned int get_weight_stride(
+ const int n_output_channels, /** Number of output feature maps. */
+ const int n_input_channels /** Number of input feature maps. */
+ );
+
+ static unsigned int get_weight_multi_stride(
+ const int n_output_channels, /** Number of output feature maps. */
+ const int n_input_channels /** Number of input feature maps. */
+ );
+
+ /** Determine how much memory (in units of TIn) to allocate for the
+ * transformed input.
+ */
+ static unsigned int get_input_storage_size(
+ const int n_batches, /** Number of batches in the input tensor. */
+ const int n_channels, /** Number of feature maps in the input tensor. */
+ const int n_rows, /** Number of rows in each feature map. */
+ const int n_cols, /** Number of columns in each feature map. */
+ const bool same_padding /** Use "SAME" padding, otherwise use "VALID". */
+ );
+
+ /** Get the row stride for the A matrix in the Winograd domain. */
+ static unsigned int get_input_stride(
+ const int n_batches, /** Number of batches in the input tensor. */
+ const int n_channels, /** Number of feature maps in the input tensor. */
+ const int n_rows, /** Number of rows in each feature map. */
+ const int n_cols, /** Number of columns in each feature map. */
+ const bool same_padding /** Use "SAME" padding, otherwise use "VALID". */
+ );
+
+ /** Get the stride between A matrices in the Winograd domain. */
+ static unsigned int get_input_multi_stride(
+ const int n_batches, /** Number of batches in the input tensor. */
+ const int n_channels, /** Number of feature maps in the input tensor. */
+ const int n_rows, /** Number of rows in each feature map. */
+ const int n_cols, /** Number of columns in each feature map. */
+ const bool same_padding /** Use "SAME" padding, otherwise use "VALID". */
+ );
+
+ /** Determine how much memory (in units of TOut) to allocate for the
+ * (Winograd domain) output.
+ */
+ static unsigned int get_output_storage_size(
+ const int n_batches, /** Number of batches in the output tensor. */
+ const int n_rows, /** Number of rows in each feature map of the input tensor. */
+ const int n_cols, /** Number of columns in each feature map of the input tensor. */
+ const int n_output_channels, /** Number of feature maps in the output tensor. */
+ const bool same_padding /** Use "SAME" padding, otherwise use "VALID". */
+ );
+
+ static unsigned int get_output_stride(
+ const int n_batches, /** Number of batches in the output tensor. */
+ const int n_rows, /** Number of rows in each feature map of the input tensor. */
+ const int n_cols, /** Number of columns in each feature map of the input tensor. */
+ const int n_output_channels, /** Number of feature maps in the output tensor. */
+ const bool same_padding /** Use "SAME" padding, otherwise use "VALID". */
+ );
+
+ static unsigned int get_output_multi_stride(
+ const int n_batches, /** Number of batches in the output tensor. */
+ const int n_rows, /** Number of rows in each feature map of the input tensor. */
+ const int n_cols, /** Number of columns in each feature map of the input tensor. */
+ const int n_output_channels, /** Number of feature maps in the output tensor. */
+ const bool same_padding /** Use "SAME" padding, otherwise use "VALID". */
+ );
+
+ /** Get the shape (rows, cols) of a feature map of the output tensor. */
+ static std::pair<int, int> get_output_feature_map_shape(
+ const int n_input_rows, /** Number of rows in the input feature map. */
+ const int n_input_cols, /** Number of columns in the input feature map. */
+ const bool same_padding /** Use "SAME" padding, otherwise use "VALID". */
+ );
+
+ /** Create a new Winograd convolution layer.
+ */
+ WinogradConvolutionLayer(
+ const arm_gemm::CPUInfo &cpuinfo, /** Describes CPU properties. */
+ const int n_threads, /** Maximum number of threads used to execute the convolution. */
+ const int n_batches, /** Number of batches in the input and output tensors. */
+ const int n_input_channels, /** Number of feature maps in a batch of the input tensor. */
+ const int n_input_rows, /** Number of rows in a feature map of the input tensor. */
+ const int n_input_cols, /** Number of columns in a feature map of the input tensor. */
+ const int n_output_channels, /** Number of feature maps in the output tensor. */
+ const bool same_padding, /** Use "SAME" padding, otherwise use "VALID". */
+    const TIn* const weights,  /** Pointer to weight tensor in spatial domain. Must be ordered as "Height x Width x Input Feature Maps x Output Feature Maps". */
+ TInGEMM* const weights_storage, /** Pointer to storage for weight tensor in the Winograd domain. Must be at least the size returned by `get_weight_storage_size`. */
+ const TIn* const input, /** Pointer to NHWC ordered input tensor, in the spatial domain. */
+ TInGEMM* const winograd_input, /** Pointer to working space for the input tensor in the Winograd domain. Must be at least the size returned by `get_input_storage_size`. */
+ const TOut* const biases, /** Pointer to biases vector. Pass nullptr if no bias is provided. */
+ TOut* const output, /** Pointer to NHWC ordered output tensor, in the spatial domain. */
+ TOutGEMM* const winograd_output, /** Pointer to working space for the output tensor in the Winograd domain. Must be at least the size returned by `get_output_storage_size`. */
+ const bool pretranspose_B=true, /** Hint that the B matrix can be pretransposed. */
+ arm_gemm::GemmConfig *gemm_cfg=nullptr /** Pointer to GEMM configuration. */
+ );
+
+ /* Utility methods for interacting with the layer. */
+ unsigned int weight_transform_get_window(void) const;
+ void weight_transform_run(const unsigned int start, const unsigned int stop);
+
+ ITransform& input_transform(void);
+ ITransform& output_transform(void);
+
+ /* Get a pointer to the GEMM underlying the Winograd transform. */
+ arm_gemm::IGemmCommon *gemm(void);
+};
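+
+// As referenced in the class documentation, a minimal single-threaded usage
+// sketch. Illustrative only: `cpu_info`, the tensor pointers and the shape
+// variables are assumed to exist, the storage buffers are assumed to have
+// been sized with the `get_*_storage_size` helpers, working-space setup via
+// ITransform::set_working_space is omitted, and the GEMM step is elided
+// because the arm_gemm interface is defined elsewhere.
+//
+//   using Layer = WinogradConvolutionLayer<2, 2, 3, 3, float, float, float,
+//                                          float, WinogradRoots::Integers>;
+//   Layer layer(cpu_info, /* n_threads */ 1, n_batches, n_input_channels,
+//               n_input_rows, n_input_cols, n_output_channels,
+//               /* same_padding */ true, weights, weights_storage, input,
+//               winograd_input, biases, output, winograd_output);
+//
+//   // Once per set of weights:
+//   layer.weight_transform_run(0, layer.weight_transform_get_window());
+//
+//   // For each inference:
+//   ITransform &in_tf = layer.input_transform();
+//   in_tf.run(0, in_tf.get_window());
+//   // ... execute the Winograd-domain GEMMs via layer.gemm() ...
+//   ITransform &out_tf = layer.output_transform();
+//   out_tf.run(0, out_tf.get_window());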
+
+} // namespace winograd
diff --git a/arm_compute/core/NEON/kernels/convolution/winograd/winograd_output_transform.hpp b/arm_compute/core/NEON/kernels/convolution/winograd/winograd_output_transform.hpp
deleted file mode 100644
index 07a0b8666a..0000000000
--- a/arm_compute/core/NEON/kernels/convolution/winograd/winograd_output_transform.hpp
+++ /dev/null
@@ -1,232 +0,0 @@
-/*
- * Copyright (c) 2018 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#pragma once
-
-namespace winograd
-{
-
-
-namespace
-{
-
-template <int KernelRows, int KernelCols, int InnerTileRows, int InnerTileCols, typename T>
-class OutputTransformImplTiles
-{
- public:
- typedef void (*TileFn)(
- const int n_channels, /** @param[in] Number of channels in output tensor */
- const T* const matrix_base, /** @param[in] Base pointer to Winograd output matrices. */
- const int matrix_stride, /** @param[in] Stride between matrices in the output space. */
- const T* const biases, /** @param[in] Pointer to bias vector (may be nullptr). */
- T* const output, /** @param[out] Pointer to output tensor. */
- const int output_row_stride, /** @param[in] Stride across rows of the output tensor. */
- const int output_col_stride, /** @param[in] Stride between columns of the output tensor. */
- const int _pad_bottom, /** @param[in] Bottom padding for unspecialised tiles. */
- const int _pad_right /** @param[in] Right padding for unspecialised tiles. */
- );
-
- static TileFn get_tile_specialization(
- const int pad_bottom,
- const int pad_right
- );
-
- static constexpr unsigned int OutputTileRows = InnerTileRows - KernelRows + 1;
- static constexpr unsigned int OutputTileCols = InnerTileCols - KernelCols + 1;
-
- private:
- static constexpr unsigned int n_pad_bottom = OutputTileRows - 1;
- static constexpr unsigned int n_pad_right = OutputTileCols - 1;
-
- static const TileFn tilefn_generic; /** Generic tile processing function. */
- static const TileFn tilefn_unpadded; /** Tile processor for unpadded tiles. */
- static const TileFn tilefn_bottom_padded[n_pad_bottom]; /** Bottom padding only. */
- static const TileFn tilefn_right_padded[n_pad_right]; /** Right padding only. */
-};
-
-template <int KernelCols, int InnerTileCols, typename T>
-class OutputTransformImplTiles<1, KernelCols, 1, InnerTileCols, T>
-{
- public:
- typedef void (*TileFn)(
- const int n_channels, /** @param[in] Number of channels in output tensor. */
- const T* const matrix_base, /** @param[in] Base pointer to Winograd output matrices. */
- const int matrix_stride, /** @param[in] Stride between matrices in the output space. */
- const T* const biases, /** @param[in] Pointer to bias vector (may be nullptr). */
- T* const output, /** @param[out] Pointer to output tensor. */
- const int output_row_stride, /** @param[in] Stride across rows of the output tensor. */
- const int output_col_stride, /** @param[in] Stride between columns of the output tensor. */
- const int _pad_bottom, /** @param[in] Bottom padding for unspecialised tiles. */
- const int _pad_right /** @param[in] Right padding for unspecialised tiles. */
- );
-
- static TileFn get_tile_specialization(
- const int pad_bottom,
- const int pad_right
- );
-
- static constexpr unsigned int OutputTileRows = 1;
- static constexpr unsigned int OutputTileCols = InnerTileCols - KernelCols + 1;
-
- private:
- static constexpr unsigned int n_pad_right = OutputTileCols - 1;
-
- static const TileFn tilefn_unpadded; /** Tile processor for unpadded tiles. */
- static const TileFn tilefn_right_padded[n_pad_right]; /** Right padding only. */
-};
-
-template <int KernelRows, int KernelCols, int InnerTileRows, int InnerTileCols, typename T>
-class OutputTransformImpl
-{
- private:
- static void process_tile_row(
- const int tile_N,
- const int n_channels,
- const T* const matrix_base,
- const int matrix_stride,
- const int matrix_row_stride,
- const T* const biases,
- T* const output,
- const int output_row_stride,
- const int output_col_stride,
- const int row_pad_bottom,
- const int row_pad_right
- );
-
- using Tiles = OutputTransformImplTiles<
- KernelRows, KernelCols, InnerTileRows, InnerTileCols, T
- >;
-
- public:
- /** Apply the output transform to a tensor. */
- static void execute(
- const int n_batches,
- const int out_batch_stride,
- const int n_rows,
- const int out_row_stride,
- const int n_cols,
- const int out_col_stride,
- const int n_channels,
- const T* const matrix_base,
- const int matrix_stride,
- const int matrix_row_stride,
- const T* const biases,
- T* const output
- );
-
- static constexpr unsigned int OutputTileRows = Tiles::OutputTileRows;
- static constexpr unsigned int OutputTileCols = Tiles::OutputTileCols;
-};
-
-template <int KernelRows, int InnerTileRows, typename T>
-class OutputTransformImpl<KernelRows, 1, InnerTileRows, 1, T>
-{
- public:
- /** Apply the output transform to a tensor. */
- static void execute(
- const int n_batches,
- const int out_batch_stride,
- const int n_rows,
- const int out_row_stride,
- const int n_cols,
- const int out_col_stride,
- const int n_channels,
- const T* const matrix_base,
- const int matrix_stride,
- const int matrix_row_stride,
- const T* const biases,
- T* const output
- );
-
- static constexpr unsigned int OutputTileRows = InnerTileRows - KernelRows + 1;
- static constexpr unsigned int OutputTileCols = 1;
-};
-
-} // namespace (anonymous)
-
-template <int KernelRows, int KernelCols, int InnerTileRows, int InnerTileCols, typename T>
-class OutputTransform
-{
- public:
- /***********************************************************************/
- /** Create an OutputTransform operator fixed on a given problem and set
- * of pointers.
- */
- OutputTransform(
- const T* const matrix_base, /** Pointer to base of matrices. */
- const int matrix_stride, /** Stride between matrices. */
- const int matrix_row_stride, /** Stride within a matrix. */
- const T* const biases, /** Pointer to biases vector. */
- T* const output, /** Pointer to output tensor. */
- const int n_batches, /** Number of batches in output tensor. */
- const int n_rows, /** Number of rows in output tensor. */
- const int n_cols, /** Number of columns in output tensor. */
- const int n_channels, /** Number of channels in output tensor. */
- const int out_batch_stride=0, /** Output batch stride. */
- const int out_row_stride=0, /** Output row stride. */
- const int out_col_stride=0 /** Output column stride. */
- );
-
- /** Get the window of work a given operator can perform. */
- unsigned int get_window() const;
- static constexpr unsigned int WINDOW_BLOCK = 16; // Base size of window
-
- /** Perform work upon a window of the input. */
- void run(const unsigned int start, const unsigned int stop);
-
- /** Apply the transform to create a tensor. */
- static void execute(
- const int n_batches,
- const int out_batch_stride,
- const int n_rows,
- const int out_row_stride,
- const int n_cols,
- const int out_col_stride,
- const int n_channels,
- const T* const matrix_base,
- const int matrix_stride,
- const int matrix_row_stride,
- const T* const biases,
- T* const output
- );
-
- private:
- using Transform = OutputTransformImpl<
- KernelRows, KernelCols, InnerTileRows, InnerTileCols, T
- >;
-
- static constexpr unsigned int OutputTileRows = Transform::OutputTileRows;
- static constexpr unsigned int OutputTileCols = Transform::OutputTileCols;
-
- /** Member constants for instances of the transform. */
- const T* const _matrix_base;
- const T* const _biases;
- const int _matrix_stride, _matrix_row_stride;
- T* const _outptr;
- const int _n_batches, _n_rows, _n_cols, _n_channels, _tile_M, _tile_N;
- const int _out_col_stride, _out_row_stride, _out_batch_stride;
-};
-
-} // namespace winograd
-
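
The tile-size constants that recur throughout the deleted header (and survive in the new `winograd.hpp`) all follow from one identity of the Winograd construction: an inner tile of side $n$ processed with a kernel of side $r$ yields an output tile of side $m = n - r + 1$. For the cases instantiated in this patch:

$$ m = n - r + 1, \qquad 6 - 3 + 1 = 4 \ \ (F(4\times4, 3\times3)), \qquad 6 - 5 + 1 = 2 \ \ (F(2\times2, 5\times5)). $$
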
diff --git a/arm_compute/runtime/NEON/functions/NEWinogradConvolutionLayer.h b/arm_compute/runtime/NEON/functions/NEWinogradConvolutionLayer.h
index 292c70b87c..ad37ba51ab 100644
--- a/arm_compute/runtime/NEON/functions/NEWinogradConvolutionLayer.h
+++ b/arm_compute/runtime/NEON/functions/NEWinogradConvolutionLayer.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2018 ARM Limited.
+ * Copyright (c) 2017-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -113,6 +113,8 @@ private:
CPPPermute _permute_input;
CPPPermute _permute_weights;
CPPPermute _permute_output;
+ Tensor _input_transformed;
+ Tensor _output_transformed;
Tensor _input_workspace;
Tensor _output_workspace;
Tensor _kernel_storage;
diff --git a/src/core/NEON/kernels/NEWinogradConvolutionLayerKernel.cpp b/src/core/NEON/kernels/NEWinogradConvolutionLayerKernel.cpp
index 3e76a080fd..263ded0b84 100644
--- a/src/core/NEON/kernels/NEWinogradConvolutionLayerKernel.cpp
+++ b/src/core/NEON/kernels/NEWinogradConvolutionLayerKernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2018 ARM Limited.
+ * Copyright (c) 2017-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -238,8 +238,7 @@ unsigned int NEWinogradLayerTransformWeightsKernel<T, OutputTileRows, OutputTile
template <typename T, int OutputTileRows, int OutputTileCols, int KernelRows, int KernelCols>
NEWinogradLayerTransformWeightsKernel<T, OutputTileRows, OutputTileCols, KernelRows, KernelCols>::NEWinogradLayerTransformWeightsKernel()
- : _weights_hwio(nullptr), _output(nullptr), _matrix_stride(0), _num_output_channels(0), _num_input_channels(0)
-
+ : _transform(nullptr), _weights_hwio(nullptr), _output(nullptr), _matrix_stride(0), _num_output_channels(0), _num_input_channels(0)
{
}
@@ -263,11 +262,10 @@ void NEWinogradLayerTransformWeightsKernel<T, OutputTileRows, OutputTileCols, Ke
_matrix_stride = matrix_stride;
_num_output_channels = num_output_channels;
_num_input_channels = num_input_channels;
+ _transform = arm_compute::support::cpp14::make_unique<WeightsTransform>(num_output_channels, num_input_channels);
- const int matrix_row_stride = roundup(num_output_channels, WinogradConv::N_BLOCK);
- WeightsTransform transform(nullptr, nullptr, matrix_stride, matrix_row_stride, num_output_channels, num_input_channels);
- Window win;
- auto win_last = transform.get_window();
+ Window win;
+ auto win_last = _transform->get_window();
win.set(Window::DimX, Window::Dimension(0, win_last, 1));
INEKernel::configure(win);
}
@@ -278,12 +276,14 @@ void NEWinogradLayerTransformWeightsKernel<T, OutputTileRows, OutputTileCols, Ke
{
ARM_COMPUTE_UNUSED(info);
ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
+ const size_t fst = window.x().start();
+ const size_t lst = window.x().end();
+ _transform->set_weight_tensor(_weights_hwio->buffer());
+ const int matrix_row_stride = roundup(_num_output_channels, WinogradConv::N_BLOCK);
+ _transform->set_output_matrices(_output->buffer(), _matrix_stride, matrix_row_stride);
+ _transform->set_working_space(_output->buffer());
- const int matrix_row_stride = roundup(_num_output_channels, WinogradConv::N_BLOCK);
- WeightsTransform transform(reinterpret_cast<T *>(_weights_hwio->buffer()), reinterpret_cast<T *>(_output->buffer()), _matrix_stride, matrix_row_stride, _num_output_channels, _num_input_channels);
- const size_t fst = window.x().start();
- const size_t lst = window.x().end();
- transform.run(fst, lst);
+ _transform->run(fst, lst);
}
template <typename T, int OutputTileRows, int OutputTileCols, int KernelRows, int KernelCols>
@@ -331,6 +331,12 @@ unsigned int NEWinogradLayerTransformInputKernel<T, OutputTileRows, OutputTileCo
}
template <typename T, int OutputTileRows, int OutputTileCols, int KernelRows, int KernelCols>
+unsigned int NEWinogradLayerTransformInputKernel<T, OutputTileRows, OutputTileCols, KernelRows, KernelCols>::get_working_space_size(unsigned int num_threads) const
+{
+ return _transform->get_working_space_size(num_threads) / sizeof(T);
+}
+
+template <typename T, int OutputTileRows, int OutputTileCols, int KernelRows, int KernelCols>
int NEWinogradLayerTransformInputKernel<T, OutputTileRows, OutputTileCols, KernelRows, KernelCols>::get_matrix_stride(
const KernelShape &kernel_shape, const Tensor4DShape &input_shape, const PaddingType padding_type) const
{
@@ -339,7 +345,8 @@ int NEWinogradLayerTransformInputKernel<T, OutputTileRows, OutputTileCols, Kerne
template <typename T, int OutputTileRows, int OutputTileCols, int KernelRows, int KernelCols>
NEWinogradLayerTransformInputKernel<T, OutputTileRows, OutputTileCols, KernelRows, KernelCols>::NEWinogradLayerTransformInputKernel()
- : _input_nhwc(), _num_batches(0), _num_rows(0), _num_cols(0), _num_channels(0), _padding(), _output(nullptr), _matrix_stride(0)
+ : _transform(nullptr), _input_nhwc(nullptr), _num_batches(0), _num_rows(0), _num_cols(0), _num_channels(0), _padding(), _output(nullptr), _matrix_stride(0), _padding_top(), _padding_left(),
+ _padding_right(), _padding_bottom(), _workspace(nullptr)
{
}
@@ -352,7 +359,8 @@ void NEWinogradLayerTransformInputKernel<T, OutputTileRows, OutputTileCols, Kern
const int num_channels, /* Number of channels in input tensor. */
const PaddingType padding, /* Padding type. */
ITensor *output, /* Base of output matrices. */
- const int matrix_stride) /* Stride between output matrices. */
+ const int matrix_stride, /* Stride between output matrices. */
+ ITensor *workspace)
{
_input_nhwc = input_nhwc;
_num_batches = num_batches;
@@ -362,9 +370,28 @@ void NEWinogradLayerTransformInputKernel<T, OutputTileRows, OutputTileCols, Kern
_padding = padding;
_output = output;
_matrix_stride = matrix_stride;
- InputTransform transform(nullptr, num_batches, num_rows, num_cols, num_channels, padding, nullptr, matrix_stride, num_channels);
- Window win;
- auto win_last = transform.get_window();
+ _workspace = workspace;
+
+ _padding_top = (padding == PADDING_SAME) ? (KernelRows - 1) / 2 : 0;
+ _padding_left = (padding == PADDING_SAME) ? (KernelCols - 1) / 2 : 0;
+ _padding_bottom = (padding == PADDING_SAME) ? iceildiv(KernelRows - 1, 2) : 0;
+ _padding_right = (padding == PADDING_SAME) ? iceildiv(KernelCols - 1, 2) : 0;
+
+ _transform = arm_compute::support::cpp14::make_unique<InputTransform>(
+ KernelRows,
+ KernelCols,
+ num_batches,
+ num_rows,
+ num_cols,
+ num_channels,
+ _padding_top, /**< Padding to apply to the top of the image. */
+ _padding_left, /**< Padding to apply to the left of the image. */
+ _padding_bottom, /**< Padding to apply to the bottom of the image. */
+ _padding_right /**< Padding to apply to the right of the image. */
+ );
+
+ Window win;
+ auto win_last = _transform->get_window();
win.set(Window::DimX, Window::Dimension(0, win_last, 1));
INEKernel::configure(win);
}
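
The SAME-padding split computed in `configure` is worth a worked example: the surplus of `KernelRows - 1` rows is divided with the floor going before the image and the ceiling after it, so odd kernels pad symmetrically while even kernels pad one element more on the bottom/right. A minimal sketch, assuming `iceildiv(a, b) == (a + b - 1) / b` as in the library's common utilities:

    // pad_before mirrors (K - 1) / 2; pad_after mirrors iceildiv(K - 1, 2).
    constexpr int pad_before(int k) { return (k - 1) / 2; }
    constexpr int pad_after(int k)  { return ((k - 1) + 1) / 2; }

    static_assert(pad_before(3) == 1 && pad_after(3) == 1, "3x3 pads symmetrically");
    static_assert(pad_before(5) == 2 && pad_after(5) == 2, "5x5 pads symmetrically");
    static_assert(pad_before(2) == 0 && pad_after(2) == 1, "even kernels pad more after");
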
@@ -374,22 +401,25 @@ void NEWinogradLayerTransformInputKernel<T, OutputTileRows, OutputTileCols, Kern
{
ARM_COMPUTE_UNUSED(info);
ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
+ ARM_COMPUTE_ERROR_ON_NULLPTR(_workspace);
- const int element_size_in_bytes = _input_nhwc->info()->element_size();
- const int input_col_stride = _input_nhwc->info()->strides_in_bytes().y() / element_size_in_bytes;
- const int input_row_stride = _input_nhwc->info()->strides_in_bytes().z() / element_size_in_bytes;
- const int input_batch_stride = _input_nhwc->info()->strides_in_bytes()[3] / element_size_in_bytes;
- const auto input_nhwc_ptr = reinterpret_cast<const T *>(_input_nhwc->buffer() + _input_nhwc->info()->offset_first_element_in_bytes());
- auto output_ptr = reinterpret_cast<T *>(_output->buffer() + _output->info()->offset_first_element_in_bytes());
- InputTransform input_transform(input_nhwc_ptr,
- _num_batches, _num_rows, _num_cols, _num_channels, _padding,
- output_ptr,
- _matrix_stride, _num_channels, input_batch_stride, input_row_stride, input_col_stride);
+ const int element_size_in_bytes = _input_nhwc->info()->element_size();
+ const int input_col_stride = _input_nhwc->info()->strides_in_bytes().y() / element_size_in_bytes;
+ const int input_row_stride = _input_nhwc->info()->strides_in_bytes().z() / element_size_in_bytes;
+ const int input_batch_stride = _input_nhwc->info()->strides_in_bytes()[3] / element_size_in_bytes;
+ const auto input_nhwc_ptr = reinterpret_cast<const T *>(_input_nhwc->buffer() + _input_nhwc->info()->offset_first_element_in_bytes());
+ auto output_ptr = reinterpret_cast<T *>(_output->buffer() + _output->info()->offset_first_element_in_bytes());
+ ARM_COMPUTE_ERROR_ON_NULLPTR(output_ptr);
+
+ _transform->set_input_tensor(input_nhwc_ptr, input_batch_stride, input_row_stride, input_col_stride);
+ _transform->set_output_matrices(output_ptr, _matrix_stride, _num_channels);
+
+ _transform->set_working_space(_workspace->buffer());
// The code below cannot be moved to configure because the tensor buffers haven't been allocated at that point
const size_t fst = window.x().start();
const size_t lst = window.x().end();
- input_transform.run(fst, lst);
+ _transform->run(fst, lst, info.thread_id);
}
template <typename T, int OutputTileRows, int OutputTileCols, int KernelRows, int KernelCols>
@@ -435,11 +465,18 @@ unsigned int NEWinogradLayerTransformOutputKernel<T, OutputTileRows, OutputTileC
template <typename T, int OutputTileRows, int OutputTileCols, int KernelRows, int KernelCols>
NEWinogradLayerTransformOutputKernel<T, OutputTileRows, OutputTileCols, KernelRows, KernelCols>::NEWinogradLayerTransformOutputKernel()
- : _biases(nullptr), _output_workspace(nullptr), _matrix_stride(0), _matrix_row_stride(0), _output_nhwc(nullptr), _num_batches(0), _num_rows(0), _num_cols(0), _num_channels(0)
+ : _transform(nullptr), _biases(nullptr), _transformed_output(nullptr), _workspace(nullptr), _matrix_stride(0), _matrix_row_stride(0), _output_nhwc(nullptr), _num_batches(0), _num_rows(0),
+ _num_cols(0), _num_channels(0)
{
}
template <typename T, int OutputTileRows, int OutputTileCols, int KernelRows, int KernelCols>
+unsigned int NEWinogradLayerTransformOutputKernel<T, OutputTileRows, OutputTileCols, KernelRows, KernelCols>::get_working_space_size(unsigned int num_threads) const
+{
+ return _transform->get_working_space_size(num_threads) / sizeof(T);
+}
+
+template <typename T, int OutputTileRows, int OutputTileCols, int KernelRows, int KernelCols>
int NEWinogradLayerTransformOutputKernel<T, OutputTileRows, OutputTileCols, KernelRows, KernelCols>::get_matrix_stride(
const KernelShape &kernel_shape, const Tensor4DShape &input_shape, const PaddingType padding_type) const
{
@@ -455,28 +492,29 @@ Tensor4DShape NEWinogradLayerTransformOutputKernel<T, OutputTileRows, OutputTile
template <typename T, int OutputTileRows, int OutputTileCols, int KernelRows, int KernelCols>
void NEWinogradLayerTransformOutputKernel<T, OutputTileRows, OutputTileCols, KernelRows, KernelCols>::configure(
const ITensor *biases,
- const ITensor *output_workingspace,
+ const ITensor *transformed_output,
const int matrix_stride,
ITensor *output_nhwc,
const int num_batches,
const int num_rows,
const int num_cols,
- const int num_channels)
-{
- _biases = biases;
- _output_workspace = output_workingspace;
- _matrix_stride = matrix_stride;
- _matrix_row_stride = roundup(num_channels, WinogradConv::N_BLOCK);
- _output_nhwc = output_nhwc;
- _num_batches = num_batches;
- _num_rows = num_rows;
- _num_cols = num_cols;
- _num_channels = num_channels;
+ const int num_channels,
+ ITensor *workspace)
+{
+ _biases = biases;
+ _workspace = workspace;
+ _transformed_output = transformed_output;
+ _matrix_stride = matrix_stride;
+ _matrix_row_stride = roundup(num_channels, WinogradConv::N_BLOCK);
+ _output_nhwc = output_nhwc;
+ _num_batches = num_batches;
+ _num_rows = num_rows;
+ _num_cols = num_cols;
+ _num_channels = num_channels;
// We don't have the biases buffer at this stage as it hasn't been allocated; the OutputTransform is created here only to compute the window
- OutputTransform output_transform(nullptr, _matrix_stride, _matrix_row_stride, nullptr, nullptr, _num_batches, _num_rows, _num_cols, _num_channels);
-
+ _transform = arm_compute::support::cpp14::make_unique<OutputTransform>(num_batches, num_rows, num_cols, num_channels);
Window win;
- auto win_last = output_transform.get_window();
+ auto win_last = _transform->get_window();
win.set(Window::DimX, Window::Dimension(0, win_last, 1));
_output_nhwc->info()->set_valid_region(ValidRegion(Coordinates(), _output_nhwc->info()->tensor_shape()));
@@ -488,22 +526,22 @@ void NEWinogradLayerTransformOutputKernel<T, OutputTileRows, OutputTileCols, Ker
{
ARM_COMPUTE_UNUSED(info);
ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
- ARM_COMPUTE_ERROR_ON_NULLPTR(_output_workspace);
+ ARM_COMPUTE_ERROR_ON_NULLPTR(_workspace);
+ ARM_COMPUTE_ERROR_ON_NULLPTR(_transformed_output);
ARM_COMPUTE_ERROR_ON_NULLPTR(_output_nhwc);
- const int out_batch_stride = 0;
+ const int out_batch_stride = _output_nhwc->info()->strides_in_bytes()[3] / sizeof(T);
const int out_row_stride = _output_nhwc->info()->strides_in_bytes()[2] / sizeof(T);
const int out_col_stride = _output_nhwc->info()->strides_in_bytes()[1] / sizeof(T);
- OutputTransform output_transform(reinterpret_cast<T *>(_output_workspace->buffer()), _matrix_stride, _matrix_row_stride,
- (_biases ? reinterpret_cast<T *>(_biases->buffer() + _biases->info()->offset_first_element_in_bytes()) : nullptr),
- reinterpret_cast<T *>(_output_nhwc->buffer() + _output_nhwc->info()->offset_first_element_in_bytes()),
- _num_batches, _num_rows, _num_cols, _num_channels, out_batch_stride, out_row_stride, out_col_stride);
-
+ _transform->set_input_matrices(_transformed_output->buffer(), _matrix_stride, _matrix_row_stride);
+ _transform->set_bias((_biases ? reinterpret_cast<T *>(_biases->buffer() + _biases->info()->offset_first_element_in_bytes()) : nullptr));
+ _transform->set_output_tensor(_output_nhwc->buffer() + _output_nhwc->info()->offset_first_element_in_bytes(), out_batch_stride, out_row_stride, out_col_stride);
+ _transform->set_working_space(_workspace->buffer());
// The code below cannot be moved to configure because the biases buffer hasn't been allocated at that point
const size_t fst = window.x().start();
const size_t lst = window.x().end();
- output_transform.run(fst, lst);
+ _transform->run(fst, lst, info.thread_id);
}
template <typename T, int OutputTileRows, int OutputTileCols, int KernelRows, int KernelCols>
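
A note on the new `run(fst, lst, info.thread_id)` calls used by the input and output transform kernels above: `get_working_space_size(num_threads)` sizes the scratch buffer for all threads at once, so each worker must address a disjoint slice of it. A sketch of the addressing (names hypothetical, not the library's API):

    #include <cstddef>

    // Thread t's private slice of a workspace sized for n_threads workers.
    float *thread_slice(float *workspace, std::size_t total_elements,
                        unsigned int n_threads, unsigned int thread_id)
    {
      return workspace + std::size_t(thread_id) * (total_elements / n_threads);
    }
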
diff --git a/src/core/NEON/kernels/convolution/winograd/batched_blocked_gemm.cpp b/src/core/NEON/kernels/convolution/winograd/batched_blocked_gemm.cpp
deleted file mode 100644
index ac83bf9dd2..0000000000
--- a/src/core/NEON/kernels/convolution/winograd/batched_blocked_gemm.cpp
+++ /dev/null
@@ -1,82 +0,0 @@
-/*
- * Copyright (c) 2017 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include "arm_compute/core/NEON/kernels/convolution/winograd/batched_blocked_gemm.hpp"
-#include "arm_compute/core/NEON/kernels/convolution/winograd/gemm.hpp"
-
-using namespace winograd;
-
-template <const int MB, const int NB, typename TIn, typename TOut>
-BatchedBlockedGemm<MB, NB, TIn, TOut>::BatchedBlockedGemm(
- const unsigned int n_gemms,
- const int M, const int K, const int N,
- const int a_matrix_stride,
- const int a_row_stride,
- const int b_matrix_stride,
- const int b_row_stride,
- const int c_matrix_stride,
- const int c_row_stride,
- const TIn* const a_ptr,
- const TIn* const b_ptr,
- TOut* const c_ptr
-) : n_gemms(n_gemms), M(M), N(N), K(K),
- a_matrix_stride(a_matrix_stride),
- a_row_stride(a_row_stride),
- b_matrix_stride(b_matrix_stride),
- b_row_stride(b_row_stride),
- c_matrix_stride(c_matrix_stride),
- c_row_stride(c_row_stride),
- a_ptr(a_ptr), b_ptr(b_ptr), c_ptr(c_ptr)
-{
-}
-
-template <const int MBlock, const int NBlock, typename TIn, typename TOut>
-unsigned int BatchedBlockedGemm<MBlock, NBlock, TIn, TOut>::get_window() const
-{
- return n_gemms;
-}
-
-template <const int MBlock, const int NBlock, typename TIn, typename TOut>
-void BatchedBlockedGemm<MBlock, NBlock, TIn, TOut>::run(
- const unsigned int start, const unsigned int stop
-)
-{
- // Perform the specified GEMMs
- for (unsigned int i = start; i < stop; i++)
- {
- // Get pointers to the relevant matrices
- const TIn* const mtr_a = a_ptr + i*a_matrix_stride;
- const TIn* const mtr_b = b_ptr + i*b_matrix_stride;
- TOut* const mtr_c = c_ptr + i*c_matrix_stride;
-
- // Perform the GEMM
- BlockedGemm<MBlock, NBlock, TIn, TOut>(
- mtr_a, mtr_b, mtr_c, M, K, N,
- a_row_stride, b_row_stride, c_row_stride
- );
- }
-}
-
-template class winograd::BatchedBlockedGemm<4, 16, float, float>;
-
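
The window of the deleted class was simply `n_gemms`, reflecting how Winograd decomposes the convolution: one independent GEMM per inner-tile element, i.e. $6 \times 6 = 36$ GEMMs for the 6x6 inner tiles used here, each multiplying a (number-of-tiles x input-channels) matrix by an (input-channels x output-channels) matrix. The refactoring delegates this batch of GEMMs to arm_gemm, reached through the `gemm()` accessor on the new layer class.
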
diff --git a/src/core/NEON/kernels/convolution/winograd/padding.cpp b/src/core/NEON/kernels/convolution/winograd/padding.cpp
new file mode 100644
index 0000000000..46fe57c7c9
--- /dev/null
+++ b/src/core/NEON/kernels/convolution/winograd/padding.cpp
@@ -0,0 +1,166 @@
+/*
+ * Copyright (c) 2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include <cstring>
+#include <cstdint>
+
+#include "padding.hpp"
+
+namespace padding
+{
+
+template <typename T>
+void copy_and_pad_tile(
+ const unsigned int tile_rows,
+ const unsigned int tile_cols,
+ const unsigned int n_channels,
+ const T* const inptr,
+ const unsigned int in_row_stride,
+ const unsigned int in_col_stride,
+ T* const outptr,
+ const unsigned int out_row_stride,
+ const unsigned int out_col_stride,
+ const unsigned int pad_top,
+ const unsigned int pad_left,
+ const unsigned int pad_bottom,
+ const unsigned int pad_right,
+ const T pad_value
+)
+{
+ for (unsigned int out_i = 0; out_i < tile_rows; out_i++)
+ {
+ for (unsigned int out_j = 0; out_j < tile_cols; out_j++)
+ {
+ T* const output = outptr + out_i*out_row_stride + out_j*out_col_stride;
+
+ if (out_i < pad_top || tile_rows - pad_bottom <= out_i ||
+ out_j < pad_left || tile_cols - pad_right <= out_j)
+ {
+ for (unsigned int n = 0; n < n_channels; n++)
+ {
+ output[n] = pad_value;
+ }
+ }
+ else
+ {
+ const auto in_i = out_i - pad_top, in_j = out_j - pad_left;
+ const T* const input = inptr + in_i*in_row_stride + in_j*in_col_stride;
+ std::memcpy(output, input, n_channels * sizeof(T));
+ }
+ }
+ }
+}
+
+template void copy_and_pad_tile(
+ unsigned int, unsigned int, unsigned int,
+ const uint8_t *, unsigned int, unsigned int,
+ uint8_t *, unsigned int, unsigned int,
+ unsigned int, unsigned int, unsigned int, unsigned int, uint8_t
+);
+
+template void copy_and_pad_tile(
+ unsigned int, unsigned int, unsigned int,
+ const float *, unsigned int, unsigned int,
+ float *, unsigned int, unsigned int,
+ unsigned int, unsigned int, unsigned int, unsigned int, float
+);
+
+template <unsigned int TileRows, unsigned int TileCols>
+void CopyCropped<TileRows, TileCols>::execute(
+ const size_t size,
+ const void * const inptr,
+ const size_t in_row_stride,
+ const size_t in_col_stride,
+ void * const outptr,
+ const size_t out_row_stride,
+ const size_t out_col_stride,
+ const unsigned int pad_top,
+ const unsigned int pad_left,
+ const unsigned int pad_bottom,
+ const unsigned int pad_right
+)
+{
+ for (unsigned int out_i = 0, in_i = pad_top; in_i < TileRows - pad_bottom; out_i++, in_i++)
+ {
+ for (unsigned int out_j = 0, in_j = pad_left; in_j < TileCols - pad_right; out_j++, in_j++)
+ {
+ std::memcpy(
+ static_cast<uint8_t *>(outptr) + out_i*out_row_stride + out_j*out_col_stride,
+ static_cast<const uint8_t *>(inptr) + in_i*in_row_stride + in_j*in_col_stride,
+ size
+ );
+ }
+ }
+}
+
+template class CopyCropped<2, 2>;
+template class CopyCropped<3, 3>;
+template class CopyCropped<4, 4>;
+
+template <typename T>
+void crop_and_copy_tile(
+ unsigned int tile_rows,
+ unsigned int tile_cols,
+ unsigned int n_channels,
+ const T *inptr,
+ unsigned int in_row_stride,
+ unsigned int in_col_stride,
+ T *outptr,
+ unsigned int out_row_stride,
+ unsigned int out_col_stride,
+ unsigned int crop_top,
+ unsigned int crop_left,
+ unsigned int crop_bottom,
+ unsigned int crop_right
+)
+{
+ for (unsigned int out_i = 0, in_i = crop_top; in_i < tile_rows - crop_bottom; out_i++, in_i++)
+ {
+ for (unsigned int out_j = 0, in_j = crop_left; in_j < tile_cols - crop_right; out_j++, in_j++)
+ {
+ std::memcpy(
+ outptr + out_i*out_row_stride + out_j*out_col_stride,
+ inptr + in_i*in_row_stride + in_j*in_col_stride,
+ sizeof(T) * n_channels
+ );
+ }
+ }
+}
+
+template void crop_and_copy_tile(
+ unsigned int tile_rows,
+ unsigned int tile_cols,
+ unsigned int n_channels,
+ const float *inptr,
+ unsigned int in_row_stride,
+ unsigned int in_col_stride,
+ float *outptr,
+ unsigned int out_row_stride,
+ unsigned int out_col_stride,
+ unsigned int crop_top,
+ unsigned int crop_left,
+ unsigned int crop_bottom,
+ unsigned int crop_right
+);
+
+} // namespace padding
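
A usage sketch for `padding::copy_and_pad_tile` (relying on the float instantiation above, and assuming `padding.hpp` is on the include path): embed a 2x2 single-channel tile in a 4x4 tile with one element of zero padding on every side. Strides are in elements, matching the function's pointer arithmetic.

    #include <cstdio>
    #include "padding.hpp"

    int main()
    {
      const float in[2][2] = {{1.f, 2.f}, {3.f, 4.f}};
      float out[4][4];

      padding::copy_and_pad_tile(
        4, 4, 1,          // output tile rows/cols, channels
        &in[0][0], 2, 1,  // input pointer, row stride, col stride
        &out[0][0], 4, 1, // output pointer, row stride, col stride
        1, 1, 1, 1,       // pad top/left/bottom/right
        0.0f              // pad value
      );
      // out is now: 0 0 0 0 / 0 1 2 0 / 0 3 4 0 / 0 0 0 0
      for (int i = 0; i < 4; i++)
      {
        std::printf("%g %g %g %g\n", out[i][0], out[i][1], out[i][2], out[i][3]);
      }
      return 0;
    }
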
diff --git a/src/core/NEON/kernels/convolution/winograd/transforms/input_6x6_fp32.cpp b/src/core/NEON/kernels/convolution/winograd/transforms/input_6x6_fp32.cpp
deleted file mode 100644
index 893122cc45..0000000000
--- a/src/core/NEON/kernels/convolution/winograd/transforms/input_6x6_fp32.cpp
+++ /dev/null
@@ -1,376 +0,0 @@
-/*
- * Copyright (c) 2017 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include "arm_compute/core/NEON/kernels/convolution/winograd/transforms/input.hpp"
-#include "arm_compute/core/NEON/kernels/convolution/winograd/winograd_gemm.hpp"
-#include "arm_compute/core/NEON/kernels/convolution/common/arm.hpp"
-
-namespace
-{
-
-template <bool Specialized, int PadTop=0, int PadLeft=0, int PadBottom=0, int PadRight=0>
-void winograd_input_transform_6x6_fp32_process_tile(
- int n_channels,
- const float* const input_base,
- const int input_row_stride,
- const int input_col_stride,
- float* const matrix_base,
- const int matrix_stride,
- const int _pad_top,
- const int _pad_left,
- const int _pad_bottom,
- const int _pad_right
-)
-{
- const int pad_top = Specialized ? PadTop : _pad_top;
- const int pad_left = Specialized ? PadLeft : _pad_left;
- const int pad_bottom = Specialized ? PadBottom : _pad_bottom;
- const int pad_right = Specialized ? PadRight : _pad_right;
-
- constexpr int inner_tile_rows = 6;
- constexpr int inner_tile_cols = 6;
-
- const int cells_i = inner_tile_rows - pad_bottom;
- const int cells_j = inner_tile_cols - pad_right;
-
- float *outptr = matrix_base;
-
- // Get pointers into the input tile
- const float *x_ptrs[inner_tile_rows][inner_tile_cols];
- for (int i = pad_top, xi = 0; i < cells_i; i++, xi++)
- {
- // Get a pointer into the row
- const float* const row_ptr = input_base + xi*input_row_stride;
-
- for (int j = pad_left, xj = 0; j < cells_j; j++, xj++)
- {
- x_ptrs[i][j] = row_ptr + xj*input_col_stride;
- }
- }
-
- // Matrices used/computed in this kernel.
- float x[inner_tile_rows][inner_tile_cols];
- float XTx[inner_tile_rows][inner_tile_cols];
- float U[inner_tile_rows][inner_tile_cols];
- for (int i = 0; i < inner_tile_rows; i++)
- {
- for (int j = 0; j < inner_tile_cols; j++)
- {
- x[i][j] = XTx[i][j] = 0.0f;
- }
- }
-
- // Perform the Winograd input transformation for each channel in the input
- // tensor.
- int channels_remaining = n_channels;
-#ifdef __aarch64__
- for (; channels_remaining >= 4; channels_remaining -= 4)
- {
- // Matrices used/computed in this kernel
- float32x4_t x[inner_tile_rows][inner_tile_cols];
- float32x4_t XTx[inner_tile_rows][inner_tile_cols];
- float32x4_t U[inner_tile_rows][inner_tile_cols];
- for (int i = 0; i < inner_tile_rows; i++)
- {
- for (int j = 0; j < inner_tile_cols; j++)
- {
- x[i][j] = vdupq_n_f32(0.0f);
- XTx[i][j] = vdupq_n_f32(0.0f);
- }
- }
-
- // Read a 6x6 tile in the Winograd domain
- for (int i = pad_top; i < cells_i; i++)
- {
- for (int j = pad_left; j < cells_j; j++)
- {
- x[i][j] = vld1q_f32(x_ptrs[i][j]);
- x_ptrs[i][j] += 4;
- }
- }
-
- // Compute XT . x
- for (int j = pad_left; j < cells_j; j++)
- {
- // XTx[0][j] = 4*x[0][j] + -5*x[2][j] + 1*x[4][j];
- XTx[0][j] = vmlsq_n_f32(vmlaq_n_f32(x[4][j], x[0][j], 4.0f), x[2][j], 5.0f);
-
- // XTx[1][j] = -4*x[1][j] + -4*x[2][j] + 1*x[3][j] + 1*x[4][j];
- XTx[1][j] = vmlsq_n_f32(vaddq_f32(x[3][j], x[4][j]), vaddq_f32(x[1][j], x[2][j]), 4.0f);
-
- // XTx[2][j] = 4*x[1][j] + -4*x[2][j] + -1*x[3][j] + 1*x[4][j];
- XTx[2][j] = vmlaq_n_f32(vsubq_f32(x[4][j], x[3][j]), vsubq_f32(x[1][j], x[2][j]), 4.0f);
-
- // XTx[3][j] = -2*x[1][j] + -1*x[2][j] + 2*x[3][j] + 1*x[4][j];
- XTx[3][j] = vmlaq_n_f32(vsubq_f32(x[4][j], x[2][j]), vsubq_f32(x[3][j], x[1][j]), 2.0f);
-
- // XTx[4][j] = 2*x[1][j] + -1*x[2][j] + -2*x[3][j] + 1*x[4][j];
- XTx[4][j] = vmlaq_n_f32(vsubq_f32(x[4][j], x[2][j]), vsubq_f32(x[1][j], x[3][j]), 2.0f);
-
- // XTx[5][j] = 4*x[1][j] + -5*x[3][j] + 1*x[5][j];
- XTx[5][j] = vmlsq_n_f32(vmlaq_n_f32(x[5][j], x[1][j], 4.0f), x[3][j], 5.0f);
- }
-
- // Compute U = XT . x . X
- for (int i = 0; i < inner_tile_rows; i++)
- {
- // U[i][0] = 4*XTx[i][0] + -5*XTx[i][2] + 1*XTx[i][4];
- U[i][0] = vmlsq_n_f32(vmlaq_n_f32(XTx[i][4], XTx[i][0], 4.0f), XTx[i][2], 5.0f);
-
- // U[i][1] = -4*XTx[i][1] + -4*XTx[i][2] + 1*XTx[i][3] + 1*XTx[i][4];
- U[i][1] = vmlsq_n_f32(vaddq_f32(XTx[i][3], XTx[i][4]), vaddq_f32(XTx[i][1], XTx[i][2]), 4.0f);
-
- // U[i][2] = 4*XTx[i][1] + -4*XTx[i][2] + -1*XTx[i][3] + 1*XTx[i][4];
- U[i][2] = vmlaq_n_f32(vsubq_f32(XTx[i][4], XTx[i][3]), vsubq_f32(XTx[i][1], XTx[i][2]), 4.0f);
-
- // U[i][3] = -2*XTx[i][1] + -1*XTx[i][2] + 2*XTx[i][3] + 1*XTx[i][4];
- U[i][3] = vmlaq_n_f32(vsubq_f32(XTx[i][4], XTx[i][2]), vsubq_f32(XTx[i][3], XTx[i][1]), 2.0f);
-
- // U[i][4] = 2*XTx[i][1] + -1*XTx[i][2] + -2*XTx[i][3] + 1*XTx[i][4];
- U[i][4] = vmlaq_n_f32(vsubq_f32(XTx[i][4], XTx[i][2]), vsubq_f32(XTx[i][1], XTx[i][3]), 2.0f);
-
- // U[i][5] = 4*XTx[i][1] + -5*XTx[i][3] + 1*XTx[i][5];
- U[i][5] = vmlsq_n_f32(vmlaq_n_f32(XTx[i][5], XTx[i][1], 4.0f), XTx[i][3], 5.0f);
- }
-
- // Store the transformed matrix
- for (int i = 0, m = 0; i < inner_tile_rows; i++)
- {
- for (int j = 0; j < inner_tile_cols; j++, m++)
- {
- vst1q_f32(outptr + m*matrix_stride, U[i][j]);
- }
- }
- outptr += 4;
- }
-#endif // __aarch64__
-#ifdef __arm_any__
- for (; channels_remaining >= 2; channels_remaining -= 2)
- {
- // Matrices used/computed in this kernel
- float32x2_t x[inner_tile_rows][inner_tile_cols];
- float32x2_t XTx[inner_tile_rows][inner_tile_cols];
- float32x2_t U[inner_tile_rows][inner_tile_cols];
- for (int i = 0; i < inner_tile_rows; i++)
- {
- for (int j = 0; j < inner_tile_cols; j++)
- {
- x[i][j] = vdup_n_f32(0.0f);
- XTx[i][j] = vdup_n_f32(0.0f);
- }
- }
-
- // Read a 6x6 tile in the Winograd domain
- for (int i = pad_top; i < cells_i; i++)
- {
- for (int j = pad_left; j < cells_j; j++)
- {
- x[i][j] = vld1_f32(x_ptrs[i][j]);
- x_ptrs[i][j] += 2;
- }
- }
-
- // Compute XT . x
- for (int j = pad_left; j < cells_j; j++)
- {
- // XTx[0][j] = 4*x[0][j] + -5*x[2][j] + 1*x[4][j];
- XTx[0][j] = vmls_n_f32(vmla_n_f32(x[4][j], x[0][j], 4.0f), x[2][j], 5.0f);
-
- // XTx[1][j] = -4*x[1][j] + -4*x[2][j] + 1*x[3][j] + 1*x[4][j];
- XTx[1][j] = vmls_n_f32(vadd_f32(x[3][j], x[4][j]), vadd_f32(x[1][j], x[2][j]), 4.0f);
-
- // XTx[2][j] = 4*x[1][j] + -4*x[2][j] + -1*x[3][j] + 1*x[4][j];
- XTx[2][j] = vmla_n_f32(vsub_f32(x[4][j], x[3][j]), vsub_f32(x[1][j], x[2][j]), 4.0f);
-
- // XTx[3][j] = -2*x[1][j] + -1*x[2][j] + 2*x[3][j] + 1*x[4][j];
- XTx[3][j] = vmla_n_f32(vsub_f32(x[4][j], x[2][j]), vsub_f32(x[3][j], x[1][j]), 2.0f);
-
- // XTx[4][j] = 2*x[1][j] + -1*x[2][j] + -2*x[3][j] + 1*x[4][j];
- XTx[4][j] = vmla_n_f32(vsub_f32(x[4][j], x[2][j]), vsub_f32(x[1][j], x[3][j]), 2.0f);
-
- // XTx[5][j] = 4*x[1][j] + -5*x[3][j] + 1*x[5][j];
- XTx[5][j] = vmls_n_f32(vmla_n_f32(x[5][j], x[1][j], 4.0f), x[3][j], 5.0f);
- }
-
- // Compute U = XT . x . X
- for (int i = 0; i < inner_tile_rows; i++)
- {
- // U[i][0] = 4*XTx[i][0] + -5*XTx[i][2] + 1*XTx[i][4];
- U[i][0] = vmls_n_f32(vmla_n_f32(XTx[i][4], XTx[i][0], 4.0f), XTx[i][2], 5.0f);
-
- // U[i][1] = -4*XTx[i][1] + -4*XTx[i][2] + 1*XTx[i][3] + 1*XTx[i][4];
- U[i][1] = vmls_n_f32(vadd_f32(XTx[i][3], XTx[i][4]), vadd_f32(XTx[i][1], XTx[i][2]), 4.0f);
-
- // U[i][2] = 4*XTx[i][1] + -4*XTx[i][2] + -1*XTx[i][3] + 1*XTx[i][4];
- U[i][2] = vmla_n_f32(vsub_f32(XTx[i][4], XTx[i][3]), vsub_f32(XTx[i][1], XTx[i][2]), 4.0f);
-
- // U[i][3] = -2*XTx[i][1] + -1*XTx[i][2] + 2*XTx[i][3] + 1*XTx[i][4];
- U[i][3] = vmla_n_f32(vsub_f32(XTx[i][4], XTx[i][2]), vsub_f32(XTx[i][3], XTx[i][1]), 2.0f);
-
- // U[i][4] = 2*XTx[i][1] + -1*XTx[i][2] + -2*XTx[i][3] + 1*XTx[i][4];
- U[i][4] = vmla_n_f32(vsub_f32(XTx[i][4], XTx[i][2]), vsub_f32(XTx[i][1], XTx[i][3]), 2.0f);
-
- // U[i][5] = 4*XTx[i][1] + -5*XTx[i][3] + 1*XTx[i][5];
- U[i][5] = vmls_n_f32(vmla_n_f32(XTx[i][5], XTx[i][1], 4.0f), XTx[i][3], 5.0f);
- }
-
- // Store the transformed matrix
- for (int i = 0, m = 0; i < inner_tile_rows; i++)
- {
- for (int j = 0; j < inner_tile_cols; j++, m++)
- {
- vst1_f32(outptr + m*matrix_stride, U[i][j]);
- }
- }
- outptr += 2;
- }
-#endif // __arm_any__
- for (; channels_remaining; channels_remaining--)
- {
- // Load x
- for (int i = pad_top; i < cells_i; i++)
- {
- for (int j = pad_left; j < cells_j; j++)
- {
- x[i][j] = *(x_ptrs[i][j]++);
- }
- }
-
- // Compute XT . x
- for (int j = pad_left; j < cells_j; j++)
- {
- XTx[0][j] = 4*x[0][j] + -5*x[2][j] + 1*x[4][j];
- XTx[1][j] = -4*x[1][j] + -4*x[2][j] + 1*x[3][j] + 1*x[4][j];
- XTx[2][j] = 4*x[1][j] + -4*x[2][j] + -1*x[3][j] + 1*x[4][j];
- XTx[3][j] = -2*x[1][j] + -1*x[2][j] + 2*x[3][j] + 1*x[4][j];
- XTx[4][j] = 2*x[1][j] + -1*x[2][j] + -2*x[3][j] + 1*x[4][j];
- XTx[5][j] = 4*x[1][j] + -5*x[3][j] + 1*x[5][j];
- }
-
- // Compute U = XT . x . X
- for (int i = 0; i < inner_tile_rows; i++)
- {
- U[i][0] = 4*XTx[i][0] + -5*XTx[i][2] + 1*XTx[i][4];
- U[i][1] = -4*XTx[i][1] + -4*XTx[i][2] + 1*XTx[i][3] + 1*XTx[i][4];
- U[i][2] = 4*XTx[i][1] + -4*XTx[i][2] + -1*XTx[i][3] + 1*XTx[i][4];
- U[i][3] = -2*XTx[i][1] + -1*XTx[i][2] + 2*XTx[i][3] + 1*XTx[i][4];
- U[i][4] = 2*XTx[i][1] + -1*XTx[i][2] + -2*XTx[i][3] + 1*XTx[i][4];
- U[i][5] = 4*XTx[i][1] + -5*XTx[i][3] + 1*XTx[i][5];
- }
-
- // Store the transformed matrix
- for (int i = 0, m = 0; i < inner_tile_rows; i++)
- {
- for (int j = 0; j < inner_tile_cols; j++, m++)
- {
- *(outptr + m*matrix_stride) = U[i][j];
- }
- }
- outptr++;
- }
-}
-}
-
-namespace winograd
-{
-template <int k>
-using Tiles = InputTransformImplTiles<k, k, 6, 6, float>;
-
-template <>
-const Tiles<3>::TileFn Tiles<3>::tilefn_generic = winograd_input_transform_6x6_fp32_process_tile<false>;
-
-template <>
-const Tiles<3>::TileFn Tiles<3>::tilefn_unpadded = winograd_input_transform_6x6_fp32_process_tile<true>;
-
-template <>
-const Tiles<3>::TileFn Tiles<3>::tilefn_top_padded[n_pad_top] = {
- winograd_input_transform_6x6_fp32_process_tile<true, 1, 0, 0, 0>,
-};
-
-template <>
-const Tiles<3>::TileFn Tiles<3>::tilefn_left_padded[n_pad_left] = {
- winograd_input_transform_6x6_fp32_process_tile<true, 0, 1, 0, 0>,
-};
-
-template <>
-const Tiles<3>::TileFn Tiles<3>::tilefn_bottom_padded[n_pad_bottom] = {
- winograd_input_transform_6x6_fp32_process_tile<true, 0, 0, 1, 0>,
- winograd_input_transform_6x6_fp32_process_tile<true, 0, 0, 2, 0>,
- winograd_input_transform_6x6_fp32_process_tile<true, 0, 0, 3, 0>,
- winograd_input_transform_6x6_fp32_process_tile<true, 0, 0, 4, 0>,
- winograd_input_transform_6x6_fp32_process_tile<true, 0, 0, 5, 0>,
- winograd_input_transform_6x6_fp32_process_tile<true, 0, 0, 6, 0>,
-};
-
-template <>
-const Tiles<3>::TileFn Tiles<3>::tilefn_right_padded[n_pad_right] = {
- winograd_input_transform_6x6_fp32_process_tile<true, 0, 0, 0, 1>,
- winograd_input_transform_6x6_fp32_process_tile<true, 0, 0, 0, 2>,
- winograd_input_transform_6x6_fp32_process_tile<true, 0, 0, 0, 3>,
- winograd_input_transform_6x6_fp32_process_tile<true, 0, 0, 0, 4>,
- winograd_input_transform_6x6_fp32_process_tile<true, 0, 0, 0, 5>,
- winograd_input_transform_6x6_fp32_process_tile<true, 0, 0, 0, 6>,
-};
-
-template <>
-const Tiles<5>::TileFn Tiles<5>::tilefn_generic = winograd_input_transform_6x6_fp32_process_tile<false>;
-
-
-template <>
-const Tiles<5>::TileFn Tiles<5>::tilefn_unpadded = winograd_input_transform_6x6_fp32_process_tile<true>;
-
-
-template <>
-const Tiles<5>::TileFn Tiles<5>::tilefn_top_padded[n_pad_top] = {
- winograd_input_transform_6x6_fp32_process_tile<true, 2, 0, 0, 0>,
-};
-
-template <>
-const Tiles<5>::TileFn Tiles<5>::tilefn_left_padded[n_pad_left] = {
- winograd_input_transform_6x6_fp32_process_tile<true, 0, 2, 0, 0>,
-};
-
-template <>
-const Tiles<5>::TileFn Tiles<5>::tilefn_bottom_padded[n_pad_bottom] = {
- winograd_input_transform_6x6_fp32_process_tile<true, 0, 0, 1, 0>,
- winograd_input_transform_6x6_fp32_process_tile<true, 0, 0, 2, 0>,
- winograd_input_transform_6x6_fp32_process_tile<true, 0, 0, 3, 0>,
- winograd_input_transform_6x6_fp32_process_tile<true, 0, 0, 4, 0>,
- winograd_input_transform_6x6_fp32_process_tile<true, 0, 0, 5, 0>,
- winograd_input_transform_6x6_fp32_process_tile<true, 0, 0, 6, 0>,
-};
-
-template <>
-const Tiles<5>::TileFn Tiles<5>::tilefn_right_padded[n_pad_right] = {
- winograd_input_transform_6x6_fp32_process_tile<true, 0, 0, 0, 1>,
- winograd_input_transform_6x6_fp32_process_tile<true, 0, 0, 0, 2>,
- winograd_input_transform_6x6_fp32_process_tile<true, 0, 0, 0, 3>,
- winograd_input_transform_6x6_fp32_process_tile<true, 0, 0, 0, 4>,
- winograd_input_transform_6x6_fp32_process_tile<true, 0, 0, 0, 5>,
- winograd_input_transform_6x6_fp32_process_tile<true, 0, 0, 0, 6>,
-};
-
-template class InputTransform<3, 3, 6, 6, float>;
-template class InputTransform<5, 5, 6, 6, float>;
-}
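
For reference, the arithmetic in this deleted kernel (and in its replacement under `winograd_transforms/`) is the 6x6 inner-tile input transform $U = B^T x B$, shared by the 3x3 and 5x5 kernels; the matrix below is read directly off the scalar comments above:

$$
B^T = \begin{pmatrix}
4 & 0 & -5 & 0 & 1 & 0 \\
0 & -4 & -4 & 1 & 1 & 0 \\
0 & 4 & -4 & -1 & 1 & 0 \\
0 & -2 & -1 & 2 & 1 & 0 \\
0 & 2 & -1 & -2 & 1 & 0 \\
0 & 4 & 0 & -5 & 0 & 1
\end{pmatrix}, \qquad U = B^T x B .
$$
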
diff --git a/src/core/NEON/kernels/convolution/winograd/transforms/output_2x2_3x3_fp32.cpp b/src/core/NEON/kernels/convolution/winograd/transforms/output_2x2_3x3_fp32.cpp
deleted file mode 100644
index 597b074026..0000000000
--- a/src/core/NEON/kernels/convolution/winograd/transforms/output_2x2_3x3_fp32.cpp
+++ /dev/null
@@ -1,375 +0,0 @@
-/*
- * Copyright (c) 2017 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include "arm_compute/core/NEON/kernels/convolution/winograd/transforms/output.hpp"
-#include "arm_compute/core/NEON/kernels/convolution/winograd/winograd_output_transform.hpp"
-#include "arm_compute/core/NEON/kernels/convolution/common/arm.hpp"
-
-namespace
-{
-
-template <bool Specialized, int PadBottom=0, int PadRight=0>
-void winograd_output_transform_2x2_3x3_fp32_process_tile(
- const int n_channels,
- const float* const matrix_base,
- const int matrix_stride,
- const float* const biases,
- float* const output,
- const int output_row_stride,
- const int output_col_stride,
- const int _pad_bottom,
- const int _pad_right
-)
-{
- constexpr int OutputTileRows = 2, OutputTileCols = 2;
- const int pad_bottom = Specialized ? PadBottom : _pad_bottom;
- const int pad_right = Specialized ? PadRight : _pad_right;
-
- const int cells_i = OutputTileRows - pad_bottom;
- const int cells_j = OutputTileCols - pad_right;
-
- // Construct a map to the output cells
- float *outptrs[OutputTileRows][OutputTileCols];
- for (int i = 0; i < cells_i; i++)
- {
- for (int j = 0; j < cells_j; j++)
- {
- outptrs[i][j] = output + i*output_row_stride + j*output_col_stride;
- }
- }
- const float *inptr = matrix_base;
- const float *bptr = biases;
-
- if (bptr)
- {
- // For each channel of the output
- int channels_remaining = n_channels;
-#ifdef __aarch64__
- for (; channels_remaining >= 4; channels_remaining -= 4)
- {
- // Matrices used and computed during this transform
- float32x4_t F[4][4], FZ[4][2], f[2][2], b;
-
- // Read a 4x4 tile in the Winograd domain
- for (int i = 0, m = 0; i < 4; i++)
- {
- for (int j = 0; j < 4; j++, m++)
- {
- F[i][j] = vld1q_f32(inptr + m*matrix_stride);
- }
- }
- inptr += 4;
-
- // Compute the matrix F Z
- for (int i = 0; i < 4; i++)
- {
- // FZ[i][0] = F[i][0] + F[i][1] + F[i][2];
- FZ[i][0] = vaddq_f32(vaddq_f32(F[i][0], F[i][1]), F[i][2]);
-
- // FZ[i][1] = F[i][1] - F[i][2] - F[i][3];
- FZ[i][1] = vsubq_f32(vsubq_f32(F[i][1], F[i][2]), F[i][3]);
- }
-
- // Compute the output tile f = ZT F Z
- for (int j = 0; j < 2; j++)
- {
- // f[0][j] = FZ[0][j] + FZ[1][j] + FZ[2][j];
- f[0][j] = vaddq_f32(vaddq_f32(FZ[0][j], FZ[1][j]), FZ[2][j]);
-
- // f[1][j] = FZ[1][j] - FZ[2][j] - FZ[3][j];
- f[1][j] = vsubq_f32(vsubq_f32(FZ[1][j], FZ[2][j]), FZ[3][j]);
- }
-
- // Load the bias vector
- b = vld1q_f32(bptr);
- bptr += 4;
-
- // Write out the output tile
- for (int i = 0; i < cells_i; i++)
- {
- for (int j = 0; j < cells_j; j++)
- {
- vst1q_f32(outptrs[i][j], vaddq_f32(f[i][j], b));
- outptrs[i][j] += 4;
- }
- }
- }
-#endif // __aarch64__
-#ifdef __arm_any__
- for (; channels_remaining >= 2; channels_remaining -= 2)
- {
- // Matrices used and computed during this transform
- float32x2_t F[4][4], FZ[4][2], f[2][2], b;
-
- // Read a 4x4 tile in the Winograd domain
- for (int i = 0, m = 0; i < 4; i++)
- {
- for (int j = 0; j < 4; j++, m++)
- {
- F[i][j] = vld1_f32(inptr + m*matrix_stride);
- }
- }
- inptr += 2;
-
- // Compute the matrix F Z
- for (int i = 0; i < 4; i++)
- {
- // FZ[i][0] = F[i][0] + F[i][1] + F[i][2];
- FZ[i][0] = vadd_f32(vadd_f32(F[i][0], F[i][1]), F[i][2]);
-
- // FZ[i][1] = F[i][1] - F[i][2] - F[i][3];
- FZ[i][1] = vsub_f32(vsub_f32(F[i][1], F[i][2]), F[i][3]);
- }
-
- // Compute the output tile f = ZT F Z
- for (int j = 0; j < 2; j++)
- {
- // f[0][j] = FZ[0][j] + FZ[1][j] + FZ[2][j];
- f[0][j] = vadd_f32(vadd_f32(FZ[0][j], FZ[1][j]), FZ[2][j]);
-
- // f[1][j] = FZ[1][j] - FZ[2][j] - FZ[3][j];
- f[1][j] = vsub_f32(vsub_f32(FZ[1][j], FZ[2][j]), FZ[3][j]);
- }
-
- // Load the bias vector
- b = vld1_f32(bptr);
- bptr += 2;
-
- // Write out the output tile
- for (int i = 0; i < cells_i; i++)
- {
- for (int j = 0; j < cells_j; j++)
- {
- vst1_f32(outptrs[i][j], vadd_f32(f[i][j], b));
- outptrs[i][j] += 2;
- }
- }
- }
-#endif // __arm_any__
- for (; channels_remaining; channels_remaining--)
- {
- // Matrices used and computed during this transform
- float F[4][4], FZ[4][2], f[2][2], b;
-
- // Read a 4x4 tile in the Winograd domain
- for (int i = 0, m = 0; i < 4; i++)
- {
- for (int j = 0; j < 4; j++, m++)
- {
- F[i][j] = *(inptr + m*matrix_stride);
- }
- }
- inptr++;
-
- // Compute the matrix F Z
- for (int i = 0; i < 4; i++)
- {
- FZ[i][0] = F[i][0] + F[i][1] + F[i][2];
- FZ[i][1] = F[i][1] - F[i][2] - F[i][3];
- }
-
- // Compute the output tile f = ZT F Z
- for (int j = 0; j < 2; j++)
- {
- f[0][j] = FZ[0][j] + FZ[1][j] + FZ[2][j];
- f[1][j] = FZ[1][j] - FZ[2][j] - FZ[3][j];
- }
-
- // Load the bias
- b = *(bptr++);
-
- // Write out the output tile
- for (int i = 0; i < cells_i; i++)
- {
- for (int j = 0; j < cells_j; j++)
- {
- *(outptrs[i][j]++) = f[i][j] + b;
- }
- }
- }
- }
- else
- {
- // For each channel of the output
- int channels_remaining = n_channels;
-#ifdef __aarch64__
- for (; channels_remaining >= 4; channels_remaining -= 4)
- {
- // Matrices used and computed during this transform
- float32x4_t F[4][4], FZ[4][2], f[2][2];
-
- // Read a 4x4 tile in the Winograd domain
- for (int i = 0, m = 0; i < 4; i++)
- {
- for (int j = 0; j < 4; j++, m++)
- {
- F[i][j] = vld1q_f32(inptr + m*matrix_stride);
- }
- }
- inptr += 4;
-
- // Compute the matrix F Z
- for (int i = 0; i < 4; i++)
- {
- // FZ[i][0] = F[i][0] + F[i][1] + F[i][2];
- FZ[i][0] = vaddq_f32(vaddq_f32(F[i][0], F[i][1]), F[i][2]);
-
- // FZ[i][1] = F[i][1] - F[i][2] - F[i][3];
- FZ[i][1] = vsubq_f32(vsubq_f32(F[i][1], F[i][2]), F[i][3]);
- }
-
- // Compute the output tile f = ZT F Z
- for (int j = 0; j < 2; j++)
- {
- // f[0][j] = FZ[0][j] + FZ[1][j] + FZ[2][j];
- f[0][j] = vaddq_f32(vaddq_f32(FZ[0][j], FZ[1][j]), FZ[2][j]);
-
- // f[1][j] = FZ[1][j] - FZ[2][j] - FZ[3][j];
- f[1][j] = vsubq_f32(vsubq_f32(FZ[1][j], FZ[2][j]), FZ[3][j]);
- }
-
- // Write out the output tile
- for (int i = 0; i < cells_i; i++)
- {
- for (int j = 0; j < cells_j; j++)
- {
- vst1q_f32(outptrs[i][j], f[i][j]);
- outptrs[i][j] += 4;
- }
- }
- }
-#endif // __aarch64__
-#ifdef __arm_any__
- for (; channels_remaining >= 2; channels_remaining -= 2)
- {
- // Matrices used and computed during this transform
- float32x2_t F[4][4], FZ[4][2], f[2][2];
-
- // Read a 4x4 tile in the Winograd domain
- for (int i = 0, m = 0; i < 4; i++)
- {
- for (int j = 0; j < 4; j++, m++)
- {
- F[i][j] = vld1_f32(inptr + m*matrix_stride);
- }
- }
- inptr += 2;
-
- // Compute the matrix F Z
- for (int i = 0; i < 4; i++)
- {
- // FZ[i][0] = F[i][0] + F[i][1] + F[i][2];
- FZ[i][0] = vadd_f32(vadd_f32(F[i][0], F[i][1]), F[i][2]);
-
- // FZ[i][1] = F[i][1] - F[i][2] - F[i][3];
- FZ[i][1] = vsub_f32(vsub_f32(F[i][1], F[i][2]), F[i][3]);
- }
-
- // Compute the output tile f = ZT F Z
- for (int j = 0; j < 2; j++)
- {
- // f[0][j] = FZ[0][j] + FZ[1][j] + FZ[2][j];
- f[0][j] = vadd_f32(vadd_f32(FZ[0][j], FZ[1][j]), FZ[2][j]);
-
- // f[1][j] = FZ[1][j] - FZ[2][j] - FZ[3][j];
- f[1][j] = vsub_f32(vsub_f32(FZ[1][j], FZ[2][j]), FZ[3][j]);
- }
-
- // Write out the output tile
- for (int i = 0; i < cells_i; i++)
- {
- for (int j = 0; j < cells_j; j++)
- {
- vst1_f32(outptrs[i][j], f[i][j]);
- outptrs[i][j] += 2;
- }
- }
- }
-#endif // __arm_any__
- for (; channels_remaining; channels_remaining--)
- {
- // Matrices used and computed during this transform
- float F[4][4], FZ[4][2], f[2][2];
-
- // Read a 4x4 tile in the Winograd domain
- for (int i = 0, m = 0; i < 4; i++)
- {
- for (int j = 0; j < 4; j++, m++)
- {
- F[i][j] = *(inptr + m*matrix_stride);
- }
- }
- inptr++;
-
- // Compute the matrix F Z
- for (int i = 0; i < 4; i++)
- {
- FZ[i][0] = F[i][0] + F[i][1] + F[i][2];
- FZ[i][1] = F[i][1] - F[i][2] - F[i][3];
- }
-
- // Compute the output tile f = ZT F Z
- for (int j = 0; j < 2; j++)
- {
- f[0][j] = FZ[0][j] + FZ[1][j] + FZ[2][j];
- f[1][j] = FZ[1][j] - FZ[2][j] - FZ[3][j];
- }
-
- // Write out the output tile
- for (int i = 0; i < cells_i; i++)
- {
- for (int j = 0; j < cells_j; j++)
- {
- *(outptrs[i][j]++) = f[i][j];
- }
- }
- }
- }
-}
-
-} // namespace (anonymous)
-
-namespace winograd
-{
-using Tiles = OutputTransformImplTiles<3, 3, 4, 4, float>;
-
-template <>
-const Tiles::TileFn Tiles::tilefn_generic = winograd_output_transform_2x2_3x3_fp32_process_tile<false>;
-
-template <>
-const Tiles::TileFn Tiles::tilefn_unpadded = winograd_output_transform_2x2_3x3_fp32_process_tile<true>;
-
-template <>
-const Tiles::TileFn Tiles::tilefn_bottom_padded[n_pad_bottom] = {
- winograd_output_transform_2x2_3x3_fp32_process_tile<true, 1, 0>
-};
-
-template <>
-const Tiles::TileFn Tiles::tilefn_right_padded[n_pad_right] = {
- winograd_output_transform_2x2_3x3_fp32_process_tile<true, 0, 1>
-};
-
-template class OutputTransform<3, 3, 4, 4, float>;
-} // namespace winograd
-
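
The deleted 2x2/3x3 output kernel computes $f = A^T F A$; the two-row transform matrix follows directly from the commented scalar arithmetic (`FZ[i][0] = F[i][0] + F[i][1] + F[i][2]`, `FZ[i][1] = F[i][1] - F[i][2] - F[i][3]`):

$$
A^T = \begin{pmatrix} 1 & 1 & 1 & 0 \\ 0 & 1 & -1 & -1 \end{pmatrix}, \qquad f = A^T F A .
$$
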
diff --git a/src/core/NEON/kernels/convolution/winograd/transforms/output_2x2_5x5_fp32.cpp b/src/core/NEON/kernels/convolution/winograd/transforms/output_2x2_5x5_fp32.cpp
deleted file mode 100644
index 60d7181d97..0000000000
--- a/src/core/NEON/kernels/convolution/winograd/transforms/output_2x2_5x5_fp32.cpp
+++ /dev/null
@@ -1,369 +0,0 @@
-/*
- * Copyright (c) 2017 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include "arm_compute/core/NEON/kernels/convolution/winograd/transforms/output.hpp"
-#include "arm_compute/core/NEON/kernels/convolution/winograd/winograd_output_transform.hpp"
-#include "arm_compute/core/NEON/kernels/convolution/common/arm.hpp"
-
-namespace
-{
-
-template <bool Specialized, int PadBottom=0, int PadRight=0>
-void winograd_output_transform_2x2_5x5_fp32_process_tile(
- const int n_channels,
- const float* const matrix_base,
- const int matrix_stride,
- const float* const biases,
- float* const output,
- const int output_row_stride,
- const int output_col_stride,
- const int _pad_bottom,
- const int _pad_right
-)
-{
- constexpr int OutputTileRows = 2, OutputTileCols = 2;
- const int pad_bottom = Specialized ? PadBottom : _pad_bottom;
- const int pad_right = Specialized ? PadRight : _pad_right;
-
- const int cells_i = 2 - pad_bottom;
- const int cells_j = 2 - pad_right;
-
- // Construct a map to the output cells
- float *outptrs[OutputTileRows][OutputTileCols];
- for (int i = 0; i < cells_i; i++)
- {
- for (int j = 0; j < cells_j; j++)
- {
- outptrs[i][j] = output + i*output_row_stride + j*output_col_stride;
- }
- }
- const float *inptr = matrix_base;
- const float *bptr = biases;
-
- if (bptr)
- {
- // For each channel of the output
- int channels_remaining = n_channels;
-#ifdef __aarch64__
- for (; channels_remaining >= 4; channels_remaining -= 4)
- {
- // Matrices used and computed during this transform
- float32x4_t F[6][6], FZ[6][2], f[2][2], b;
-
- // Read a 6x6 tile in the Winograd domain
- for (int i = 0, m = 0; i < 6; i++)
- {
- for (int j = 0; j < 6; j++, m++)
- {
- F[i][j] = vld1q_f32(inptr + m*matrix_stride);
- }
- }
- inptr += 4;
-
- // Compute the matrix F Z
- for (int i = 0; i < 6; i++)
- {
- // FZ[i][0] = 1*F[i][0] + 1*F[i][1] + 1*F[i][2] + 1*F[i][3] + 1*F[i][4];
- FZ[i][0] = vaddq_f32(vaddq_f32(vaddq_f32(F[i][0], F[i][1]), vaddq_f32(F[i][2], F[i][3])), F[i][4]);
-
- // FZ[i][1] = 1*F[i][1] + -1*F[i][2] + 2*F[i][3] + -2*F[i][4] + 1*F[i][5];
- FZ[i][1] = vaddq_f32(vmlaq_n_f32(vsubq_f32(F[i][1], F[i][2]), vsubq_f32(F[i][3], F[i][4]), 2.0f), F[i][5]);
- }
-
- // Compute the output tile f = ZT F Z
- for (int j = 0; j < 2; j++)
- {
- // f[0][j] = 1*FZ[0][j] + 1*FZ[1][j] + 1*FZ[2][j] + 1*FZ[3][j] + 1*FZ[4][j];
- f[0][j] = vaddq_f32(vaddq_f32(vaddq_f32(FZ[0][j], FZ[1][j]), vaddq_f32(FZ[2][j], FZ[3][j])), FZ[4][j]);
-
- // f[1][j] = 1*FZ[1][j] + -1*FZ[2][j] + 2*FZ[3][j] + -2*FZ[4][j] + 1*FZ[5][j];
- f[1][j] = vaddq_f32(vmlaq_n_f32(vsubq_f32(FZ[1][j], FZ[2][j]), vsubq_f32(FZ[3][j], FZ[4][j]), 2.0f), FZ[5][j]);
- }
-
- // Write out the output tile
- b = vld1q_f32(bptr);
- bptr += 4;
- for (int i = 0; i < cells_i; i++)
- {
- for (int j = 0; j < cells_j; j++)
- {
- vst1q_f32(outptrs[i][j], vaddq_f32(f[i][j], b));
- outptrs[i][j] += 4;
- }
- }
- }
-#endif // __aarch64__
-#ifdef __arm_any__
- for (; channels_remaining >= 2; channels_remaining -= 2)
- {
- // Matrices used and computed during this transform
- float32x2_t F[6][6], FZ[6][2], f[2][2], b;
-
- // Read a 6x6 tile in the Winograd domain
- for (int i = 0, m = 0; i < 6; i++)
- {
- for (int j = 0; j < 6; j++, m++)
- {
- F[i][j] = vld1_f32(inptr + m*matrix_stride);
- }
- }
- inptr += 2;
-
- // Compute the matrix F Z
- for (int i = 0; i < 6; i++)
- {
- // FZ[i][0] = 1*F[i][0] + 1*F[i][1] + 1*F[i][2] + 1*F[i][3] + 1*F[i][4];
- FZ[i][0] = vadd_f32(vadd_f32(vadd_f32(F[i][0], F[i][1]), vadd_f32(F[i][2], F[i][3])), F[i][4]);
-
- // FZ[i][1] = 1*F[i][1] + -1*F[i][2] + 2*F[i][3] + -2*F[i][4] + 1*F[i][5];
- FZ[i][1] = vadd_f32(vmla_n_f32(vsub_f32(F[i][1], F[i][2]), vsub_f32(F[i][3], F[i][4]), 2.0f), F[i][5]);
- }
-
- // Compute the output tile f = ZT F Z
- for (int j = 0; j < 2; j++)
- {
- // f[0][j] = 1*FZ[0][j] + 1*FZ[1][j] + 1*FZ[2][j] + 1*FZ[3][j] + 1*FZ[4][j];
- f[0][j] = vadd_f32(vadd_f32(vadd_f32(FZ[0][j], FZ[1][j]), vadd_f32(FZ[2][j], FZ[3][j])), FZ[4][j]);
-
- // f[1][j] = 1*FZ[1][j] + -1*FZ[2][j] + 2*FZ[3][j] + -2*FZ[4][j] + 1*FZ[5][j];
- f[1][j] = vadd_f32(vmla_n_f32(vsub_f32(FZ[1][j], FZ[2][j]), vsub_f32(FZ[3][j], FZ[4][j]), 2.0f), FZ[5][j]);
- }
-
- // Write out the output tile
- b = vld1_f32(bptr);
- bptr += 2;
- for (int i = 0; i < cells_i; i++)
- {
- for (int j = 0; j < cells_j; j++)
- {
- vst1_f32(outptrs[i][j], vadd_f32(f[i][j], b));
- outptrs[i][j] += 2;
- }
- }
- }
-#endif // __arm_any__
- for (; channels_remaining; channels_remaining--)
- {
- // Matrices used and computed during this transform
- float F[6][6], FZ[6][2], f[2][2], b;
-
- // Read a 6x6 tile in the Winograd domain
- for (int i = 0, m = 0; i < 6; i++)
- {
- for (int j = 0; j < 6; j++, m++)
- {
- F[i][j] = *(inptr + m*matrix_stride);
- }
- }
- inptr++;
-
- // Compute the matrix F Z
- for (int i = 0; i < 6; i++)
- {
- FZ[i][0] = 1*F[i][0] + 1*F[i][1] + 1*F[i][2] + 1*F[i][3] + 1*F[i][4];
- FZ[i][1] = 1*F[i][1] + -1*F[i][2] + 2*F[i][3] + -2*F[i][4] + 1*F[i][5];
- }
-
- // Compute the output tile f = ZT F Z
- for (int j = 0; j < 2; j++)
- {
- f[0][j] = 1*FZ[0][j] + 1*FZ[1][j] + 1*FZ[2][j] + 1*FZ[3][j] + 1*FZ[4][j];
- f[1][j] = 1*FZ[1][j] + -1*FZ[2][j] + 2*FZ[3][j] + -2*FZ[4][j] + 1*FZ[5][j];
- }
-
- // Write out the output tile
- b = *(bptr++);
- for (int i = 0; i < cells_i; i++)
- {
- for (int j = 0; j < cells_j; j++)
- {
- *(outptrs[i][j]++) = f[i][j] + b;
- }
- }
- }
- }
- else
- {
- // For each channel of the output
- int channels_remaining = n_channels;
-#ifdef __aarch64__
- for (; channels_remaining >= 4; channels_remaining -= 4)
- {
- // Matrices used and computed during this transform
- float32x4_t F[6][6], FZ[6][2], f[2][2];
-
- // Read a 6x6 tile in the Winograd domain
- for (int i = 0, m = 0; i < 6; i++)
- {
- for (int j = 0; j < 6; j++, m++)
- {
- F[i][j] = vld1q_f32(inptr + m*matrix_stride);
- }
- }
- inptr += 4;
-
- // Compute the matrix F Z
- for (int i = 0; i < 6; i++)
- {
- // FZ[i][0] = 1*F[i][0] + 1*F[i][1] + 1*F[i][2] + 1*F[i][3] + 1*F[i][4];
- FZ[i][0] = vaddq_f32(vaddq_f32(vaddq_f32(F[i][0], F[i][1]), vaddq_f32(F[i][2], F[i][3])), F[i][4]);
-
- // FZ[i][1] = 1*F[i][1] + -1*F[i][2] + 2*F[i][3] + -2*F[i][4] + 1*F[i][5];
- FZ[i][1] = vaddq_f32(vmlaq_n_f32(vsubq_f32(F[i][1], F[i][2]), vsubq_f32(F[i][3], F[i][4]), 2.0f), F[i][5]);
- }
-
- // Compute the output tile f = ZT F Z
- for (int j = 0; j < 2; j++)
- {
- // f[0][j] = 1*FZ[0][j] + 1*FZ[1][j] + 1*FZ[2][j] + 1*FZ[3][j] + 1*FZ[4][j];
- f[0][j] = vaddq_f32(vaddq_f32(vaddq_f32(FZ[0][j], FZ[1][j]), vaddq_f32(FZ[2][j], FZ[3][j])), FZ[4][j]);
-
- // f[1][j] = 1*FZ[1][j] + -1*FZ[2][j] + 2*FZ[3][j] + -2*FZ[4][j] + 1*FZ[5][j];
- f[1][j] = vaddq_f32(vmlaq_n_f32(vsubq_f32(FZ[1][j], FZ[2][j]), vsubq_f32(FZ[3][j], FZ[4][j]), 2.0f), FZ[5][j]);
- }
-
- // Write out the output tile
- for (int i = 0; i < cells_i; i++)
- {
- for (int j = 0; j < cells_j; j++)
- {
- vst1q_f32(outptrs[i][j], f[i][j]);
- outptrs[i][j] += 4;
- }
- }
- }
-#endif // __aarch64__
-#ifdef __arm_any__
- for (; channels_remaining >= 2; channels_remaining -= 2)
- {
- // Matrices used and computed during this transform
- float32x2_t F[6][6], FZ[6][2], f[2][2];
-
- // Read a 6x6 tile in the Winograd domain
- for (int i = 0, m = 0; i < 6; i++)
- {
- for (int j = 0; j < 6; j++, m++)
- {
- F[i][j] = vld1_f32(inptr + m*matrix_stride);
- }
- }
- inptr += 2;
-
- // Compute the matrix F Z
- for (int i = 0; i < 6; i++)
- {
- // FZ[i][0] = 1*F[i][0] + 1*F[i][1] + 1*F[i][2] + 1*F[i][3] + 1*F[i][4];
- FZ[i][0] = vadd_f32(vadd_f32(vadd_f32(F[i][0], F[i][1]), vadd_f32(F[i][2], F[i][3])), F[i][4]);
-
- // FZ[i][1] = 1*F[i][1] + -1*F[i][2] + 2*F[i][3] + -2*F[i][4] + 1*F[i][5];
- FZ[i][1] = vadd_f32(vmla_n_f32(vsub_f32(F[i][1], F[i][2]), vsub_f32(F[i][3], F[i][4]), 2.0f), F[i][5]);
- }
-
- // Compute the output tile f = ZT F Z
- for (int j = 0; j < 2; j++)
- {
- // f[0][j] = 1*FZ[0][j] + 1*FZ[1][j] + 1*FZ[2][j] + 1*FZ[3][j] + 1*FZ[4][j];
- f[0][j] = vadd_f32(vadd_f32(vadd_f32(FZ[0][j], FZ[1][j]), vadd_f32(FZ[2][j], FZ[3][j])), FZ[4][j]);
-
- // f[1][j] = 1*FZ[1][j] + -1*FZ[2][j] + 2*FZ[3][j] + -2*FZ[4][j] + 1*FZ[5][j];
- f[1][j] = vadd_f32(vmla_n_f32(vsub_f32(FZ[1][j], FZ[2][j]), vsub_f32(FZ[3][j], FZ[4][j]), 2.0f), FZ[5][j]);
- }
-
- // Write out the output tile
- for (int i = 0; i < cells_i; i++)
- {
- for (int j = 0; j < cells_j; j++)
- {
- vst1_f32(outptrs[i][j], f[i][j]);
- outptrs[i][j] += 2;
- }
- }
- }
-#endif // __arm_any__
- for (; channels_remaining; channels_remaining--)
- {
- // Matrices used and computed during this transform
- float F[6][6], FZ[6][2], f[2][2];
-
- // Read a 6x6 tile in the Winograd domain
- for (int i = 0, m = 0; i < 6; i++)
- {
- for (int j = 0; j < 6; j++, m++)
- {
- F[i][j] = *(inptr + m*matrix_stride);
- }
- }
- inptr++;
-
- // Compute the matrix F Z
- for (int i = 0; i < 6; i++)
- {
- FZ[i][0] = 1*F[i][0] + 1*F[i][1] + 1*F[i][2] + 1*F[i][3] + 1*F[i][4];
- FZ[i][1] = 1*F[i][1] + -1*F[i][2] + 2*F[i][3] + -2*F[i][4] + 1*F[i][5];
- }
-
- // Compute the output tile f = ZT F Z
- for (int j = 0; j < 2; j++)
- {
- f[0][j] = 1*FZ[0][j] + 1*FZ[1][j] + 1*FZ[2][j] + 1*FZ[3][j] + 1*FZ[4][j];
- f[1][j] = 1*FZ[1][j] + -1*FZ[2][j] + 2*FZ[3][j] + -2*FZ[4][j] + 1*FZ[5][j];
- }
-
- // Write out the output tile
- for (int i = 0; i < cells_i; i++)
- {
- for (int j = 0; j < cells_j; j++)
- {
- *(outptrs[i][j]++) = f[i][j];
- }
- }
- }
- }
-}
-
-} // namespace (anonymous)
-
-namespace winograd
-{
-using Tiles = OutputTransformImplTiles<5, 5, 6, 6, float>;
-
-template <>
-const Tiles::TileFn Tiles::tilefn_generic = winograd_output_transform_2x2_5x5_fp32_process_tile<false>;
-
-template <>
-const Tiles::TileFn Tiles::tilefn_unpadded = winograd_output_transform_2x2_5x5_fp32_process_tile<true>;
-
-template <>
-const Tiles::TileFn Tiles::tilefn_bottom_padded[n_pad_bottom] = {
- winograd_output_transform_2x2_5x5_fp32_process_tile<true, 1, 0>
-};
-
-template <>
-const Tiles::TileFn Tiles::tilefn_right_padded[n_pad_right] = {
- winograd_output_transform_2x2_5x5_fp32_process_tile<true, 0, 1>
-};
-
-template class OutputTransform<5, 5, 6, 6, float>;
-} // namespace winograd
-
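The unrolled arithmetic in the file deleted above implements the output transform f = Z^T F Z for F(2x2, 5x5), where Z^T has rows (1, 1, 1, 1, 1, 0) and (0, 1, -1, 2, -2, 1). A minimal scalar sketch of the same computation as explicit matrix products — hypothetical names, not library code — is useful for cross-checking the hand-unrolled NEON paths:

#include <cstdio>

// Sketch only: reference output transform for F(2x2, 5x5). F is the 6x6
// Winograd-domain tile; ZT encodes the unrolled FZ/f expressions in the
// deleted kernel above.
void reference_output_transform_2x2_5x5(const float F[6][6], float f[2][2])
{
  static const float ZT[2][6] = {
    { 1.0f, 1.0f,  1.0f, 1.0f,  1.0f, 0.0f },
    { 0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 1.0f },
  };

  float FZ[6][2] = {};  // FZ = F Z (6x6 by 6x2)
  for (int i = 0; i < 6; i++)
    for (int j = 0; j < 2; j++)
      for (int k = 0; k < 6; k++)
        FZ[i][j] += F[i][k] * ZT[j][k];

  for (int i = 0; i < 2; i++)  // f = Z^T FZ (2x6 by 6x2)
    for (int j = 0; j < 2; j++)
    {
      f[i][j] = 0.0f;
      for (int k = 0; k < 6; k++)
        f[i][j] += ZT[i][k] * FZ[k][j];
    }
}

int main()
{
  float F[6][6], f[2][2];
  for (int i = 0; i < 6; i++)
    for (int j = 0; j < 6; j++)
      F[i][j] = float(i * 6 + j);  // arbitrary sample tile
  reference_output_transform_2x2_5x5(F, f);
  std::printf("%f %f / %f %f\n", f[0][0], f[0][1], f[1][0], f[1][1]);
  return 0;
}
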
diff --git a/src/core/NEON/kernels/convolution/winograd/transforms/output_4x4_3x3_fp32.cpp b/src/core/NEON/kernels/convolution/winograd/transforms/output_4x4_3x3_fp32.cpp
deleted file mode 100644
index 15cc04b352..0000000000
--- a/src/core/NEON/kernels/convolution/winograd/transforms/output_4x4_3x3_fp32.cpp
+++ /dev/null
@@ -1,428 +0,0 @@
-/*
- * Copyright (c) 2017 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include "arm_compute/core/NEON/kernels/convolution/winograd/transforms/output.hpp"
-#include "arm_compute/core/NEON/kernels/convolution/winograd/winograd_output_transform.hpp"
-#include "arm_compute/core/NEON/kernels/convolution/common/arm.hpp"
-
-namespace
-{
-
-template <bool Specialized, int PadBottom=0, int PadRight=0>
-void winograd_output_transform_4x4_3x3_fp32_process_tile(
- const int n_channels,
- const float* const matrix_base,
- const int matrix_stride,
- const float* const biases,
- float* const output,
- const int output_row_stride,
- const int output_col_stride,
- const int _pad_bottom,
- const int _pad_right
-)
-{
- const int pad_bottom = Specialized ? PadBottom : _pad_bottom;
- const int pad_right = Specialized ? PadRight : _pad_right;
- constexpr int TileRows = 4, TileCols = 4;
-
- const int cells_i = TileRows - pad_bottom;
- const int cells_j = TileCols - pad_right;
-
- // Construct a map to the output cells
- float *outptrs[TileRows][TileCols];
- for (int i = 0; i < cells_i; i++)
- {
- for (int j = 0; j < cells_j; j++)
- {
- outptrs[i][j] = output + i*output_row_stride + j*output_col_stride;
- }
- }
- const float *inptr = matrix_base;
- const float *bptr = biases;
-
- if (bptr)
- {
- // For each channel of the output
- int channels_remaining = n_channels;
-#ifdef __aarch64__
- for (; channels_remaining >= 4; channels_remaining -= 4)
- {
- // Matrices used and computed during this transform
- float32x4_t F[6][6], FZ[6][4], f[4][4], b;
-
- // Read a 6x6 tile in the Winograd domain
- for (int i = 0, m = 0; i < 6; i++)
- {
- for (int j = 0; j < 6; j++, m++)
- {
- F[i][j] = vld1q_f32(inptr + m*matrix_stride);
- }
- }
- inptr += 4;
-
- // Compute the matrix F Z
- for (int i = 0; i < 6; i++)
- {
- // FZ[i][0] = 1*F[i][0] + 1*F[i][1] + 1*F[i][2] + 1*F[i][3] + 1*F[i][4];
- FZ[i][0] = vaddq_f32(vaddq_f32(vaddq_f32(F[i][0], F[i][1]), vaddq_f32(F[i][2], F[i][3])), F[i][4]);
-
- // FZ[i][1] = 1*F[i][1] + -1*F[i][2] + 2*F[i][3] + -2*F[i][4];
- FZ[i][1] = vmlaq_n_f32(vsubq_f32(F[i][1], F[i][2]), vsubq_f32(F[i][3], F[i][4]), 2.0f);
-
- // FZ[i][2] = 1*F[i][1] + 1*F[i][2] + 4*F[i][3] + 4*F[i][4];
- FZ[i][2] = vmlaq_n_f32(vaddq_f32(F[i][1], F[i][2]), vaddq_f32(F[i][3], F[i][4]), 4.0f);
-
- // FZ[i][3] = 1*F[i][1] + -1*F[i][2] + 8*F[i][3] + -8*F[i][4] + 1*F[i][5];
- FZ[i][3] = vaddq_f32(vmlaq_n_f32(vsubq_f32(F[i][1], F[i][2]), vsubq_f32(F[i][3], F[i][4]), 8.0f), F[i][5]);
- }
-
- // Compute the output tile f = ZT F Z
- for (int j = 0; j < 4; j++)
- {
- // f[0][j] = 1*FZ[0][j] + 1*FZ[1][j] + 1*FZ[2][j] + 1*FZ[3][j] + 1*FZ[4][j];
- f[0][j] = vaddq_f32(vaddq_f32(vaddq_f32(FZ[0][j], FZ[1][j]), vaddq_f32(FZ[2][j], FZ[3][j])), FZ[4][j]);
-
- // f[1][j] = 1*FZ[1][j] + -1*FZ[2][j] + 2*FZ[3][j] + -2*FZ[4][j];
- f[1][j] = vmlaq_n_f32(vsubq_f32(FZ[1][j], FZ[2][j]), vsubq_f32(FZ[3][j], FZ[4][j]), 2.0f);
-
- // f[2][j] = 1*FZ[1][j] + 1*FZ[2][j] + 4*FZ[3][j] + 4*FZ[4][j];
- f[2][j] = vmlaq_n_f32(vaddq_f32(FZ[1][j], FZ[2][j]), vaddq_f32(FZ[3][j], FZ[4][j]), 4.0f);
-
- // f[3][j] = 1*FZ[1][j] + -1*FZ[2][j] + 8*FZ[3][j] + -8*FZ[4][j] + 1*FZ[5][j];
- f[3][j] = vaddq_f32(vmlaq_n_f32(vsubq_f32(FZ[1][j], FZ[2][j]), vsubq_f32(FZ[3][j], FZ[4][j]), 8.0f), FZ[5][j]);
- }
-
- // Write out the output tile
- b = vld1q_f32(bptr);
- bptr += 4;
- for (int i = 0; i < cells_i; i++)
- {
- for (int j = 0; j < cells_j; j++)
- {
- vst1q_f32(outptrs[i][j], vaddq_f32(f[i][j], b));
- outptrs[i][j] += 4;
- }
- }
- }
-#endif // __aarch64__
-#ifdef __arm_any__
- for (; channels_remaining >= 2; channels_remaining -= 2)
- {
- // Matrices used and computed during this transform
- float32x2_t F[6][6], FZ[6][4], f[4][4], b;
-
- // Read a 6x6 tile in the Winograd domain
- for (int i = 0, m = 0; i < 6; i++)
- {
- for (int j = 0; j < 6; j++, m++)
- {
- F[i][j] = vld1_f32(inptr + m*matrix_stride);
- }
- }
- inptr += 2;
-
- // Compute the matrix F Z
- for (int i = 0; i < 6; i++)
- {
- // FZ[i][0] = 1*F[i][0] + 1*F[i][1] + 1*F[i][2] + 1*F[i][3] + 1*F[i][4];
- FZ[i][0] = vadd_f32(vadd_f32(vadd_f32(F[i][0], F[i][1]), vadd_f32(F[i][2], F[i][3])), F[i][4]);
-
- // FZ[i][1] = 1*F[i][1] + -1*F[i][2] + 2*F[i][3] + -2*F[i][4];
- FZ[i][1] = vmla_n_f32(vsub_f32(F[i][1], F[i][2]), vsub_f32(F[i][3], F[i][4]), 2.0f);
-
- // FZ[i][2] = 1*F[i][1] + 1*F[i][2] + 4*F[i][3] + 4*F[i][4];
- FZ[i][2] = vmla_n_f32(vadd_f32(F[i][1], F[i][2]), vadd_f32(F[i][3], F[i][4]), 4.0f);
-
- // FZ[i][3] = 1*F[i][1] + -1*F[i][2] + 8*F[i][3] + -8*F[i][4] + 1*F[i][5];
- FZ[i][3] = vadd_f32(vmla_n_f32(vsub_f32(F[i][1], F[i][2]), vsub_f32(F[i][3], F[i][4]), 8.0f), F[i][5]);
- }
-
- // Compute the output tile f = ZT F Z
- for (int j = 0; j < 4; j++)
- {
- // f[0][j] = 1*FZ[0][j] + 1*FZ[1][j] + 1*FZ[2][j] + 1*FZ[3][j] + 1*FZ[4][j];
- f[0][j] = vadd_f32(vadd_f32(vadd_f32(FZ[0][j], FZ[1][j]), vadd_f32(FZ[2][j], FZ[3][j])), FZ[4][j]);
-
- // f[1][j] = 1*FZ[1][j] + -1*FZ[2][j] + 2*FZ[3][j] + -2*FZ[4][j];
- f[1][j] = vmla_n_f32(vsub_f32(FZ[1][j], FZ[2][j]), vsub_f32(FZ[3][j], FZ[4][j]), 2.0f);
-
- // f[2][j] = 1*FZ[1][j] + 1*FZ[2][j] + 4*FZ[3][j] + 4*FZ[4][j];
- f[2][j] = vmla_n_f32(vadd_f32(FZ[1][j], FZ[2][j]), vadd_f32(FZ[3][j], FZ[4][j]), 4.0f);
-
- // f[3][j] = 1*FZ[1][j] + -1*FZ[2][j] + 8*FZ[3][j] + -8*FZ[4][j] + 1*FZ[5][j];
- f[3][j] = vadd_f32(vmla_n_f32(vsub_f32(FZ[1][j], FZ[2][j]), vsub_f32(FZ[3][j], FZ[4][j]), 8.0f), FZ[5][j]);
- }
-
- // Write out the output tile
- b = vld1_f32(bptr);
- bptr += 2;
- for (int i = 0; i < cells_i; i++)
- {
- for (int j = 0; j < cells_j; j++)
- {
- vst1_f32(outptrs[i][j], vadd_f32(f[i][j], b));
- outptrs[i][j] += 2;
- }
- }
- }
-#endif // __arm_any__
- for (; channels_remaining; channels_remaining--)
- {
- // Matrices used and computed during this transform
- float F[6][6], FZ[6][4], f[4][4], b;
-
- // Read a 6x6 tile in the Winograd domain
- for (int i = 0, m = 0; i < 6; i++)
- {
- for (int j = 0; j < 6; j++, m++)
- {
- F[i][j] = *(inptr + m*matrix_stride);
- }
- }
- inptr++;
-
- // Compute the matrix F Z
- for (int i = 0; i < 6; i++)
- {
- FZ[i][0] = 1*F[i][0] + 1*F[i][1] + 1*F[i][2] + 1*F[i][3] + 1*F[i][4];
- FZ[i][1] = 1*F[i][1] + -1*F[i][2] + 2*F[i][3] + -2*F[i][4];
- FZ[i][2] = 1*F[i][1] + 1*F[i][2] + 4*F[i][3] + 4*F[i][4];
- FZ[i][3] = 1*F[i][1] + -1*F[i][2] + 8*F[i][3] + -8*F[i][4] + 1*F[i][5];
- }
-
- // Compute the output tile f = ZT F Z
- for (int j = 0; j < 4; j++)
- {
- f[0][j] = 1*FZ[0][j] + 1*FZ[1][j] + 1*FZ[2][j] + 1*FZ[3][j] + 1*FZ[4][j];
- f[1][j] = 1*FZ[1][j] + -1*FZ[2][j] + 2*FZ[3][j] + -2*FZ[4][j];
- f[2][j] = 1*FZ[1][j] + 1*FZ[2][j] + 4*FZ[3][j] + 4*FZ[4][j];
- f[3][j] = 1*FZ[1][j] + -1*FZ[2][j] + 8*FZ[3][j] + -8*FZ[4][j] + 1*FZ[5][j];
- }
-
- // Write out the output tile
- b = *(bptr++);
- for (int i = 0; i < cells_i; i++)
- {
- for (int j = 0; j < cells_j; j++)
- {
- *(outptrs[i][j]++) = f[i][j] + b;
- }
- }
- }
- }
- else
- {
- // For each channel of the output
- int channels_remaining = n_channels;
-#ifdef __aarch64__
- for (; channels_remaining >= 4; channels_remaining -= 4)
- {
- // Matrices used and computed during this transform
- float32x4_t F[6][6], FZ[6][4], f[4][4];
-
- // Read a 6x6 tile in the Winograd domain
- for (int i = 0, m = 0; i < 6; i++)
- {
- for (int j = 0; j < 6; j++, m++)
- {
- F[i][j] = vld1q_f32(inptr + m*matrix_stride);
- }
- }
- inptr += 4;
-
- // Compute the matrix F Z
- for (int i = 0; i < 6; i++)
- {
- // FZ[i][0] = 1*F[i][0] + 1*F[i][1] + 1*F[i][2] + 1*F[i][3] + 1*F[i][4];
- FZ[i][0] = vaddq_f32(vaddq_f32(vaddq_f32(F[i][0], F[i][1]), vaddq_f32(F[i][2], F[i][3])), F[i][4]);
-
- // FZ[i][1] = 1*F[i][1] + -1*F[i][2] + 2*F[i][3] + -2*F[i][4];
- FZ[i][1] = vmlaq_n_f32(vsubq_f32(F[i][1], F[i][2]), vsubq_f32(F[i][3], F[i][4]), 2.0f);
-
- // FZ[i][2] = 1*F[i][1] + 1*F[i][2] + 4*F[i][3] + 4*F[i][4];
- FZ[i][2] = vmlaq_n_f32(vaddq_f32(F[i][1], F[i][2]), vaddq_f32(F[i][3], F[i][4]), 4.0f);
-
- // FZ[i][3] = 1*F[i][1] + -1*F[i][2] + 8*F[i][3] + -8*F[i][4] + 1*F[i][5];
- FZ[i][3] = vaddq_f32(vmlaq_n_f32(vsubq_f32(F[i][1], F[i][2]), vsubq_f32(F[i][3], F[i][4]), 8.0f), F[i][5]);
- }
-
- // Compute the output tile f = ZT F Z
- for (int j = 0; j < 4; j++)
- {
- // f[0][j] = 1*FZ[0][j] + 1*FZ[1][j] + 1*FZ[2][j] + 1*FZ[3][j] + 1*FZ[4][j];
- f[0][j] = vaddq_f32(vaddq_f32(vaddq_f32(FZ[0][j], FZ[1][j]), vaddq_f32(FZ[2][j], FZ[3][j])), FZ[4][j]);
-
- // f[1][j] = 1*FZ[1][j] + -1*FZ[2][j] + 2*FZ[3][j] + -2*FZ[4][j];
- f[1][j] = vmlaq_n_f32(vsubq_f32(FZ[1][j], FZ[2][j]), vsubq_f32(FZ[3][j], FZ[4][j]), 2.0f);
-
- // f[2][j] = 1*FZ[1][j] + 1*FZ[2][j] + 4*FZ[3][j] + 4*FZ[4][j];
- f[2][j] = vmlaq_n_f32(vaddq_f32(FZ[1][j], FZ[2][j]), vaddq_f32(FZ[3][j], FZ[4][j]), 4.0f);
-
- // f[3][j] = 1*FZ[1][j] + -1*FZ[2][j] + 8*FZ[3][j] + -8*FZ[4][j] + 1*FZ[5][j];
- f[3][j] = vaddq_f32(vmlaq_n_f32(vsubq_f32(FZ[1][j], FZ[2][j]), vsubq_f32(FZ[3][j], FZ[4][j]), 8.0f), FZ[5][j]);
- }
-
- // Write out the output tile
- for (int i = 0; i < cells_i; i++)
- {
- for (int j = 0; j < cells_j; j++)
- {
- vst1q_f32(outptrs[i][j], f[i][j]);
- outptrs[i][j] += 4;
- }
- }
- }
-#endif // __aarch64__
-#ifdef __arm_any__
- for (; channels_remaining >= 2; channels_remaining -= 2)
- {
- // Matrices used and computed during this transform
- float32x2_t F[6][6], FZ[6][4], f[4][4];
-
- // Read a 6x6 tile in the Winograd domain
- for (int i = 0, m = 0; i < 6; i++)
- {
- for (int j = 0; j < 6; j++, m++)
- {
- F[i][j] = vld1_f32(inptr + m*matrix_stride);
- }
- }
- inptr += 2;
-
- // Compute the matrix F Z
- for (int i = 0; i < 6; i++)
- {
- // FZ[i][0] = 1*F[i][0] + 1*F[i][1] + 1*F[i][2] + 1*F[i][3] + 1*F[i][4];
- FZ[i][0] = vadd_f32(vadd_f32(vadd_f32(F[i][0], F[i][1]), vadd_f32(F[i][2], F[i][3])), F[i][4]);
-
- // FZ[i][1] = 1*F[i][1] + -1*F[i][2] + 2*F[i][3] + -2*F[i][4];
- FZ[i][1] = vmla_n_f32(vsub_f32(F[i][1], F[i][2]), vsub_f32(F[i][3], F[i][4]), 2.0f);
-
- // FZ[i][2] = 1*F[i][1] + 1*F[i][2] + 4*F[i][3] + 4*F[i][4];
- FZ[i][2] = vmla_n_f32(vadd_f32(F[i][1], F[i][2]), vadd_f32(F[i][3], F[i][4]), 4.0f);
-
- // FZ[i][3] = 1*F[i][1] + -1*F[i][2] + 8*F[i][3] + -8*F[i][4] + 1*F[i][5];
- FZ[i][3] = vadd_f32(vmla_n_f32(vsub_f32(F[i][1], F[i][2]), vsub_f32(F[i][3], F[i][4]), 8.0f), F[i][5]);
- }
-
- // Compute the output tile f = ZT F Z
- for (int j = 0; j < 4; j++)
- {
- // f[0][j] = 1*FZ[0][j] + 1*FZ[1][j] + 1*FZ[2][j] + 1*FZ[3][j] + 1*FZ[4][j];
- f[0][j] = vadd_f32(vadd_f32(vadd_f32(FZ[0][j], FZ[1][j]), vadd_f32(FZ[2][j], FZ[3][j])), FZ[4][j]);
-
- // f[1][j] = 1*FZ[1][j] + -1*FZ[2][j] + 2*FZ[3][j] + -2*FZ[4][j];
- f[1][j] = vmla_n_f32(vsub_f32(FZ[1][j], FZ[2][j]), vsub_f32(FZ[3][j], FZ[4][j]), 2.0f);
-
- // f[2][j] = 1*FZ[1][j] + 1*FZ[2][j] + 4*FZ[3][j] + 4*FZ[4][j];
- f[2][j] = vmla_n_f32(vadd_f32(FZ[1][j], FZ[2][j]), vadd_f32(FZ[3][j], FZ[4][j]), 4.0f);
-
- // f[3][j] = 1*FZ[1][j] + -1*FZ[2][j] + 8*FZ[3][j] + -8*FZ[4][j] + 1*FZ[5][j];
- f[3][j] = vadd_f32(vmla_n_f32(vsub_f32(FZ[1][j], FZ[2][j]), vsub_f32(FZ[3][j], FZ[4][j]), 8.0f), FZ[5][j]);
- }
-
- // Write out the output tile
- for (int i = 0; i < cells_i; i++)
- {
- for (int j = 0; j < cells_j; j++)
- {
- vst1_f32(outptrs[i][j], f[i][j]);
- outptrs[i][j] += 2;
- }
- }
- }
-#endif // __arm_any__
- for (; channels_remaining; channels_remaining--)
- {
- // Matrices used and computed during this transform
- float F[6][6], FZ[6][4], f[4][4];
-
- // Read a 6x6 tile in the Winograd domain
- for (int i = 0, m = 0; i < 6; i++)
- {
- for (int j = 0; j < 6; j++, m++)
- {
- F[i][j] = *(inptr + m*matrix_stride);
- }
- }
- inptr++;
-
- // Compute the matrix F Z
- for (int i = 0; i < 6; i++)
- {
- FZ[i][0] = 1*F[i][0] + 1*F[i][1] + 1*F[i][2] + 1*F[i][3] + 1*F[i][4];
- FZ[i][1] = 1*F[i][1] + -1*F[i][2] + 2*F[i][3] + -2*F[i][4];
- FZ[i][2] = 1*F[i][1] + 1*F[i][2] + 4*F[i][3] + 4*F[i][4];
- FZ[i][3] = 1*F[i][1] + -1*F[i][2] + 8*F[i][3] + -8*F[i][4] + 1*F[i][5];
- }
-
- // Compute the output tile f = ZT F Z
- for (int j = 0; j < 4; j++)
- {
- f[0][j] = 1*FZ[0][j] + 1*FZ[1][j] + 1*FZ[2][j] + 1*FZ[3][j] + 1*FZ[4][j];
- f[1][j] = 1*FZ[1][j] + -1*FZ[2][j] + 2*FZ[3][j] + -2*FZ[4][j];
- f[2][j] = 1*FZ[1][j] + 1*FZ[2][j] + 4*FZ[3][j] + 4*FZ[4][j];
- f[3][j] = 1*FZ[1][j] + -1*FZ[2][j] + 8*FZ[3][j] + -8*FZ[4][j] + 1*FZ[5][j];
- }
-
- // Write out the output tile
- for (int i = 0; i < cells_i; i++)
- {
- for (int j = 0; j < cells_j; j++)
- {
- *(outptrs[i][j]++) = f[i][j];
- }
- }
- }
- }
-}
-
-} // namespace (anonymous)
-
-namespace winograd
-{
-using Tiles = OutputTransformImplTiles<3, 3, 6, 6, float>;
-
-template <>
-const Tiles::TileFn Tiles::tilefn_generic = winograd_output_transform_4x4_3x3_fp32_process_tile<false>;
-
-template <>
-const Tiles::TileFn Tiles::tilefn_unpadded = winograd_output_transform_4x4_3x3_fp32_process_tile<true>;
-
-template <>
-const Tiles::TileFn Tiles::tilefn_bottom_padded[n_pad_bottom] = {
- winograd_output_transform_4x4_3x3_fp32_process_tile<true, 1, 0>,
- winograd_output_transform_4x4_3x3_fp32_process_tile<true, 2, 0>,
- winograd_output_transform_4x4_3x3_fp32_process_tile<true, 3, 0>,
-};
-
-template <>
-const Tiles::TileFn Tiles::tilefn_right_padded[n_pad_right] = {
- winograd_output_transform_4x4_3x3_fp32_process_tile<true, 0, 1>,
- winograd_output_transform_4x4_3x3_fp32_process_tile<true, 0, 2>,
- winograd_output_transform_4x4_3x3_fp32_process_tile<true, 0, 3>,
-};
-
-template class OutputTransform<3, 3, 6, 6, float>;
-} // namespace winograd
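The tilefn_* tables closing this file show the dispatch scheme shared by all of these output transforms: the Specialized template parameter substitutes compile-time padding for the runtime arguments, so the cells_i/cells_j loop bounds fold to constants in the padded instantiations while a single generic instantiation covers arbitrary padding. A reduced sketch of the pattern — hypothetical names, not library code:

#include <cstdio>

// Sketch only: compile-time vs run-time padding dispatch, mirroring
// tilefn_unpadded / tilefn_bottom_padded / tilefn_generic above.
template <bool Specialized, int PadBottom = 0>
void process_tile(int runtime_pad_bottom)
{
  // Folds to a constant when Specialized == true.
  const int pad_bottom = Specialized ? PadBottom : runtime_pad_bottom;
  const int cells_i = 4 - pad_bottom;
  for (int i = 0; i < cells_i; i++)
    std::printf("row %d\n", i);
}

using TileFn = void (*)(int);

const TileFn tile_unpadded = process_tile<true>;
const TileFn tile_bottom_padded[3] = {
  process_tile<true, 1>, process_tile<true, 2>, process_tile<true, 3>,
};
const TileFn tile_generic = process_tile<false>;

int main()
{
  tile_unpadded(0);          // padding baked in at compile time
  tile_bottom_padded[1](0);  // pad_bottom == 2, also compile time
  tile_generic(3);           // padding supplied at run time
  return 0;
}
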
diff --git a/src/core/NEON/kernels/convolution/winograd/transforms/weights_2_7_fp32.cpp b/src/core/NEON/kernels/convolution/winograd/transforms/weights_2_7_fp32.cpp
deleted file mode 100644
index 85cf418656..0000000000
--- a/src/core/NEON/kernels/convolution/winograd/transforms/weights_2_7_fp32.cpp
+++ /dev/null
@@ -1,124 +0,0 @@
-/*
- * Copyright (c) 2017 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include "arm_compute/core/NEON/kernels/convolution/common/arm.hpp"
-#include "arm_compute/core/NEON/kernels/convolution/winograd/winograd_gemm.hpp"
-#include "arm_compute/core/NEON/kernels/convolution/winograd/transforms/kernel.hpp"
-
-namespace winograd
-{
- template <>
- template <>
- void WinogradGEMM<1, 2, 1, 7>::WeightsTransform<float>::execute(
- const int n_output_channels,
- const int n_input_channels,
- const float* const input, // NOTE: Data in HWIO order
- float* const output,
- const int matrix_stride,
- const int matrix_row_stride
- )
- {
- // Get pointers to each cell of the weight tensor
- const auto weight_col_stride = n_input_channels * n_output_channels;
- const float *inptrs[kernel_cols];
- for (int j = 0; j < kernel_cols; j++)
- {
- inptrs[j] = input + j*weight_col_stride;
- }
-
- // For each input channel
- for (int ic = 0; ic < n_input_channels; ic++)
- {
- float *outptr = output + ic * matrix_row_stride;
-
- // For each output channel
- int channels_remaining = n_output_channels;
- for (; channels_remaining; channels_remaining--)
- {
- // Matrices used and computed in this kernel
- float w[kernel_cols], V[inner_tile_cols];
-
- // Read weights
- for (int j = 0; j < kernel_cols; j++)
- {
- w[j] = *(inptrs[j]++);
- }
-
- // Compute V = w WT
- V[0] = (w[0]*-1) / 36.0f;
- V[1] = (w[1]*-1 + w[3]*-1 + w[5]*-1 + w[0]*1 + w[2]*1 + w[4]*1 + w[6]*1) / 48.0f;
- V[2] = (w[0]*1 + w[1]*1 + w[2]*1 + w[3]*1 + w[4]*1 + w[5]*1 + w[6]*1) / 48.0f;
- V[3] = (w[0]*-1 + w[6]*-64 + w[4]*-16 + w[2]*-4 + w[1]*2 + w[3]*8 + w[5]*32) / 120.0f;
- V[4] = (w[0]*-1 + w[6]*-64 + w[5]*-32 + w[4]*-16 + w[3]*-8 + w[2]*-4 + w[1]*-2) / 120.0f;
- V[5] = (w[5]*-243 + w[3]*-27 + w[1]*-3 + w[2]*9 + w[4]*81 + w[6]*729 + w[0]*1) / 720.0f;
- V[6] = (w[1]*3 + w[2]*9 + w[3]*27 + w[4]*81 + w[5]*243 + w[6]*729 + w[0]*1) / 720.0f;
- V[7] = (w[6]*1) / 1.0f;
-
- // Store the transformed weights
- for (int j = 0; j < inner_tile_cols; j++)
- {
- *(outptr + j*matrix_stride) = V[j];
- }
- outptr++;
- }
- }
- }
-
- template <>
- template <>
- int WinogradGEMM<1, 2, 1, 7>::WeightsTransform<float>::ops_performed(const KernelShape &shape)
- {
- (void) shape;
- return 0; // TODO
- }
-
- template <>
- template <>
- void WinogradGEMM<2, 1, 7, 1>::WeightsTransform<float>::execute(
- const int n_output_channels,
- const int n_input_channels,
- const float* const input, // NOTE: Data in HWIO order
- float* const output,
- const int matrix_stride,
- const int matrix_row_stride
- )
- {
- // Redirect to the 1xN implementation
- WinogradGEMM<1, 2, 1, 7>::template WeightsTransform<float>::execute(
- n_output_channels, n_input_channels, input, output, matrix_stride,
- matrix_row_stride
- );
- }
-
- template <>
- template <>
- int WinogradGEMM<2, 1, 7, 1>::WeightsTransform<float>::ops_performed(const KernelShape &shape)
- {
- (void) shape;
- return 0; // TODO
- }
-
- template struct WinogradGEMM<1, 2, 1, 7>::WeightsTransform<float>;
- template struct WinogradGEMM<2, 1, 7, 1>::WeightsTransform<float>;
-} // namespace winograd
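The unrolled "V = w WT" in this file is an 8x7 matrix-vector product V = G w, with the rows of G readable straight off the deleted expressions. A scalar sketch with G transcribed verbatim (not library code; the row scalings are 1/36, 1/48, 1/48, 1/120, 1/120, 1/720, 1/720 and 1):

#include <cstdio>

// Sketch only: reference 1-D kernel transform for F(2, 7).
void reference_weights_transform_2_7(const float w[7], float V[8])
{
  static const float G[8][7] = {
    { -1.f/36,   0.f,       0.f,      0.f,       0.f,        0.f,        0.f        },
    {  1.f/48,  -1.f/48,    1.f/48,  -1.f/48,    1.f/48,    -1.f/48,     1.f/48     },
    {  1.f/48,   1.f/48,    1.f/48,   1.f/48,    1.f/48,     1.f/48,     1.f/48     },
    { -1.f/120,  2.f/120,  -4.f/120,  8.f/120, -16.f/120,   32.f/120,  -64.f/120    },
    { -1.f/120, -2.f/120,  -4.f/120, -8.f/120, -16.f/120,  -32.f/120,  -64.f/120    },
    {  1.f/720, -3.f/720,   9.f/720, -27.f/720, 81.f/720, -243.f/720,  729.f/720    },
    {  1.f/720,  3.f/720,   9.f/720,  27.f/720, 81.f/720,  243.f/720,  729.f/720    },
    {  0.f,      0.f,       0.f,      0.f,       0.f,        0.f,        1.f        },
  };
  for (int i = 0; i < 8; i++)
  {
    V[i] = 0.f;
    for (int j = 0; j < 7; j++)
      V[i] += G[i][j] * w[j];
  }
}

int main()
{
  const float w[7] = { 1, 2, 3, 4, 5, 6, 7 };
  float V[8];
  reference_weights_transform_2_7(w, V);
  for (int j = 0; j < 8; j++)
    std::printf("V[%d] = %f\n", j, V[j]);
  return 0;
}

As the deleted file shows, the 7x1 variant simply forwards to this 1x7 implementation with identical arguments.
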
diff --git a/src/core/NEON/kernels/convolution/winograd/transforms/weights_2x2_3x3_fp32.cpp b/src/core/NEON/kernels/convolution/winograd/transforms/weights_2x2_3x3_fp32.cpp
deleted file mode 100644
index 6c71461f81..0000000000
--- a/src/core/NEON/kernels/convolution/winograd/transforms/weights_2x2_3x3_fp32.cpp
+++ /dev/null
@@ -1,228 +0,0 @@
-/*
- * Copyright (c) 2017 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include "arm_compute/core/NEON/kernels/convolution/common/arm.hpp"
-#include "arm_compute/core/NEON/kernels/convolution/winograd/winograd_gemm.hpp"
-#include "arm_compute/core/NEON/kernels/convolution/winograd/transforms/kernel.hpp"
-
-namespace winograd
-{
- template <>
- template <>
- void WinogradGEMM<2, 2, 3, 3>::WeightsTransform<float>::execute(
- const int n_output_channels,
- const int n_input_channels,
- const float* const input,
- float* const output,
- const int matrix_stride,
- const int matrix_row_stride
- )
- {
- constexpr int inner_tile_i = 4;
- constexpr int inner_tile_j = 4;
-
- // Get pointers to each cell of the weight tensor
- const auto weight_col_stride = n_input_channels * n_output_channels;
- const auto weight_row_stride = 3 * weight_col_stride;
- const float *inptrs[3][3];
- for (int i = 0; i < 3; i++)
- {
- for (int j = 0; j < 3; j++)
- {
- inptrs[i][j] = input + i*weight_row_stride + j*weight_col_stride;
- }
- }
-
- // For each input channel
- for (int ic = 0; ic < n_input_channels; ic++)
- {
- float *outptr = output + ic * matrix_row_stride;
-
- // For each output channel
- int channels_remaining = n_output_channels;
-#ifdef __aarch64__
- for (; channels_remaining >= 4; channels_remaining -= 4)
- {
- // Matrices used and computed in this kernel
- float32x4_t w[3][3], Ww[inner_tile_i][3], V[inner_tile_i][inner_tile_j];
-
- // Read weights
- for (int i = 0; i < 3; i++)
- {
- for (int j = 0; j < 3; j++)
- {
- w[i][j] = vld1q_f32(inptrs[i][j]);
- inptrs[i][j] += 4;
- }
- }
-
- // Compute the matrix W w
- for (int j = 0; j < 3; j++)
- {
- Ww[0][j] = w[0][j];
-
- // Ww[1][j] = 0.5*(w[0][j] + w[1][j] + w[2][j]);
- Ww[1][j] = vmulq_n_f32(vaddq_f32(vaddq_f32(w[0][j], w[1][j]), w[2][j]), 0.5f);
-
- // Ww[2][j] = 0.5*(w[0][j] - w[1][j] + w[2][j]);
- Ww[2][j] = vmulq_n_f32(vaddq_f32(vsubq_f32(w[0][j], w[1][j]), w[2][j]), 0.5f);
-
- Ww[3][j] = w[2][j];
- }
-
- // Compute V = W w WT
- for (int i = 0; i < inner_tile_i; i++)
- {
- V[i][0] = Ww[i][0];
-
- // V[i][1] = 0.5*(Ww[i][0] + Ww[i][1] + Ww[i][2]);
- V[i][1] = vmulq_n_f32(vaddq_f32(vaddq_f32(Ww[i][0], Ww[i][1]), Ww[i][2]), 0.5f);
-
- // V[i][2] = 0.5*(Ww[i][0] - Ww[i][1] + Ww[i][2]);
- V[i][2] = vmulq_n_f32(vaddq_f32(vsubq_f32(Ww[i][0], Ww[i][1]), Ww[i][2]), 0.5f);
-
- V[i][3] = Ww[i][2];
- }
-
- // Store the transformed weights
- for (int i = 0, m = 0; i < inner_tile_i; i++)
- {
- for (int j = 0; j < inner_tile_j; j++, m++)
- {
- vst1q_f32(outptr + m*matrix_stride, V[i][j]);
- }
- }
- outptr += 4;
- }
-#endif // __aarch64__
-#ifdef __arm_any__
- for (; channels_remaining >= 2; channels_remaining -= 2)
- {
- // Matrices used and computed in this kernel
- float32x2_t w[3][3], Ww[inner_tile_i][3], V[inner_tile_i][inner_tile_j];
-
- // Read weights
- for (int i = 0; i < 3; i++)
- {
- for (int j = 0; j < 3; j++)
- {
- w[i][j] = vld1_f32(inptrs[i][j]);
- inptrs[i][j] += 2;
- }
- }
-
- // Compute the matrix W w
- for (int j = 0; j < 3; j++)
- {
- Ww[0][j] = w[0][j];
-
- // Ww[1][j] = 0.5*(w[0][j] + w[1][j] + w[2][j]);
- Ww[1][j] = vmul_n_f32(vadd_f32(vadd_f32(w[0][j], w[1][j]), w[2][j]), 0.5f);
-
- // Ww[2][j] = 0.5*(w[0][j] - w[1][j] + w[2][j]);
- Ww[2][j] = vmul_n_f32(vadd_f32(vsub_f32(w[0][j], w[1][j]), w[2][j]), 0.5f);
-
- Ww[3][j] = w[2][j];
- }
-
- // Compute V = W w WT
- for (int i = 0; i < inner_tile_i; i++)
- {
- V[i][0] = Ww[i][0];
-
- // V[i][1] = 0.5*(Ww[i][0] + Ww[i][1] + Ww[i][2]);
- V[i][1] = vmul_n_f32(vadd_f32(vadd_f32(Ww[i][0], Ww[i][1]), Ww[i][2]), 0.5f);
-
- // V[i][2] = 0.5*(Ww[i][0] - Ww[i][1] + Ww[i][2]);
- V[i][2] = vmul_n_f32(vadd_f32(vsub_f32(Ww[i][0], Ww[i][1]), Ww[i][2]), 0.5f);
-
- V[i][3] = Ww[i][2];
- }
-
- // Store the transformed weights
- for (int i = 0, m = 0; i < inner_tile_i; i++)
- {
- for (int j = 0; j < inner_tile_j; j++, m++)
- {
- vst1_f32(outptr + m*matrix_stride, V[i][j]);
- }
- }
- outptr += 2;
- }
-#endif // __arm_any__
- for (; channels_remaining; channels_remaining--)
- {
- // Matrices used and computed in this kernel
- float w[3][3], Ww[inner_tile_i][3], V[inner_tile_i][inner_tile_j];
-
- // Read weights
- for (int i = 0; i < 3; i++)
- {
- for (int j = 0; j < 3; j++)
- {
- w[i][j] = *(inptrs[i][j]++);
- }
- }
-
- // Compute the matrix W w
- for (int j = 0; j < 3; j++)
- {
- Ww[0][j] = w[0][j];
- Ww[1][j] = 0.5*(w[0][j] + w[1][j] + w[2][j]);
- Ww[2][j] = 0.5*(w[0][j] - w[1][j] + w[2][j]);
- Ww[3][j] = w[2][j];
- }
-
- // Compute V = W w WT
- for (int i = 0; i < inner_tile_i; i++)
- {
- V[i][0] = Ww[i][0];
- V[i][1] = 0.5*(Ww[i][0] + Ww[i][1] + Ww[i][2]);
- V[i][2] = 0.5*(Ww[i][0] - Ww[i][1] + Ww[i][2]);
- V[i][3] = Ww[i][2];
- }
-
- // Store the transformed weights
- for (int i = 0, m = 0; i < inner_tile_i; i++)
- {
- for (int j = 0; j < inner_tile_j; j++, m++)
- {
- *(outptr + m*matrix_stride) = V[i][j];
- }
- }
- outptr++;
- }
- }
- }
-
- template <>
- template <>
- int WinogradGEMM<2, 2, 3, 3>::WeightsTransform<float>::ops_performed(const KernelShape &shape)
- {
- const int channel_prod = shape.n_input_channels * shape.n_output_channels;
- return 2 * 18 * channel_prod;
- }
-
- template struct WinogradGEMM<2, 2, 3, 3>::WeightsTransform<float>;
-} // namespace winograd
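This file is the textbook F(2x2, 3x3) kernel transform V = G w G^T with G = [1, 0, 0; 1/2, 1/2, 1/2; 1/2, -1/2, 1/2; 0, 0, 1], applied once per channel; the NEON paths unroll the two passes four (or two) channels at a time. A plain two-pass sketch (not library code):

// Sketch only: reference kernel transform for F(2x2, 3x3); call once per
// input/output channel pair, as the loops in the deleted file do.
void reference_weights_transform_2x2_3x3(const float w[3][3], float V[4][4])
{
  static const float G[4][3] = {
    { 1.0f,  0.0f, 0.0f },
    { 0.5f,  0.5f, 0.5f },
    { 0.5f, -0.5f, 0.5f },
    { 0.0f,  0.0f, 1.0f },
  };

  float Ww[4][3] = {};  // Ww = G w, the first unrolled pass above
  for (int i = 0; i < 4; i++)
    for (int j = 0; j < 3; j++)
      for (int k = 0; k < 3; k++)
        Ww[i][j] += G[i][k] * w[k][j];

  for (int i = 0; i < 4; i++)  // V = Ww G^T, the second pass
    for (int j = 0; j < 4; j++)
    {
      V[i][j] = 0.0f;
      for (int k = 0; k < 3; k++)
        V[i][j] += Ww[i][k] * G[j][k];
    }
}
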
diff --git a/src/core/NEON/kernels/convolution/winograd/transforms/weights_2x2_5x5_fp32.cpp b/src/core/NEON/kernels/convolution/winograd/transforms/weights_2x2_5x5_fp32.cpp
deleted file mode 100644
index 2f4f6e1ba2..0000000000
--- a/src/core/NEON/kernels/convolution/winograd/transforms/weights_2x2_5x5_fp32.cpp
+++ /dev/null
@@ -1,408 +0,0 @@
-/*
- * Copyright (c) 2017 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include "arm_compute/core/NEON/kernels/convolution/common/arm.hpp"
-#include "arm_compute/core/NEON/kernels/convolution/winograd/winograd_gemm.hpp"
-#include "arm_compute/core/NEON/kernels/convolution/winograd/transforms/kernel.hpp"
-
-namespace winograd
-{
- template <>
- template <>
- void WinogradGEMM<2, 2, 5, 5>::WeightsTransform<float>::execute(
- const int n_output_channels,
- const int n_input_channels,
- const float* const input,
- float* const output,
- const int matrix_stride,
- const int matrix_row_stride
- )
- {
- // Get pointers to each cell of the weight tensor
- const auto weight_col_stride = n_input_channels * n_output_channels;
- const auto weight_row_stride = 5 * weight_col_stride;
- const float *inptrs[5][5];
- for (int i = 0; i < 5; i++)
- {
- for (int j = 0; j < 5; j++)
- {
- inptrs[i][j] = input + i*weight_row_stride + j*weight_col_stride;
- }
- }
-
- // For each input channel
- for (int ic = 0; ic < n_input_channels; ic++)
- {
- float *outptr = output + ic * matrix_row_stride;
-
- // For each output channel
- int channels_remaining = n_output_channels;
-#ifdef __aarch64__
- for (; channels_remaining >= 4; channels_remaining -= 4)
- {
- // Matrices used and computed in this kernel
- float32x4_t w[5][5], Ww[6][5], V[6][6];
-
- // Read weights
- for (int i = 0; i < 5; i++)
- {
- for (int j = 0; j < 5; j++)
- {
- w[i][j] = vld1q_f32(inptrs[i][j]);
- inptrs[i][j] += 4;
- }
- }
-
- // Compute the matrix W w
- for (int j = 0; j < 5; j++)
- {
- // Ww[0][j] = w[0][j]/4.0f;
- Ww[0][j] = vmulq_n_f32(w[0][j], 1.0f/4.0f);
-
- // Ww[1][j] = -( w[0][j] + w[1][j] + w[2][j] + w[3][j] + w[4][j])/6.0f;
- Ww[1][j] = vmulq_n_f32(
- vaddq_f32(
- vaddq_f32(
- vaddq_f32(w[1][j], w[0][j]),
- vaddq_f32(w[3][j], w[2][j])
- ),
- w[4][j]
- ),
- -1.0f/6.0f
- );
-
- // Ww[2][j] = +(-w[0][j] + w[1][j] - w[2][j] + w[3][j] - w[4][j])/6.0f;
- // Ww[2][j] = ((w[1][j] - w[0][j]) + (w[3][j] - w[2][j]) - w[4][j])/6.0f;
- Ww[2][j] = vmulq_n_f32(
- vsubq_f32(
- vaddq_f32(
- vsubq_f32(w[1][j], w[0][j]),
- vsubq_f32(w[3][j], w[2][j])
- ),
- w[4][j]
- ),
- 1.0f/6.0f
- );
-
- // Ww[3][j] = (w[0][j]/8.0f + w[1][j]/4.0f + w[2][j]/2.0f + w[3][j] + 2*w[4][j])/3.0f;
- Ww[3][j] = vmulq_n_f32(
- vmlaq_n_f32(
- vaddq_f32(
- vaddq_f32(vmulq_n_f32(w[0][j], 1.0f/8.0f), vmulq_n_f32(w[1][j], 1.0f/4.0f)),
- vaddq_f32(vmulq_n_f32(w[2][j], 1.0f/2.0f), w[3][j])
- ),
- w[4][j], 2.0f
- ),
- 1.0f/3.0f
- );
-
- // Ww[4][j] = (w[0][j]/8.0f - w[1][j]/4.0f + w[2][j]/2.0f - w[3][j] + 2*w[4][j])/3.0f;
- Ww[4][j] = vmulq_n_f32(
- vmlaq_n_f32(
- vaddq_f32(
- vsubq_f32(vmulq_n_f32(w[0][j], 1.0f/8.0f), vmulq_n_f32(w[1][j], 1.0f/4.0f)),
- vsubq_f32(vmulq_n_f32(w[2][j], 1.0f/2.0f), w[3][j])
- ),
- w[4][j], 2.0f
- ),
- 1.0f/3.0f
- );
-
- // Ww[5][j] = w[4][j];
- Ww[5][j] = w[4][j];
- }
-
- // Compute V = W w WT
- for (int i = 0; i < 6; i++)
- {
- // V[i][0] = Ww[i][0]/4.0f;
- V[i][0] = vmulq_n_f32(Ww[i][0], 1.0f/4.0f);
-
- // V[i][1] = -( Ww[i][0] + Ww[i][1] + Ww[i][2] + Ww[i][3] + Ww[i][4])/6.0f;
- V[i][1] = vmulq_n_f32(
- vaddq_f32(
- vaddq_f32(
- vaddq_f32(Ww[i][1], Ww[i][0]),
- vaddq_f32(Ww[i][3], Ww[i][2])
- ),
- Ww[i][4]
- ),
- -1.0f/6.0f
- );
-
- // V[i][2] = +(-Ww[i][0] + Ww[i][1] - Ww[i][2] + Ww[i][3] - Ww[i][4])/6.0f;
- // V[i][2] = ((Ww[i][1] - Ww[i][0]) + (Ww[i][3] - Ww[i][2]) - Ww[i][4])/6.0f;
- V[i][2] = vmulq_n_f32(
- vsubq_f32(
- vaddq_f32(
- vsubq_f32(Ww[i][1], Ww[i][0]),
- vsubq_f32(Ww[i][3], Ww[i][2])
- ),
- Ww[i][4]
- ),
- 1.0f/6.0f
- );
-
- // V[i][3] = (Ww[i][0]/8.0f + Ww[i][1]/4.0f + Ww[i][2]/2.0f + Ww[i][3] + 2*Ww[i][4])/3.0f;
- V[i][3] = vmulq_n_f32(
- vmlaq_n_f32(
- vaddq_f32(
- vaddq_f32(vmulq_n_f32(Ww[i][0], 1.0f/8.0f), vmulq_n_f32(Ww[i][1], 1.0f/4.0f)),
- vaddq_f32(vmulq_n_f32(Ww[i][2], 1.0f/2.0f), Ww[i][3])
- ),
- Ww[i][4], 2.0f
- ),
- 1.0f/3.0f
- );
-
- // V[i][4] = (Ww[i][0]/8.0f - Ww[i][1]/4.0f + Ww[i][2]/2.0f - Ww[i][3] + 2*Ww[i][4])/3.0f;
- V[i][4] = vmulq_n_f32(
- vmlaq_n_f32(
- vaddq_f32(
- vsubq_f32(vmulq_n_f32(Ww[i][0], 1.0f/8.0f), vmulq_n_f32(Ww[i][1], 1.0f/4.0f)),
- vsubq_f32(vmulq_n_f32(Ww[i][2], 1.0f/2.0f), Ww[i][3])
- ),
- Ww[i][4], 2.0f
- ),
- 1.0f/3.0f
- );
-
- // V[i][5] = Ww[i][4];
- V[i][5] = Ww[i][4];
- }
-
- // Store the transformed weights
- for (int i = 0, m = 0; i < 6; i++)
- {
- for (int j = 0; j < 6; j++, m++)
- {
- vst1q_f32(outptr + m*matrix_stride, V[i][j]);
- }
- }
- outptr += 4;
- }
-#endif // __aarch64__
-#ifdef __arm_any__
- for (; channels_remaining >= 2; channels_remaining -= 2)
- {
- // Matrices used and computed in this kernel
- float32x2_t w[5][5], Ww[6][5], V[6][6];
-
- // Read weights
- for (int i = 0; i < 5; i++)
- {
- for (int j = 0; j < 5; j++)
- {
- w[i][j] = vld1_f32(inptrs[i][j]);
- inptrs[i][j] += 2;
- }
- }
-
- // Compute the matrix W w
- for (int j = 0; j < 5; j++)
- {
- // Ww[0][j] = w[0][j]/4.0f;
- Ww[0][j] = vmul_n_f32(w[0][j], 1.0f/4.0f);
-
- // Ww[1][j] = -( w[0][j] + w[1][j] + w[2][j] + w[3][j] + w[4][j])/6.0f;
- Ww[1][j] = vmul_n_f32(
- vadd_f32(
- vadd_f32(
- vadd_f32(w[1][j], w[0][j]),
- vadd_f32(w[3][j], w[2][j])
- ),
- w[4][j]
- ),
- -1.0f/6.0f
- );
-
- // Ww[2][j] = +(-w[0][j] + w[1][j] - w[2][j] + w[3][j] - w[4][j])/6.0f;
- // Ww[2][j] = ((w[1][j] - w[0][j]) + (w[3][j] - w[2][j]) - w[4][j])/6.0f;
- Ww[2][j] = vmul_n_f32(
- vsub_f32(
- vadd_f32(
- vsub_f32(w[1][j], w[0][j]),
- vsub_f32(w[3][j], w[2][j])
- ),
- w[4][j]
- ),
- 1.0f/6.0f
- );
-
- // Ww[3][j] = (w[0][j]/8.0f + w[1][j]/4.0f + w[2][j]/2.0f + w[3][j] + 2*w[4][j])/3.0f;
- Ww[3][j] = vmul_n_f32(
- vmla_n_f32(
- vadd_f32(
- vadd_f32(vmul_n_f32(w[0][j], 1.0f/8.0f), vmul_n_f32(w[1][j], 1.0f/4.0f)),
- vadd_f32(vmul_n_f32(w[2][j], 1.0f/2.0f), w[3][j])
- ),
- w[4][j], 2.0f
- ),
- 1.0f/3.0f
- );
-
- // Ww[4][j] = (w[0][j]/8.0f - w[1][j]/4.0f + w[2][j]/2.0f - w[3][j] + 2*w[4][j])/3.0f;
- Ww[4][j] = vmul_n_f32(
- vmla_n_f32(
- vadd_f32(
- vsub_f32(vmul_n_f32(w[0][j], 1.0f/8.0f), vmul_n_f32(w[1][j], 1.0f/4.0f)),
- vsub_f32(vmul_n_f32(w[2][j], 1.0f/2.0f), w[3][j])
- ),
- w[4][j], 2.0f
- ),
- 1.0f/3.0f
- );
-
- // Ww[5][j] = w[4][j];
- Ww[5][j] = w[4][j];
- }
-
- // Compute V = W w WT
- for (int i = 0; i < 6; i++)
- {
- // V[i][0] = Ww[i][0]/4.0f;
- V[i][0] = vmul_n_f32(Ww[i][0], 1.0f/4.0f);
-
- // V[i][1] = -( Ww[i][0] + Ww[i][1] + Ww[i][2] + Ww[i][3] + Ww[i][4])/6.0f;
- V[i][1] = vmul_n_f32(
- vadd_f32(
- vadd_f32(
- vadd_f32(Ww[i][1], Ww[i][0]),
- vadd_f32(Ww[i][3], Ww[i][2])
- ),
- Ww[i][4]
- ),
- -1.0f/6.0f
- );
-
- // V[i][2] = +(-Ww[i][0] + Ww[i][1] - Ww[i][2] + Ww[i][3] - Ww[i][4])/6.0f;
- // V[i][2] = ((Ww[i][1] - Ww[i][0]) + (Ww[i][3] - Ww[i][2]) - Ww[i][4])/6.0f;
- V[i][2] = vmul_n_f32(
- vsub_f32(
- vadd_f32(
- vsub_f32(Ww[i][1], Ww[i][0]),
- vsub_f32(Ww[i][3], Ww[i][2])
- ),
- Ww[i][4]
- ),
- 1.0f/6.0f
- );
-
- // V[i][3] = (Ww[i][0]/8.0f + Ww[i][1]/4.0f + Ww[i][2]/2.0f + Ww[i][3] + 2*Ww[i][4])/3.0f;
- V[i][3] = vmul_n_f32(
- vmla_n_f32(
- vadd_f32(
- vadd_f32(vmul_n_f32(Ww[i][0], 1.0f/8.0f), vmul_n_f32(Ww[i][1], 1.0f/4.0f)),
- vadd_f32(vmul_n_f32(Ww[i][2], 1.0f/2.0f), Ww[i][3])
- ),
- Ww[i][4], 2.0f
- ),
- 1.0f/3.0f
- );
-
- // V[i][4] = (Ww[i][0]/8.0f - Ww[i][1]/4.0f + Ww[i][2]/2.0f - Ww[i][3] + 2*Ww[i][4])/3.0f;
- V[i][4] = vmul_n_f32(
- vmla_n_f32(
- vadd_f32(
- vsub_f32(vmul_n_f32(Ww[i][0], 1.0f/8.0f), vmul_n_f32(Ww[i][1], 1.0f/4.0f)),
- vsub_f32(vmul_n_f32(Ww[i][2], 1.0f/2.0f), Ww[i][3])
- ),
- Ww[i][4], 2.0f
- ),
- 1.0f/3.0f
- );
-
- // V[i][5] = Ww[i][4];
- V[i][5] = Ww[i][4];
- }
-
- // Store the transformed weights
- for (int i = 0, m = 0; i < 6; i++)
- {
- for (int j = 0; j < 6; j++, m++)
- {
- vst1_f32(outptr + m*matrix_stride, V[i][j]);
- }
- }
- outptr += 2;
- }
-#endif // __arm_any__
- for (; channels_remaining; channels_remaining--)
- {
- // Matrices used and computed in this kernel
- float w[5][5], Ww[6][5], V[6][6];
-
- // Read weights
- for (int i = 0; i < 5; i++)
- {
- for (int j = 0; j < 5; j++)
- {
- w[i][j] = *(inptrs[i][j]++);
- }
- }
-
- // Compute the matrix W w
- for (int j = 0; j < 5; j++)
- {
- Ww[0][j] = w[0][j]/4.0f;
- Ww[1][j] = -( w[0][j] + w[1][j] + w[2][j] + w[3][j] + w[4][j])/6.0f;
- Ww[2][j] = +(-w[0][j] + w[1][j] - w[2][j] + w[3][j] - w[4][j])/6.0f;
- Ww[3][j] = (w[0][j]/8.0f + w[1][j]/4.0f + w[2][j]/2.0f + w[3][j] + 2*w[4][j])/3.0f;
- Ww[4][j] = (w[0][j]/8.0f - w[1][j]/4.0f + w[2][j]/2.0f - w[3][j] + 2*w[4][j])/3.0f;
- Ww[5][j] = w[4][j];
- }
-
- // Compute V = W w WT
- for (int i = 0; i < 6; i++)
- {
- V[i][0] = Ww[i][0]/4.0f;
- V[i][1] = -( Ww[i][0] + Ww[i][1] + Ww[i][2] + Ww[i][3] + Ww[i][4])/6.0f;
- V[i][2] = +(-Ww[i][0] + Ww[i][1] - Ww[i][2] + Ww[i][3] - Ww[i][4])/6.0f;
- V[i][3] = (Ww[i][0]/8.0f + Ww[i][1]/4.0f + Ww[i][2]/2.0f + Ww[i][3] + 2*Ww[i][4])/3.0f;
- V[i][4] = (Ww[i][0]/8.0f - Ww[i][1]/4.0f + Ww[i][2]/2.0f - Ww[i][3] + 2*Ww[i][4])/3.0f;
- V[i][5] = Ww[i][4];
- }
-
- // Store the transformed weights
- for (int i = 0, m = 0; i < 6; i++)
- {
- for (int j = 0; j < 6; j++, m++)
- {
- *(outptr + m*matrix_stride) = V[i][j];
- }
- }
- outptr++;
- }
- }
- }
-
- template <>
- template <>
- int WinogradGEMM<2, 2, 5, 5>::WeightsTransform<float>::ops_performed(const KernelShape &shape)
- {
-    (void) shape;
-    return 0; // TODO
- }
-
- template struct WinogradGEMM<2, 2, 5, 5>::WeightsTransform<float>;
-} // namespace winograd
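The F(2x2, 5x5) kernel transform has the same V = G w G^T structure, with the 6x5 matrix G below transcribed from the scalar tail loop of the deleted file (rows scaled by 1/4, -1/6, 1/6, 1/3, 1/3 and 1). It follows the same two-pass sketch shown after the 2x2/3x3 file, with the dimensions widened accordingly (not library code):

// Sketch only: reference kernel transform for F(2x2, 5x5).
void reference_weights_transform_2x2_5x5(const float w[5][5], float V[6][6])
{
  static const float G[6][5] = {
    {  1.f/4,   0.f,     0.f,    0.f,    0.f   },
    { -1.f/6,  -1.f/6,  -1.f/6, -1.f/6, -1.f/6 },
    { -1.f/6,   1.f/6,  -1.f/6,  1.f/6, -1.f/6 },
    {  1.f/24,  1.f/12,  1.f/6,  1.f/3,  2.f/3 },
    {  1.f/24, -1.f/12,  1.f/6, -1.f/3,  2.f/3 },
    {  0.f,     0.f,     0.f,    0.f,    1.f   },
  };

  float Ww[6][5] = {};  // Ww = G w
  for (int i = 0; i < 6; i++)
    for (int j = 0; j < 5; j++)
      for (int k = 0; k < 5; k++)
        Ww[i][j] += G[i][k] * w[k][j];

  for (int i = 0; i < 6; i++)  // V = Ww G^T
    for (int j = 0; j < 6; j++)
    {
      V[i][j] = 0.f;
      for (int k = 0; k < 5; k++)
        V[i][j] += Ww[i][k] * G[j][k];
    }
}
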
diff --git a/src/core/NEON/kernels/convolution/winograd/transforms/weights_4_5_fp32.cpp b/src/core/NEON/kernels/convolution/winograd/transforms/weights_4_5_fp32.cpp
deleted file mode 100644
index 2f14e20142..0000000000
--- a/src/core/NEON/kernels/convolution/winograd/transforms/weights_4_5_fp32.cpp
+++ /dev/null
@@ -1,124 +0,0 @@
-/*
- * Copyright (c) 2017 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include "arm_compute/core/NEON/kernels/convolution/common/arm.hpp"
-#include "arm_compute/core/NEON/kernels/convolution/winograd/winograd_gemm.hpp"
-#include "arm_compute/core/NEON/kernels/convolution/winograd/transforms/kernel.hpp"
-
-namespace winograd
-{
- template <>
- template <>
- void WinogradGEMM<1, 4, 1, 5>::WeightsTransform<float>::execute(
- const int n_output_channels,
- const int n_input_channels,
- const float* const input, // NOTE: Data in HWIO order
- float* const output,
- const int matrix_stride,
- const int matrix_row_stride
- )
- {
- // Get pointers to each cell of the weight tensor
- const auto weight_col_stride = n_input_channels * n_output_channels;
- const float *inptrs[kernel_cols];
- for (int j = 0; j < kernel_cols; j++)
- {
- inptrs[j] = input + j*weight_col_stride;
- }
-
- // For each input channel
- for (int ic = 0; ic < n_input_channels; ic++)
- {
- float *outptr = output + ic * matrix_row_stride;
-
- // For each output channel
- int channels_remaining = n_output_channels;
- for (; channels_remaining; channels_remaining--)
- {
- // Matrices used and computed in this kernel
- float w[kernel_cols], V[inner_tile_cols];
-
- // Read weights
- for (int j = 0; j < kernel_cols; j++)
- {
- w[j] = *(inptrs[j]++);
- }
-
- // Compute V = w WT
- V[0] = (w[0]*-1) / 36;
- V[1] = (w[1]*-1 + w[3]*-1 + w[0]*1 + w[2]*1 + w[4]*1) / 48;
- V[2] = (w[0]*1 + w[1]*1 + w[2]*1 + w[3]*1 + w[4]*1) / 48;
- V[3] = (w[0]*-1 + w[4]*-16 + w[2]*-4 + w[1]*2 + w[3]*8) / 120;
- V[4] = (w[0]*-1 + w[4]*-16 + w[3]*-8 + w[2]*-4 + w[1]*-2) / 120;
- V[5] = (w[3]*-27 + w[1]*-3 + w[2]*9 + w[4]*81 + w[0]*1) / 720;
- V[6] = (w[1]*3 + w[2]*9 + w[3]*27 + w[4]*81 + w[0]*1) / 720;
- V[7] = (w[4]*1) / 1;
-
- // Store the transformed weights
- for (int j = 0; j < inner_tile_cols; j++)
- {
- *(outptr + j*matrix_stride) = V[j];
- }
- outptr++;
- }
- }
- }
-
- template <>
- template <>
- int WinogradGEMM<1, 4, 1, 5>::WeightsTransform<float>::ops_performed(const KernelShape &shape)
- {
- (void) shape;
- return 0; // TODO
- }
-
- template <>
- template <>
- void WinogradGEMM<4, 1, 5, 1>::WeightsTransform<float>::execute(
- const int n_output_channels,
- const int n_input_channels,
- const float* const input, // NOTE: Data in HWIO order
- float* const output,
- const int matrix_stride,
- const int matrix_row_stride
- )
- {
- // Redirect to the 1xN implementation
- WinogradGEMM<1, 4, 1, 5>::template WeightsTransform<float>::execute(
- n_output_channels, n_input_channels, input, output, matrix_stride,
- matrix_row_stride
- );
- }
-
- template <>
- template <>
- int WinogradGEMM<4, 1, 5, 1>::WeightsTransform<float>::ops_performed(const KernelShape &shape)
- {
- (void) shape;
- return 0; // TODO
- }
-
- template struct WinogradGEMM<1, 4, 1, 5>::WeightsTransform<float>;
- template struct WinogradGEMM<4, 1, 5, 1>::WeightsTransform<float>;
-} // namespace winograd
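All of these weight transforms share one output layout: coefficient V[j] is scattered to a separate matrix (one GEMM operand per inner-tile position) at offset j*matrix_stride, the input channel selects a row via matrix_row_stride, and each output channel advances one element within that row. A small addressing sketch; the sizes and the tightly packed matrix_stride chosen here are assumptions for illustration, since the real values are supplied by the caller:

#include <vector>

int main()
{
  // Hypothetical sizes: 8 inner-tile positions, 2 input and 3 output channels.
  const int inner_tile_cols = 8, n_in = 2, n_out = 3;
  const int matrix_row_stride = n_out;                 // one row per input channel
  const int matrix_stride = n_in * matrix_row_stride;  // assumed tight packing
  std::vector<float> output(inner_tile_cols * matrix_stride, 0.f);

  for (int ic = 0; ic < n_in; ic++)
    for (int oc = 0; oc < n_out; oc++)
      for (int j = 0; j < inner_tile_cols; j++)
        output[j * matrix_stride + ic * matrix_row_stride + oc] = 1.f;  // V[j] goes here
  return 0;
}
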
diff --git a/src/core/NEON/kernels/convolution/winograd/transforms/weights_4x4_3x3_fp32.cpp b/src/core/NEON/kernels/convolution/winograd/transforms/weights_4x4_3x3_fp32.cpp
deleted file mode 100644
index a56a475fc9..0000000000
--- a/src/core/NEON/kernels/convolution/winograd/transforms/weights_4x4_3x3_fp32.cpp
+++ /dev/null
@@ -1,266 +0,0 @@
-/*
- * Copyright (c) 2017 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include "arm_compute/core/NEON/kernels/convolution/common/arm.hpp"
-#include "arm_compute/core/NEON/kernels/convolution/winograd/winograd_gemm.hpp"
-#include "arm_compute/core/NEON/kernels/convolution/winograd/transforms/kernel.hpp"
-
-namespace winograd
-{
- /* Float implementation for kernel transform F(4x4, 3x3) */
- template <>
- template <>
- void WinogradGEMM<4, 4, 3, 3>::WeightsTransform<float>::execute(
- const int n_output_channels,
- const int n_input_channels,
- const float* const input, // NOTE: Data in HWIO order
- float* const output,
- const int matrix_stride,
- const int matrix_row_stride
- )
- {
- // Get pointers to each cell of the weight tensor
- const auto weight_col_stride = n_input_channels * n_output_channels;
- const auto weight_row_stride = 3 * weight_col_stride;
- const float *inptrs[3][3];
- for (int i = 0; i < 3; i++)
- {
- for (int j = 0; j < 3; j++)
- {
- inptrs[i][j] = input + i*weight_row_stride + j*weight_col_stride;
- }
- }
-
- // For each input channel
- for (int ic = 0; ic < n_input_channels; ic++)
- {
- float *outptr = output + ic * matrix_row_stride;
-
- // For each output channel
- int channels_remaining = n_output_channels;
-#ifdef __aarch64__
- for (; channels_remaining >= 4; channels_remaining -= 4)
- {
- // Matrices used and computed in this kernel
- float32x4_t w[3][3], Ww[6][3], V[6][6];
-
- // Read weights
- for (int i = 0; i < 3; i++)
- {
- for (int j = 0; j < 3; j++)
- {
- w[i][j] = vld1q_f32(inptrs[i][j]);
- inptrs[i][j] += 4;
- }
- }
-
- // Compute the matrix W w
- for (int j = 0; j < 3; j++)
- {
- // Ww[0][j] = 6*w[0][j];
- Ww[0][j] = vmulq_n_f32(w[0][j], 6.0);
-
- // Ww[1][j] = -4*w[0][j] + -4*w[1][j] + -4*w[2][j];
- Ww[1][j] = vmulq_n_f32(vaddq_f32(vaddq_f32(w[0][j], w[1][j]), w[2][j]), -4.0);
-
- // Ww[2][j] = -4*w[0][j] + 4*w[1][j] + -4*w[2][j];
- Ww[2][j] = vmulq_n_f32(vsubq_f32(vsubq_f32(w[1][j], w[0][j]), w[2][j]), 4.0);
-
- // Ww[3][j] = 1*w[0][j] + 2*w[1][j] + 4*w[2][j];
- Ww[3][j] = vmlaq_n_f32(vmlaq_n_f32(w[0][j], w[1][j], 2.0f), w[2][j], 4.0f);
-
- // Ww[4][j] = 1*w[0][j] + -2*w[1][j] + 4*w[2][j];
- Ww[4][j] = vmlaq_n_f32(vmlsq_n_f32(w[0][j], w[1][j], 2.0f), w[2][j], 4.0f);
-
- // Ww[5][j] = 24*w[2][j];
- Ww[5][j] = vmulq_n_f32(w[2][j], 24.0f);
- }
-
- // Compute V = W w WT
- for (int i = 0; i < 6; i++)
- {
- const float recip576 = 1.0f / 576.0f;
-
- // V[i][0] = 6*Ww[i][0];
- V[i][0] = vmulq_n_f32(vmulq_n_f32(Ww[i][0], 6.0), recip576);
-
- // V[i][1] = -4*Ww[i][0] + -4*Ww[i][1] + -4*Ww[i][2];
- V[i][1] = vmulq_n_f32(vmulq_n_f32(vaddq_f32(vaddq_f32(Ww[i][0], Ww[i][1]), Ww[i][2]), -4.0), recip576);
-
- // V[i][2] = -4*Ww[i][0] + 4*Ww[i][1] + -4*Ww[i][2];
- V[i][2] = vmulq_n_f32(vmulq_n_f32(vsubq_f32(vsubq_f32(Ww[i][1], Ww[i][0]), Ww[i][2]), 4.0), recip576);
-
- // V[i][3] = 1*Ww[i][0] + 2*Ww[i][1] + 4*Ww[i][2];
- V[i][3] = vmulq_n_f32(vmlaq_n_f32(vmlaq_n_f32(Ww[i][0], Ww[i][1], 2.0f), Ww[i][2], 4.0f), recip576);
-
- // V[i][4] = 1*Ww[i][0] + -2*Ww[i][1] + 4*Ww[i][2];
- V[i][4] = vmulq_n_f32(vmlaq_n_f32(vmlsq_n_f32(Ww[i][0], Ww[i][1], 2.0f), Ww[i][2], 4.0f), recip576);
-
- // V[i][5] = 24*Ww[i][2];
- V[i][5] = vmulq_n_f32(vmulq_n_f32(Ww[i][2], 24.0f), recip576);
- }
-
- // Store the transformed weights
- for (int i = 0, m = 0; i < 6; i++)
- {
- for (int j = 0; j < 6; j++, m++)
- {
- vst1q_f32(outptr + m*matrix_stride, V[i][j]);
- }
- }
- outptr += 4;
- }
-#endif // __aarch64__
-#ifdef __arm_any__
- for (; channels_remaining >= 2; channels_remaining -= 2)
- {
- // Matrices used and computed in this kernel
- float32x2_t w[3][3], Ww[6][3], V[6][6];
-
- // Read weights
- for (int i = 0; i < 3; i++)
- {
- for (int j = 0; j < 3; j++)
- {
- w[i][j] = vld1_f32(inptrs[i][j]);
- inptrs[i][j] += 2;
- }
- }
-
- // Compute the matrix W w
- for (int j = 0; j < 3; j++)
- {
- // Ww[0][j] = 6*w[0][j];
- Ww[0][j] = vmul_n_f32(w[0][j], 6.0);
-
- // Ww[1][j] = -4*w[0][j] + -4*w[1][j] + -4*w[2][j];
- Ww[1][j] = vmul_n_f32(vadd_f32(vadd_f32(w[0][j], w[1][j]), w[2][j]), -4.0);
-
- // Ww[2][j] = -4*w[0][j] + 4*w[1][j] + -4*w[2][j];
- Ww[2][j] = vmul_n_f32(vsub_f32(vsub_f32(w[1][j], w[0][j]), w[2][j]), 4.0);
-
- // Ww[3][j] = 1*w[0][j] + 2*w[1][j] + 4*w[2][j];
- Ww[3][j] = vmla_n_f32(vmla_n_f32(w[0][j], w[1][j], 2.0f), w[2][j], 4.0f);
-
- // Ww[4][j] = 1*w[0][j] + -2*w[1][j] + 4*w[2][j];
- Ww[4][j] = vmla_n_f32(vmls_n_f32(w[0][j], w[1][j], 2.0f), w[2][j], 4.0f);
-
- // Ww[5][j] = 24*w[2][j];
- Ww[5][j] = vmul_n_f32(w[2][j], 24.0f);
- }
-
- // Compute V = W w WT
- for (int i = 0; i < 6; i++)
- {
- const float recip576 = 1.0f / 576.0f;
-
- // V[i][0] = 6*Ww[i][0];
- V[i][0] = vmul_n_f32(vmul_n_f32(Ww[i][0], 6.0), recip576);
-
- // V[i][1] = -4*Ww[i][0] + -4*Ww[i][1] + -4*Ww[i][2];
- V[i][1] = vmul_n_f32(vmul_n_f32(vadd_f32(vadd_f32(Ww[i][0], Ww[i][1]), Ww[i][2]), -4.0), recip576);
-
- // V[i][2] = -4*Ww[i][0] + 4*Ww[i][1] + -4*Ww[i][2];
- V[i][2] = vmul_n_f32(vmul_n_f32(vsub_f32(vsub_f32(Ww[i][1], Ww[i][0]), Ww[i][2]), 4.0), recip576);
-
- // V[i][3] = 1*Ww[i][0] + 2*Ww[i][1] + 4*Ww[i][2];
- V[i][3] = vmul_n_f32(vmla_n_f32(vmla_n_f32(Ww[i][0], Ww[i][1], 2.0f), Ww[i][2], 4.0f), recip576);
-
- // V[i][4] = 1*Ww[i][0] + -2*Ww[i][1] + 4*Ww[i][2];
- V[i][4] = vmul_n_f32(vmla_n_f32(vmls_n_f32(Ww[i][0], Ww[i][1], 2.0f), Ww[i][2], 4.0f), recip576);
-
- // V[i][5] = 24*Ww[i][2];
- V[i][5] = vmul_n_f32(vmul_n_f32(Ww[i][2], 24.0f), recip576);
- }
-
- // Store the transformed weights
- for (int i = 0, m = 0; i < 6; i++)
- {
- for (int j = 0; j < 6; j++, m++)
- {
- vst1_f32(outptr + m*matrix_stride, V[i][j]);
- }
- }
- outptr += 2;
- }
-#endif // __arm_any__
- for (; channels_remaining; channels_remaining--)
- {
- // Matrices used and computed in this kernel
- float w[3][3], Ww[6][3], V[6][6];
-
- // Read weights
- for (int i = 0; i < 3; i++)
- {
- for (int j = 0; j < 3; j++)
- {
- w[i][j] = *(inptrs[i][j]++);
- }
- }
-
- // Compute the matrix W w
- for (int j = 0; j < 3; j++)
- {
- Ww[0][j] = 6*w[0][j];
- Ww[1][j] = -4*w[0][j] + -4*w[1][j] + -4*w[2][j];
- Ww[2][j] = -4*w[0][j] + 4*w[1][j] + -4*w[2][j];
- Ww[3][j] = 1*w[0][j] + 2*w[1][j] + 4*w[2][j];
- Ww[4][j] = 1*w[0][j] + -2*w[1][j] + 4*w[2][j];
- Ww[5][j] = 24*w[2][j];
- }
-
- // Compute V = W w WT
- for (int i = 0; i < 6; i++)
- {
- V[i][0] = ( 6*Ww[i][0]) / 576.0;
- V[i][1] = (-4*Ww[i][0] + -4*Ww[i][1] + -4*Ww[i][2]) / 576.0;
- V[i][2] = (-4*Ww[i][0] + 4*Ww[i][1] + -4*Ww[i][2]) / 576.0;
- V[i][3] = ( 1*Ww[i][0] + 2*Ww[i][1] + 4*Ww[i][2]) / 576.0;
- V[i][4] = ( 1*Ww[i][0] + -2*Ww[i][1] + 4*Ww[i][2]) / 576.0;
- V[i][5] = (24*Ww[i][2]) / 576.0;
- }
-
- // Store the transformed weights
- for (int i = 0, m = 0; i < 6; i++)
- {
- for (int j = 0; j < 6; j++, m++)
- {
- *(outptr + m*matrix_stride) = V[i][j];
- }
- }
- outptr++;
- }
- }
- }
-
- template <>
- template <>
- int WinogradGEMM<4, 4, 3, 3>::WeightsTransform<float>::ops_performed(const KernelShape &shape)
- {
- const int channel_prod = shape.n_input_channels * shape.n_output_channels;
- return 9 * 16 * channel_prod;
- }
-
- template struct WinogradGEMM<4, 4, 3, 3>::WeightsTransform<float>;
-}
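
For reference, the scalar tail loop of the kernel deleted above computes V = (G w G^T) / 576 for the F(4x4, 3x3) case. The sketch below lifts those exact expressions into a standalone function so the transform can be checked in isolation; transform_weights_f4x4_k3x3 and the main() harness are illustrative names, not part of the library.

#include <cstdio>

// Hypothetical standalone form of the deleted kernel's scalar tail:
// Ww = G w, then V = (Ww G^T) / 576, coefficients copied verbatim.
static void transform_weights_f4x4_k3x3(const float w[3][3], float V[6][6])
{
    float Ww[6][3];
    for (int j = 0; j < 3; j++)
    {
        Ww[0][j] =  6*w[0][j];
        Ww[1][j] = -4*w[0][j] + -4*w[1][j] + -4*w[2][j];
        Ww[2][j] = -4*w[0][j] +  4*w[1][j] + -4*w[2][j];
        Ww[3][j] =  1*w[0][j] +  2*w[1][j] +  4*w[2][j];
        Ww[4][j] =  1*w[0][j] + -2*w[1][j] +  4*w[2][j];
        Ww[5][j] = 24*w[2][j];
    }
    for (int i = 0; i < 6; i++)
    {
        V[i][0] = ( 6*Ww[i][0]) / 576.0f;
        V[i][1] = (-4*Ww[i][0] + -4*Ww[i][1] + -4*Ww[i][2]) / 576.0f;
        V[i][2] = (-4*Ww[i][0] +  4*Ww[i][1] + -4*Ww[i][2]) / 576.0f;
        V[i][3] = ( 1*Ww[i][0] +  2*Ww[i][1] +  4*Ww[i][2]) / 576.0f;
        V[i][4] = ( 1*Ww[i][0] + -2*Ww[i][1] +  4*Ww[i][2]) / 576.0f;
        V[i][5] = (24*Ww[i][2]) / 576.0f;
    }
}

int main()
{
    const float w[3][3] = {{1.f, 0.f, 0.f}, {0.f, 1.f, 0.f}, {0.f, 0.f, 1.f}};
    float V[6][6];
    transform_weights_f4x4_k3x3(w, V);
    std::printf("V[0][0] = %f\n", V[0][0]); // 6*6/576 = 0.0625 for this kernel
    return 0;
}
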
diff --git a/src/core/NEON/kernels/convolution/winograd/transforms/weights_6_3_fp32.cpp b/src/core/NEON/kernels/convolution/winograd/transforms/weights_6_3_fp32.cpp
deleted file mode 100644
index c560aa8c8f..0000000000
--- a/src/core/NEON/kernels/convolution/winograd/transforms/weights_6_3_fp32.cpp
+++ /dev/null
@@ -1,125 +0,0 @@
-/*
- * Copyright (c) 2017 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include "arm_compute/core/NEON/kernels/convolution/common/arm.hpp"
-#include "arm_compute/core/NEON/kernels/convolution/winograd/winograd_gemm.hpp"
-#include "arm_compute/core/NEON/kernels/convolution/winograd/transforms/kernel.hpp"
-
-
-namespace winograd
-{
- template <>
- template <>
- void WinogradGEMM<1, 6, 1, 3>::WeightsTransform<float>::execute(
- const int n_output_channels,
- const int n_input_channels,
- const float* const input, // NOTE: Data in HWIO order
- float* const output,
- const int matrix_stride,
- const int matrix_row_stride
- )
- {
- // Get pointers to each cell of the weight tensor
- const auto weight_col_stride = n_input_channels * n_output_channels;
- const float *inptrs[3];
- for (int j = 0; j < 3; j++)
- {
- inptrs[j] = input + j*weight_col_stride;
- }
-
- // For each input channel
- for (int ic = 0; ic < n_input_channels; ic++)
- {
- float *outptr = output + ic * matrix_row_stride;
-
- // For each output channel
- int channels_remaining = n_output_channels;
- for (; channels_remaining; channels_remaining--)
- {
- // Matrices used and computed in this kernel
- float w[3], V[inner_tile_cols];
-
- // Read weights
- for (int j = 0; j < 3; j++)
- {
- w[j] = *(inptrs[j]++);
- }
-
- // Compute V = w WT
- V[0] = (w[0]*-1) / 36.0f;
- V[1] = (w[1]*-1 + w[0]*1 + w[2]*1) / 48.0f;
- V[2] = (w[0]*1 + w[1]*1 + w[2]*1) / 48.0f;
- V[3] = (w[0]*-1 + w[2]*-4 + w[1]*2) / 120.0f;
- V[4] = (w[0]*-1 + w[2]*-4 + w[1]*-2) / 120.0f;
- V[5] = (w[1]*-3 + w[2]*9 + w[0]*1) / 720.0f;
- V[6] = (w[1]*3 + w[2]*9 + w[0]*1) / 720.0f;
- V[7] = (w[2]*1) / 1;
-
- // Store the transformed weights
- for (int j = 0; j < inner_tile_cols; j++)
- {
- *(outptr + j*matrix_stride) = V[j];
- }
- outptr++;
- }
- }
- }
-
- template <>
- template <>
- int WinogradGEMM<1, 6, 1, 3>::WeightsTransform<float>::ops_performed(const KernelShape &shape)
- {
- (void) shape;
- return 0; // TODO
- }
-
- template <>
- template <>
- void WinogradGEMM<6, 1, 3, 1>::WeightsTransform<float>::execute(
- const int n_output_channels,
- const int n_input_channels,
- const float* const input, // NOTE: Data in HWIO order
- float* const output,
- const int matrix_stride,
- const int matrix_row_stride
- )
- {
- // Redirect to the 1xN implementation
- WinogradGEMM<1, 6, 1, 3>::template WeightsTransform<float>::execute(
- n_output_channels, n_input_channels, input, output, matrix_stride,
- matrix_row_stride
- );
- }
-
- template <>
- template <>
- int WinogradGEMM<6, 1, 3, 1>::WeightsTransform<float>::ops_performed(const KernelShape &shape)
- {
- (void) shape;
- return 0; // TODO
- }
-
- template struct WinogradGEMM<1, 6, 1, 3>::WeightsTransform<float>;
- template struct WinogradGEMM<6, 1, 3, 1>::WeightsTransform<float>;
-}
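
The 1-D kernel deleted above applies an eight-coefficient transform per output channel, and the 6x1 variant reuses it with rows and columns swapped. Below is a minimal scalar sketch of those eight expressions, copied from the V[...] lines; transform_weights_f6_k3 is an illustrative name, not library API.

#include <cstdio>

static void transform_weights_f6_k3(const float w[3], float V[8])
{
    V[0] = (w[0]*-1) / 36.0f;
    V[1] = (w[1]*-1 + w[0]*1 + w[2]*1) / 48.0f;
    V[2] = (w[0]*1 + w[1]*1 + w[2]*1) / 48.0f;
    V[3] = (w[0]*-1 + w[2]*-4 + w[1]*2) / 120.0f;
    V[4] = (w[0]*-1 + w[2]*-4 + w[1]*-2) / 120.0f;
    V[5] = (w[1]*-3 + w[2]*9 + w[0]*1) / 720.0f;
    V[6] = (w[1]*3 + w[2]*9 + w[0]*1) / 720.0f;
    V[7] = w[2]; // (w[2]*1) / 1 in the original
}

int main()
{
    const float w[3] = {1.0f, 2.0f, 3.0f};
    float V[8];
    transform_weights_f6_k3(w, V);
    for (int j = 0; j < 8; j++)
    {
        std::printf("V[%d] = %f\n", j, V[j]);
    }
    return 0;
}
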
diff --git a/src/core/NEON/kernels/convolution/winograd/winograd_gemm.cpp b/src/core/NEON/kernels/convolution/winograd/winograd.cpp
index a7de2fd3e5..226f303c7d 100644
--- a/src/core/NEON/kernels/convolution/winograd/winograd_gemm.cpp
+++ b/src/core/NEON/kernels/convolution/winograd/winograd.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -22,14 +22,13 @@
* SOFTWARE.
*/
#include <cstring>
-#include "arm_compute/core/NEON/kernels/convolution/winograd/winograd_gemm.hpp"
-#include "arm_compute/core/NEON/kernels/convolution/winograd/batched_blocked_gemm.hpp"
+#include "winograd.hpp"
using namespace winograd;
/** Get the output shape of a convolution. */
-template <int kr, int kc, int itr, int itc>
-template <typename TOut, typename TIn>
-Tensor4DShape WinogradGEMM<kr, kc, itr, itc>::Convolution<TOut, TIn>::get_output_shape(
+template <int kr, int kc, int itr, int itc, WinogradRoots R>
+template <typename TOut, typename TIn, typename TInGEMM, typename TOutGEMM>
+Tensor4DShape WinogradGEMM<kr, kc, itr, itc, R>::Convolution<TOut, TIn, TInGEMM, TOutGEMM>::get_output_shape(
const KernelShape &kernel_shape,
const Tensor4DShape &in_shape,
const PaddingType padding
@@ -47,9 +46,9 @@ Tensor4DShape WinogradGEMM<kr, kc, itr, itc>::Convolution<TOut, TIn>::get_output
/* Get the memory required to transform the kernel.
*/
template <int kernel_rows, int kernel_cols,
- int output_tile_rows, int output_tile_cols>
-template <typename TOut, typename TIn>
-size_t WinogradGEMM<kernel_rows, kernel_cols, output_tile_rows, output_tile_cols>::Convolution<TOut, TIn>::get_kernel_transform_working_size(const KernelShape &shape)
+ int output_tile_rows, int output_tile_cols, WinogradRoots roots>
+template <typename TOut, typename TIn, typename TGIn, typename TGOut>
+size_t WinogradGEMM<kernel_rows, kernel_cols, output_tile_rows, output_tile_cols, roots>::Convolution<TOut, TIn, TGIn, TGOut>::get_kernel_transform_working_size(const KernelShape &shape)
{
if (shape.ordering == HWIO)
{
@@ -68,17 +67,17 @@ size_t WinogradGEMM<kernel_rows, kernel_cols, output_tile_rows, output_tile_cols
/** Get the memory required to store the kernel transformed into the
* Winograd domain.
*/
-template <int kernel_rows, int kernel_cols, int output_tile_rows, int output_tile_cols>
-template <typename TOut, typename TIn>
-size_t WinogradGEMM<kernel_rows, kernel_cols, output_tile_rows, output_tile_cols>::Convolution<TOut, TIn>::get_kernel_storage_size(const KernelShape &shape)
+template <int kernel_rows, int kernel_cols, int output_tile_rows, int output_tile_cols, WinogradRoots roots>
+template <typename TOut, typename TIn, typename TGIn, typename TGOut>
+size_t WinogradGEMM<kernel_rows, kernel_cols, output_tile_rows, output_tile_cols, roots>::Convolution<TOut, TIn, TGIn, TGOut>::get_kernel_storage_size(const KernelShape &shape)
{
return N_GEMMS * get_kernel_matrix_size(shape);
}
-template <int kernel_rows, int kernel_cols, int output_tile_rows, int output_tile_cols>
-template <typename TOut, typename TIn>
-size_t WinogradGEMM<kernel_rows, kernel_cols, output_tile_rows, output_tile_cols>::Convolution<TOut, TIn>::get_input_storage_size(
+template <int kernel_rows, int kernel_cols, int output_tile_rows, int output_tile_cols, WinogradRoots roots>
+template <typename TOut, typename TIn, typename TGIn, typename TGOut>
+size_t WinogradGEMM<kernel_rows, kernel_cols, output_tile_rows, output_tile_cols, roots>::Convolution<TOut, TIn, TGIn, TGOut>::get_input_storage_size(
const KernelShape &kernel_shape,
const Tensor4DShape &input_shape,
const PaddingType padding
@@ -88,9 +87,9 @@ size_t WinogradGEMM<kernel_rows, kernel_cols, output_tile_rows, output_tile_cols
}
-template <int kernel_rows, int kernel_cols, int output_tile_rows, int output_tile_cols>
-template <typename TOut, typename TIn>
-size_t WinogradGEMM<kernel_rows, kernel_cols, output_tile_rows, output_tile_cols>::Convolution<TOut, TIn>::get_output_storage_size(
+template <int kernel_rows, int kernel_cols, int output_tile_rows, int output_tile_cols, WinogradRoots roots>
+template <typename TOut, typename TIn, typename TGIn, typename TGOut>
+size_t WinogradGEMM<kernel_rows, kernel_cols, output_tile_rows, output_tile_cols, roots>::Convolution<TOut, TIn, TGIn, TGOut>::get_output_storage_size(
const KernelShape &kernel_shape,
const Tensor4DShape &input_shape,
const PaddingType padding
@@ -102,9 +101,9 @@ size_t WinogradGEMM<kernel_rows, kernel_cols, output_tile_rows, output_tile_cols
/** Get the memory required to apply a Winograd operator to some input.
*/
-template <int kernel_rows, int kernel_cols, int output_tile_rows, int output_tile_cols>
-template <typename TOut, typename TIn>
-size_t WinogradGEMM<kernel_rows, kernel_cols, output_tile_rows, output_tile_cols>::Convolution<TOut, TIn>::get_working_space_size(
+template <int kernel_rows, int kernel_cols, int output_tile_rows, int output_tile_cols, WinogradRoots roots>
+template <typename TOut, typename TIn, typename TGIn, typename TGOut>
+size_t WinogradGEMM<kernel_rows, kernel_cols, output_tile_rows, output_tile_cols, roots>::Convolution<TOut, TIn, TGIn, TGOut>::get_working_space_size(
const KernelShape &kernel_shape,
const Tensor4DShape &input_shape,
const PaddingType padding_type
@@ -139,20 +138,20 @@ size_t WinogradGEMM<kernel_rows, kernel_cols, output_tile_rows, output_tile_cols
/* Get the memory required by a single "input" matrix.
*/
-template <int kernel_rows, int kernel_cols, int output_tile_rows, int output_tile_cols>
-template <typename TOut, typename TIn>
-size_t WinogradGEMM<kernel_rows, kernel_cols, output_tile_rows, output_tile_cols>::Convolution<TOut, TIn>::get_input_matrix_size(
+template <int kernel_rows, int kernel_cols, int output_tile_rows, int output_tile_cols, WinogradRoots roots>
+template <typename TOut, typename TIn, typename TGIn, typename TGOut>
+size_t WinogradGEMM<kernel_rows, kernel_cols, output_tile_rows, output_tile_cols, roots>::Convolution<TOut, TIn, TGIn, TGOut>::get_input_matrix_size(
const KernelShape &kernel_shape,
const Tensor4DShape &input_shape,
const PaddingType padding_type
)
{
- return get_input_matrix_stride(kernel_shape, input_shape, padding_type) * sizeof(TIn);
+ return get_input_matrix_stride(kernel_shape, input_shape, padding_type) * sizeof(TGIn);
}
-template <int kernel_rows, int kernel_cols, int output_tile_rows, int output_tile_cols>
-template <typename TOut, typename TIn>
-int WinogradGEMM<kernel_rows, kernel_cols, output_tile_rows, output_tile_cols>::Convolution<TOut, TIn>::get_input_matrix_stride(
+template <int kernel_rows, int kernel_cols, int output_tile_rows, int output_tile_cols, WinogradRoots roots>
+template <typename TOut, typename TIn, typename TGIn, typename TGOut>
+int WinogradGEMM<kernel_rows, kernel_cols, output_tile_rows, output_tile_cols, roots>::Convolution<TOut, TIn, TGIn, TGOut>::get_input_matrix_stride(
const KernelShape &kernel_shape,
const Tensor4DShape &input_shape,
const PaddingType padding_type
@@ -171,21 +170,21 @@ int WinogradGEMM<kernel_rows, kernel_cols, output_tile_rows, output_tile_cols>::
/* Get the memory required by a single "output" matrix.
*/
-template <int kernel_rows, int kernel_cols, int output_tile_rows, int output_tile_cols>
-template <typename TOut, typename TIn>
-size_t WinogradGEMM<kernel_rows, kernel_cols, output_tile_rows, output_tile_cols>::Convolution<TOut, TIn>::get_output_matrix_size(
+template <int kernel_rows, int kernel_cols, int output_tile_rows, int output_tile_cols, WinogradRoots roots>
+template <typename TOut, typename TIn, typename TGIn, typename TGOut>
+size_t WinogradGEMM<kernel_rows, kernel_cols, output_tile_rows, output_tile_cols, roots>::Convolution<TOut, TIn, TGIn, TGOut>::get_output_matrix_size(
const KernelShape &kernel_shape,
const Tensor4DShape &input_shape,
const PaddingType padding_type
)
{
- return get_output_matrix_stride(kernel_shape, input_shape, padding_type) * sizeof(TOut);
+ return get_output_matrix_stride(kernel_shape, input_shape, padding_type) * sizeof(TGOut);
}
-template <int kernel_rows, int kernel_cols, int output_tile_rows, int output_tile_cols>
-template <typename TOut, typename TIn>
-int WinogradGEMM<kernel_rows, kernel_cols, output_tile_rows, output_tile_cols>::Convolution<TOut, TIn>::get_output_matrix_stride(
+template <int kernel_rows, int kernel_cols, int output_tile_rows, int output_tile_cols, WinogradRoots roots>
+template <typename TOut, typename TIn, typename TGIn, typename TGOut>
+int WinogradGEMM<kernel_rows, kernel_cols, output_tile_rows, output_tile_cols, roots>::Convolution<TOut, TIn, TGIn, TGOut>::get_output_matrix_stride(
const KernelShape &kernel_shape,
const Tensor4DShape &input_shape,
const PaddingType padding_type
@@ -204,16 +203,16 @@ int WinogradGEMM<kernel_rows, kernel_cols, output_tile_rows, output_tile_cols>::
/* Get the memory required by a single "kernel" matrix.
*/
-template <int kernel_rows, int kernel_cols, int output_tile_rows, int output_tile_cols>
-template <typename TOut, typename TIn>
-size_t WinogradGEMM<kernel_rows, kernel_cols, output_tile_rows, output_tile_cols>::Convolution<TOut, TIn>::get_kernel_matrix_size(const KernelShape &shape)
+template <int kernel_rows, int kernel_cols, int output_tile_rows, int output_tile_cols, WinogradRoots roots>
+template <typename TOut, typename TIn, typename TGIn, typename TGOut>
+size_t WinogradGEMM<kernel_rows, kernel_cols, output_tile_rows, output_tile_cols, roots>::Convolution<TOut, TIn, TGIn, TGOut>::get_kernel_matrix_size(const KernelShape &shape)
{
- return sizeof(TIn) * get_kernel_matrix_stride(shape);
+ return sizeof(TGIn) * get_kernel_matrix_stride(shape);
}
-template <int kernel_rows, int kernel_cols, int output_tile_rows, int output_tile_cols>
-template <typename TOut, typename TIn>
-int WinogradGEMM<kernel_rows, kernel_cols, output_tile_rows, output_tile_cols>::Convolution<TOut, TIn>::get_kernel_matrix_stride(const KernelShape &shape)
+template <int kernel_rows, int kernel_cols, int output_tile_rows, int output_tile_cols, WinogradRoots roots>
+template <typename TOut, typename TIn, typename TGIn, typename TGOut>
+int WinogradGEMM<kernel_rows, kernel_cols, output_tile_rows, output_tile_cols, roots>::Convolution<TOut, TIn, TGIn, TGOut>::get_kernel_matrix_stride(const KernelShape &shape)
{
const int K = shape.n_input_channels;
const int N = roundup(shape.n_output_channels, N_BLOCK);
@@ -222,19 +221,16 @@ int WinogradGEMM<kernel_rows, kernel_cols, output_tile_rows, output_tile_cols>::
// Instantiate required implementations
-template class WinogradGEMM<2, 2, 3, 3>::Convolution<float, float>;
-template class WinogradGEMM<4, 4, 3, 3>::Convolution<float, float>;
+template class WinogradGEMM<2, 2, 3, 3, WinogradRoots::Integers>::Convolution<float, float, float, float>;
+template class WinogradGEMM<4, 4, 3, 3, WinogradRoots::Integers>::Convolution<float, float, float, float>;
-template class WinogradGEMM<1, 6, 1, 3>::Convolution<float, float>;
-template class WinogradGEMM<6, 1, 3, 1>::Convolution<float, float>;
-
-template class WinogradGEMM<2, 2, 5, 5>::Convolution<float, float>;
-
-template class WinogradGEMM<1, 4, 1, 5>::Convolution<float, float>;
-template class WinogradGEMM<4, 1, 5, 1>::Convolution<float, float>;
-
-template class WinogradGEMM<1, 2, 1, 7>::Convolution<float, float>;
-template class WinogradGEMM<2, 1, 7, 1>::Convolution<float, float>;
+template class WinogradGEMM<1, 6, 1, 3, WinogradRoots::Integers>::Convolution<float, float, float, float>;
+template class WinogradGEMM<6, 1, 3, 1, WinogradRoots::Integers>::Convolution<float, float, float, float>;
+template class WinogradGEMM<2, 2, 5, 5, WinogradRoots::Integers>::Convolution<float, float, float, float>;
+template class WinogradGEMM<1, 4, 1, 5, WinogradRoots::Integers>::Convolution<float, float, float, float>;
+template class WinogradGEMM<4, 1, 5, 1, WinogradRoots::Integers>::Convolution<float, float, float, float>;
+template class WinogradGEMM<1, 2, 1, 7, WinogradRoots::Integers>::Convolution<float, float, float, float>;
+template class WinogradGEMM<2, 1, 7, 1, WinogradRoots::Integers>::Convolution<float, float, float, float>;
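
The size helpers in this file compose in a simple way: get_kernel_storage_size is N_GEMMS transformed kernel matrices, and each matrix size is sizeof(TGIn) times a stride derived from the channel counts. The sketch below illustrates that composition; the 36-GEMM count (a 6x6 inner tile, as in the 4x4/3x3 case), the N_BLOCK value, and the K*N per-GEMM stride are assumptions mirroring the setup visible in get_kernel_matrix_stride, not the library's exact arithmetic.

#include <cstdio>

// Local stand-in for the library's roundup() helper (an assumption).
static int roundup(int x, int block) { return block * ((x + block - 1) / block); }

int main()
{
    const int n_gemms = 6 * 6;              // 6x6 inner tile -> 36 GEMMs (assumed)
    const int n_block = 4;                  // illustrative N_BLOCK
    const int K = 64;                       // n_input_channels
    const int N = roundup(96, n_block);     // n_output_channels, padded to N_BLOCK
    const long matrix_bytes = (long)sizeof(float) * K * N; // assumed per-GEMM stride K*N
    std::printf("kernel storage = %ld bytes\n", n_gemms * matrix_bytes);
    return 0;
}
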
diff --git a/src/core/NEON/kernels/convolution/winograd/winograd_transforms/input.hpp b/src/core/NEON/kernels/convolution/winograd/winograd_transforms/input.hpp
new file mode 100644
index 0000000000..fcbd21fcd0
--- /dev/null
+++ b/src/core/NEON/kernels/convolution/winograd/winograd_transforms/input.hpp
@@ -0,0 +1,265 @@
+/*
+ * Copyright (c) 2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#pragma once
+
+#include "winograd.hpp"
+#include "padding.hpp"
+
+#define MEMBERFN(RTYPE) template <\
+ int InnerTileRows, int InnerTileCols,\
+ typename TIn, typename TOut, WinogradRoots Roots\
+> RTYPE InputTransform<InnerTileRows, InnerTileCols, TIn, TOut, Roots>
+
+
+#define Nx1MEMBERFN(RTYPE) template <\
+ int InnerTileRows, typename TIn, typename TOut, WinogradRoots Roots\
+> RTYPE InputTransform<InnerTileRows, 1, TIn, TOut, Roots>
+
+namespace winograd
+{
+
+MEMBERFN()::InputTransform(
+ const int kernel_rows,
+ const int kernel_cols,
+ const int n_batches,
+ const int n_rows,
+ const int n_cols,
+ const int n_channels,
+ const int padding_top,
+ const int padding_left,
+ const int padding_bottom,
+ const int padding_right
+) : _n_batches(n_batches), _n_rows(n_rows), _n_cols(n_cols), _n_channels(n_channels),
+ _inptr(nullptr), _outptr(nullptr),
+ _overlap_rows(kernel_rows - 1), _overlap_cols(kernel_cols - 1),
+ _padding_top(padding_top), _padding_left(padding_left), _padding_bottom(padding_bottom), _padding_right(padding_right),
+ _tiles_M(iceildiv(padding_top + n_rows + padding_bottom - kernel_rows + 1, InnerTileRows - kernel_rows + 1)),
+ _tiles_N(iceildiv(padding_left + n_cols + padding_right - kernel_cols + 1, InnerTileCols - kernel_cols + 1)),
+ _matrix_stride(0), _matrix_row_stride(0), _matrix_batch_stride(0),
+ _in_col_stride(0), _in_row_stride(0), _in_batch_stride(0),
+ _working_space_col_stride(n_channels),
+ _working_space_row_stride(InnerTileCols * _working_space_col_stride),
+ _working_space(nullptr)
+{
+}
+
+MEMBERFN(void)::set_input_tensor(const void* const inptr)
+{
+ set_input_tensor(inptr, _n_channels);
+}
+
+MEMBERFN(void)::set_input_tensor(const void* const inptr, const int ldcol)
+{
+ set_input_tensor(inptr, _n_cols * ldcol, ldcol);
+}
+
+MEMBERFN(void)::set_input_tensor(const void* const inptr, const int ldrow, const int ldcol)
+{
+ set_input_tensor(inptr, _n_rows * ldrow, ldrow, ldcol);
+}
+
+MEMBERFN(void)::set_input_tensor(const void* const inptr, const int ldbatch, const int ldrow, const int ldcol)
+{
+ _inptr = static_cast<const TIn *>(inptr);
+ _in_batch_stride = ldbatch;
+ _in_row_stride = ldrow;
+ _in_col_stride = ldcol;
+}
+
+MEMBERFN(void)::set_output_matrices(void * const mptr, const int ldmatrix, const int ldrow)
+{
+ _outptr = static_cast<TOut *>(mptr);
+ _matrix_stride = ldmatrix;
+ _matrix_row_stride = ldrow;
+ _matrix_batch_stride = _tiles_M * _tiles_N * ldrow;
+}
+
+Nx1MEMBERFN()::InputTransform(
+ const int kernel_rows,
+ const int kernel_cols,
+ const int n_batches,
+ const int n_rows,
+ const int n_cols,
+ const int n_channels,
+ const int padding_top,
+ const int padding_left,
+ const int padding_bottom,
+ const int padding_right
+) : InputTransform<1, InnerTileRows, TIn, TOut, Roots>::InputTransform(
+ /* Transpose rows and columns */
+ kernel_cols, kernel_rows, n_batches, n_cols, n_rows, n_channels,
+ padding_left, padding_top, padding_right, padding_bottom
+ )
+{
+}
+
+Nx1MEMBERFN(void)::set_input_tensor(const void* const inptr)
+{
+ set_input_tensor(inptr, this->_n_channels);
+}
+
+Nx1MEMBERFN(void)::set_input_tensor(const void* const inptr, const int ldcol)
+{
+ set_input_tensor(inptr, this->_n_cols * ldcol, ldcol);
+}
+
+Nx1MEMBERFN(void)::set_input_tensor(const void* const inptr, const int ldrow, const int ldcol)
+{
+ set_input_tensor(inptr, this->_n_rows * ldrow, ldrow, ldcol);
+}
+
+Nx1MEMBERFN(void)::set_input_tensor(const void* const inptr, const int ldbatch, const int ldrow, const int ldcol)
+{
+ // Transpose row and column strides
+ Base::set_input_tensor(inptr, ldbatch, ldcol, ldrow);
+}
+
+MEMBERFN(size_t)::get_working_space_size(const unsigned int nthreads) const
+{
+ return sizeof(TIn) * InnerTileRows * _working_space_row_stride * nthreads;
+}
+
+MEMBERFN(void)::set_working_space(void * const buffer)
+{
+ _working_space = static_cast<TIn *>(buffer);
+}
+
+MEMBERFN(unsigned int)::get_window(void) const
+{
+ return iceildiv(_n_channels, WINDOW_BLOCK);
+}
+
+MEMBERFN(void)::run(
+ const unsigned int start,
+ const unsigned int stop,
+ const unsigned int threadid
+)
+{
+ // Determine the channels on which to work
+ if (start >= get_window())
+ {
+ return; // No work to do beyond the end of the window
+ }
+ const unsigned int start_channel = start * WINDOW_BLOCK;
+ const unsigned int stop_channel = std::min<unsigned int>(_n_channels , stop * WINDOW_BLOCK);
+ const unsigned int n_channels = stop_channel - start_channel;
+
+ // Loop over batches
+ for (int batch = 0; batch < _n_batches; batch++)
+ {
+ const TIn* const inptr_batch = _inptr + start_channel + batch*_in_batch_stride;
+ TOut* const outptr_batch = _outptr + start_channel + batch*_matrix_batch_stride;
+
+ // Loop over rows of tiles
+ for (int tile_i = 0; tile_i < _tiles_M; tile_i++)
+ {
+ // Compute the starting and ending row of pixels within the row of tiles,
+ // hence compute the padding to apply to the top and bottom of each tile.
+ const int row_top = tile_i * (InnerTileRows - _overlap_rows) - _padding_top;
+ const int row_bottom = row_top + InnerTileRows;
+ const int row_pad_top = std::max(0, _padding_top - tile_i * (InnerTileRows - _overlap_rows));
+ const int row_pad_bottom = std::max(0, row_bottom - _n_rows);
+
+ // Get a pointer to the start of the row.
+ const int row_offset = std::min(0, row_pad_top - _padding_top);
+ const TIn* const inptr_row = inptr_batch + _in_row_stride*(row_offset + tile_i*(InnerTileRows - _overlap_rows));
+ TOut* const outptr_row = outptr_batch + tile_i*_tiles_N*_matrix_row_stride;
+
+ // Loop over tiles within the row
+ for (int tile_j = 0; tile_j < _tiles_N; tile_j++)
+ {
+ // Compute the starting and ending column of pixels within the tile,
+ // hence compute the padding to apply to the left and right of the
+ // tile.
+ const int tile_left = tile_j * (InnerTileCols - _overlap_cols) - _padding_left;
+ const int tile_right = tile_left + InnerTileCols;
+ const int tile_pad_left = std::max(0, _padding_left - tile_j * (InnerTileCols - _overlap_cols));
+ const int tile_pad_right = std::max(0, tile_right - _n_cols);
+
+ // Get a pointer to the start of the tile.
+ const int col_offset = std::min(0, tile_pad_left - _padding_left);
+ const TIn* const inptr_tile = inptr_row + _in_col_stride*(col_offset + tile_j*(InnerTileCols - _overlap_cols));
+ TOut* const outptr_tile = outptr_row + tile_j * _matrix_row_stride;
+
+ // Transform the tile, applying padding if necessary.
+ if (row_pad_top || tile_pad_left || row_pad_bottom || tile_pad_right)
+ {
+ transform_padded_tile(
+ threadid, n_channels, outptr_tile, inptr_tile,
+ row_pad_top, tile_pad_left, row_pad_bottom, tile_pad_right
+ );
+ }
+ else
+ {
+ transform_unpadded_tile(threadid, n_channels, outptr_tile, inptr_tile);
+ }
+ }
+ }
+ }
+}
+
+MEMBERFN(void)::transform_unpadded_tile(
+ const unsigned int /* threadid unused */,
+ const int n_channels,
+ TOut * const outptr,
+ const TIn * const inptr
+)
+{
+ transform_tile(
+ n_channels, inptr, _in_row_stride, _in_col_stride, outptr, _matrix_stride
+ );
+}
+
+MEMBERFN(void)::transform_padded_tile(
+ const unsigned int threadid,
+ const int n_channels,
+ TOut * const outptr,
+ const TIn * const inptr,
+ const int padding_top,
+ const int padding_left,
+ const int padding_bottom,
+ const int padding_right
+)
+{
+ padding::copy_and_pad_tile(
+ InnerTileRows, InnerTileCols, n_channels,
+ inptr, _in_row_stride, _in_col_stride,
+ static_cast<TIn *>(get_working_space(threadid)), _working_space_row_stride, _working_space_col_stride,
+ padding_top, padding_left, padding_bottom, padding_right
+ );
+
+ transform_tile(
+ n_channels, static_cast<const TIn *>(get_working_space(threadid)),
+ _working_space_row_stride, _working_space_col_stride,
+ outptr, _matrix_stride
+ );
+}
+
+MEMBERFN(void *)::get_working_space(const unsigned int threadid) const
+{
+ return _working_space + InnerTileRows * _working_space_row_stride * threadid;
+}
+
+} // namespace winograd
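
The run() method above derives each tile's top/bottom (and, symmetrically, left/right) padding from the tile index alone. The one-dimensional sketch below reproduces that arithmetic so the tile-and-padding layout can be printed and inspected; the names are illustrative and only mirror the expressions in run() and the _tiles_M initialiser.

#include <algorithm>
#include <cstdio>

int main()
{
    // Mirrors _overlap_rows = kernel_rows - 1 and the _tiles_M initialiser,
    // specialised to one dimension with padding_bottom = 0.
    const int inner_tile = 6, kernel = 3;
    const int n_rows = 8, padding_top = 1, padding_bottom = 0;
    const int stride  = inner_tile - kernel + 1; // rows of fresh input per tile
    const int tiles_M = (padding_top + n_rows + padding_bottom - kernel + 1 + stride - 1) / stride; // iceildiv

    for (int tile_i = 0; tile_i < tiles_M; tile_i++)
    {
        const int row_top    = tile_i * stride - padding_top;
        const int row_bottom = row_top + inner_tile;
        const int pad_top    = std::max(0, padding_top - tile_i * stride);
        const int pad_bottom = std::max(0, row_bottom - n_rows);
        std::printf("tile %d: rows [%d, %d), pad_top=%d, pad_bottom=%d\n",
                    tile_i, row_top, row_bottom, pad_top, pad_bottom);
    }
    return 0;
}
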
diff --git a/src/core/NEON/kernels/convolution/winograd/transforms/input_1x8_fp32.cpp b/src/core/NEON/kernels/convolution/winograd/winograd_transforms/input_1x8_fp32_fp32_integers.cpp
index e66300d39a..5040ec1bd4 100644
--- a/src/core/NEON/kernels/convolution/winograd/transforms/input_1x8_fp32.cpp
+++ b/src/core/NEON/kernels/convolution/winograd/winograd_transforms/input_1x8_fp32_fp32_integers.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -22,43 +22,27 @@
* SOFTWARE.
*/
-#include "arm_compute/core/NEON/kernels/convolution/winograd/transforms/input.hpp"
-#include "arm_compute/core/NEON/kernels/convolution/winograd/winograd_gemm.hpp"
-#include "arm_compute/core/NEON/kernels/convolution/common/arm.hpp"
+#include "arm.hpp"
+#include "input.hpp"
-namespace
+namespace winograd
{
-template <bool Specialized, int PadTop=0, int PadLeft=0, int PadBottom=0, int PadRight=0>
-void winograd_input_transform_1x8_fp32_process_tile(
- int n_channels,
+template <>
+void InputTransform<1, 8, float, float, WinogradRoots::Integers>::transform_tile(
+ const int n_channels,
const float* const input_base,
- const int input_row_stride,
+ const int, // We don't need to stride over rows
const int input_col_stride,
- float* const matrix_base,
- const int matrix_stride,
- const int _pad_top,
- const int _pad_left,
- const int _pad_bottom,
- const int _pad_right
+ float* outptr,
+ const int matrix_stride
)
{
- (void) input_row_stride; // No rows over which to stride
- (void) _pad_top; // Never any top padding
- (void) _pad_bottom; // Never any bottom padding
-
- // Extract padding arguments
- const int pad_left = Specialized ? PadLeft : _pad_left;
- const int pad_right = Specialized ? PadRight : _pad_right;
-
constexpr int inner_tile_cols = 8;
- const int cells_j = inner_tile_cols - pad_right;
-
- float *outptr = matrix_base;
// Get pointers into the input tile
const float *x_ptrs[inner_tile_cols];
- for (int j = pad_left, xj = 0; j < cells_j; j++, xj++)
+ for (int j = 0, xj = 0; j < inner_tile_cols; j++, xj++)
{
x_ptrs[j] = input_base + xj*input_col_stride;
}
@@ -75,7 +59,7 @@ void winograd_input_transform_1x8_fp32_process_tile(
// Perform the Winograd input transformation for each channel in the input
// tensor.
int channels_remaining = n_channels;
-#ifdef __arm_any__
+#ifdef __arm_any__
for (; channels_remaining >= 4; channels_remaining -= 4)
{
float32x4_t x[inner_tile_cols], U[inner_tile_cols];
@@ -85,7 +69,7 @@ void winograd_input_transform_1x8_fp32_process_tile(
}
// Load x
- for (int j = pad_left; j < cells_j; j++)
+ for (int j = 0; j < inner_tile_cols; j++)
{
x[j] = vld1q_f32(x_ptrs[j]);
x_ptrs[j] += 4;
@@ -117,7 +101,7 @@ void winograd_input_transform_1x8_fp32_process_tile(
}
// Load x
- for (int j = pad_left; j < cells_j; j++)
+ for (int j = 0; j < inner_tile_cols; j++)
{
x[j] = vld1_f32(x_ptrs[j]);
x_ptrs[j] += 2;
@@ -140,11 +124,11 @@ void winograd_input_transform_1x8_fp32_process_tile(
}
outptr += 2;
}
-#endif // __arm_any__
+#endif // __arm_any__
for (; channels_remaining; channels_remaining--)
{
// Load x
- for (int j = pad_left; j < cells_j; j++)
+ for (int j = 0; j < inner_tile_cols; j++)
{
x[j] = *(x_ptrs[j]++);
}
@@ -168,94 +152,7 @@ void winograd_input_transform_1x8_fp32_process_tile(
}
}
-}
-
-namespace winograd
-{
-template <int x>
-using Tiles = InputTransformImplTiles<1, x, 1, 8, float>;
-
-/*****************************************************************************/
-// 1x3 specialisations
-template <>
-const Tiles<3>::TileFn Tiles<3>::tilefn_generic = winograd_input_transform_1x8_fp32_process_tile<false>;
-
-template <>
-const Tiles<3>::TileFn Tiles<3>::tilefn_unpadded = winograd_input_transform_1x8_fp32_process_tile<true>;
-
-template <>
-const Tiles<3>::TileFn Tiles<3>::tilefn_left_padded[n_pad_left] = {
- winograd_input_transform_1x8_fp32_process_tile<true, 0, 1, 0, 0>,
-};
-
-template <>
-const Tiles<3>::TileFn Tiles<3>::tilefn_right_padded[n_pad_right] = {
- winograd_input_transform_1x8_fp32_process_tile<true, 0, 0, 0, 1>,
- winograd_input_transform_1x8_fp32_process_tile<true, 0, 0, 0, 2>,
- winograd_input_transform_1x8_fp32_process_tile<true, 0, 0, 0, 3>,
- winograd_input_transform_1x8_fp32_process_tile<true, 0, 0, 0, 4>,
- winograd_input_transform_1x8_fp32_process_tile<true, 0, 0, 0, 5>,
- winograd_input_transform_1x8_fp32_process_tile<true, 0, 0, 0, 6>,
- winograd_input_transform_1x8_fp32_process_tile<true, 0, 0, 0, 7>,
-};
-/*****************************************************************************/
-
-/*****************************************************************************/
-// 1x5 specialisations
-template <>
-const Tiles<5>::TileFn Tiles<5>::tilefn_generic = winograd_input_transform_1x8_fp32_process_tile<false>;
-
-template <>
-const Tiles<5>::TileFn Tiles<5>::tilefn_unpadded = winograd_input_transform_1x8_fp32_process_tile<true>;
-
-template <>
-const Tiles<5>::TileFn Tiles<5>::tilefn_left_padded[n_pad_left] = {
- winograd_input_transform_1x8_fp32_process_tile<true, 0, 2, 0, 0>,
-};
-
-template <>
-const Tiles<5>::TileFn Tiles<5>::tilefn_right_padded[n_pad_right] = {
- winograd_input_transform_1x8_fp32_process_tile<true, 0, 0, 0, 1>,
- winograd_input_transform_1x8_fp32_process_tile<true, 0, 0, 0, 2>,
- winograd_input_transform_1x8_fp32_process_tile<true, 0, 0, 0, 3>,
- winograd_input_transform_1x8_fp32_process_tile<true, 0, 0, 0, 4>,
- winograd_input_transform_1x8_fp32_process_tile<true, 0, 0, 0, 5>,
- winograd_input_transform_1x8_fp32_process_tile<true, 0, 0, 0, 6>,
- winograd_input_transform_1x8_fp32_process_tile<true, 0, 0, 0, 7>,
-};
-/*****************************************************************************/
-
-/*****************************************************************************/
-// 1x7 specialisations
-template <>
-const Tiles<7>::TileFn Tiles<7>::tilefn_generic = winograd_input_transform_1x8_fp32_process_tile<false>;
-
-template <>
-const Tiles<7>::TileFn Tiles<7>::tilefn_unpadded = winograd_input_transform_1x8_fp32_process_tile<true>;
-
-template <>
-const Tiles<7>::TileFn Tiles<7>::tilefn_left_padded[n_pad_left] = {
- winograd_input_transform_1x8_fp32_process_tile<true, 0, 1, 0, 0>,
- winograd_input_transform_1x8_fp32_process_tile<true, 0, 3, 0, 0>,
-};
-
-template <>
-const Tiles<7>::TileFn Tiles<7>::tilefn_right_padded[n_pad_right] = {
- winograd_input_transform_1x8_fp32_process_tile<true, 0, 0, 0, 1>,
- winograd_input_transform_1x8_fp32_process_tile<true, 0, 0, 0, 2>,
- winograd_input_transform_1x8_fp32_process_tile<true, 0, 0, 0, 3>,
- winograd_input_transform_1x8_fp32_process_tile<true, 0, 0, 0, 4>,
- winograd_input_transform_1x8_fp32_process_tile<true, 0, 0, 0, 5>,
- winograd_input_transform_1x8_fp32_process_tile<true, 0, 0, 0, 6>,
- winograd_input_transform_1x8_fp32_process_tile<true, 0, 0, 0, 7>,
-};
-/*****************************************************************************/
-
+template class InputTransform<1, 8, float, float, WinogradRoots::Integers>;
+template class InputTransform<8, 1, float, float, WinogradRoots::Integers>;
-template class InputTransform<1, 3, 1, 8, float>;
-template class InputTransform<3, 1, 8, 1, float>;
-template class InputTransform<1, 5, 1, 8, float>;
-template class InputTransform<5, 1, 8, 1, float>;
-template class InputTransform<1, 7, 1, 8, float>;
-template class InputTransform<7, 1, 8, 1, float>;
} // namespace winograd
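
Each transform_tile specialisation above follows the same channel-blocking shape: a 4-wide NEON loop, a 2-wide loop, then a scalar tail. Below is a plain-C++ sketch of that dispatch with the per-channel work abstracted away; for_each_channel_blocked is a made-up helper, not library API.

#include <cstdio>

template <typename F>
static void for_each_channel_blocked(int n_channels, F op)
{
    int remaining = n_channels;
    for (; remaining >= 4; remaining -= 4) op(4); // vld1q_f32/vst1q_f32 path
    for (; remaining >= 2; remaining -= 2) op(2); // vld1_f32/vst1_f32 path
    for (; remaining; remaining--)         op(1); // scalar tail
}

int main()
{
    // 11 channels -> blocks of 4, 4, 2, 1, matching the loop bounds above.
    for_each_channel_blocked(11, [](int width) { std::printf("block of %d\n", width); });
    return 0;
}
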
diff --git a/src/core/NEON/kernels/convolution/winograd/transforms/input_2x2_3x3_fp32.cpp b/src/core/NEON/kernels/convolution/winograd/winograd_transforms/input_4x4_fp32_fp32_integers.cpp
index 4203945dd3..9393785dfc 100644
--- a/src/core/NEON/kernels/convolution/winograd/transforms/input_2x2_3x3_fp32.cpp
+++ b/src/core/NEON/kernels/convolution/winograd/winograd_transforms/input_4x4_fp32_fp32_integers.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -22,67 +22,45 @@
* SOFTWARE.
*/
-#include "arm_compute/core/NEON/kernels/convolution/winograd/transforms/input.hpp"
-#include "arm_compute/core/NEON/kernels/convolution/winograd/winograd_gemm.hpp"
-#include "arm_compute/core/NEON/kernels/convolution/common/arm.hpp"
+#include "input.hpp"
+#include "arm.hpp"
namespace winograd
{
-using Tiles = InputTransformImplTiles<3, 3, 4, 4, float>;
-
-namespace
-{
-
-
-template <bool Specialized, int PadTop=0, int PadLeft=0, int PadBottom=0, int PadRight=0>
-void winograd_input_transform_4x4_fp32_process_tile(
- int n_channels,
+template <>
+void InputTransform<4, 4, float, float, WinogradRoots::Integers>::transform_tile(
+ const int n_channels,
const float* const input_base,
const int input_row_stride,
const int input_col_stride,
- float* const matrix_base,
- const int matrix_stride,
- const int _pad_top,
- const int _pad_left,
- const int _pad_bottom,
- const int _pad_right
- )
+ float* outptr,
+ const int matrix_stride
+)
{
-const int pad_top = Specialized ? PadTop : _pad_top;
- const int pad_left = Specialized ? PadLeft : _pad_left;
- const int pad_bottom = Specialized ? PadBottom : _pad_bottom;
- const int pad_right = Specialized ? PadRight : _pad_right;
-
- constexpr int inner_tile_i = 4, inner_tile_j = 4;
- const int cells_i = inner_tile_i - pad_bottom;
- const int cells_j = inner_tile_i - pad_right;
-
-
-
- float *outptr = matrix_base;
+ constexpr int inner_tile_rows = 4, inner_tile_cols = 4;
// Get pointers into the input tile
- const float *x_ptrs[inner_tile_i][inner_tile_j];
- for (int i = pad_top, xi = 0; i < cells_i; i++, xi++)
+ const float *x_ptrs[inner_tile_rows][inner_tile_cols];
+ for (int i = 0, xi = 0; i < inner_tile_rows; i++, xi++)
{
// Get a pointer into the row
const float* const row_ptr = input_base + xi*input_row_stride;
- for (int j = pad_left, xj = 0; j < cells_j; j++, xj++)
+ for (int j = 0, xj = 0; j < inner_tile_cols; j++, xj++)
{
x_ptrs[i][j] = row_ptr + xj*input_col_stride;
}
}
// Matrices used/computed in this kernel.
- float x[inner_tile_i][inner_tile_j];
- float XTx[inner_tile_i][inner_tile_j];
- float U[inner_tile_i][inner_tile_j];
+ float x[inner_tile_rows][inner_tile_cols];
+ float XTx[inner_tile_rows][inner_tile_cols];
+ float U[inner_tile_rows][inner_tile_cols];
- for (int i = 0; i < inner_tile_i; i++)
+ for (int i = 0; i < inner_tile_rows; i++)
{
- for (int j = 0; j < inner_tile_j; j++)
+ for (int j = 0; j < inner_tile_cols; j++)
{
x[i][j] = XTx[i][j] = 0.0f;
}
@@ -95,13 +73,13 @@ const int pad_top = Specialized ? PadTop : _pad_top;
for (; channels_remaining >= 4; channels_remaining -= 4)
{
// Matrices used/computed in this kernel.
- float32x4_t x[inner_tile_i][inner_tile_j];
- float32x4_t XTx[inner_tile_i][inner_tile_j];
- float32x4_t U[inner_tile_i][inner_tile_j];
+ float32x4_t x[inner_tile_rows][inner_tile_cols];
+ float32x4_t XTx[inner_tile_rows][inner_tile_cols];
+ float32x4_t U[inner_tile_rows][inner_tile_cols];
- for (int i = 0; i < inner_tile_i; i++)
+ for (int i = 0; i < inner_tile_rows; i++)
{
- for (int j = 0; j < inner_tile_j; j++)
+ for (int j = 0; j < inner_tile_cols; j++)
{
x[i][j] = vdupq_n_f32(0.0f);
XTx[i][j] = vdupq_n_f32(0.0f);
@@ -109,9 +87,9 @@ const int pad_top = Specialized ? PadTop : _pad_top;
}
// Load x
- for (int i = pad_top; i < cells_i; i++)
+ for (int i = 0; i < inner_tile_rows; i++)
{
- for (int j = pad_left; j < cells_j; j++)
+ for (int j = 0; j < inner_tile_cols; j++)
{
x[i][j] = vld1q_f32(x_ptrs[i][j]);
x_ptrs[i][j] += 4;
@@ -119,7 +97,7 @@ const int pad_top = Specialized ? PadTop : _pad_top;
}
// Compute XT . x
- for (int j = pad_left; j < cells_j; j++)
+ for (int j = 0; j < inner_tile_cols; j++)
{
// XTx[0][j] = x[0][j] - x[2][j];
XTx[0][j] = vsubq_f32(x[0][j], x[2][j]);
@@ -135,7 +113,7 @@ const int pad_top = Specialized ? PadTop : _pad_top;
}
// Compute U = XT . x . X
- for (int i = 0; i < inner_tile_i; i++)
+ for (int i = 0; i < inner_tile_rows; i++)
{
// U[i][0] = XTx[i][0] - XTx[i][2];
U[i][0] = vsubq_f32(XTx[i][0], XTx[i][2]);
@@ -151,9 +129,9 @@ const int pad_top = Specialized ? PadTop : _pad_top;
}
// Store the transformed matrix
- for (int i = 0, m = 0; i < inner_tile_i; i++)
+ for (int i = 0, m = 0; i < inner_tile_rows; i++)
{
- for (int j = 0; j < inner_tile_j; j++, m++)
+ for (int j = 0; j < inner_tile_cols; j++, m++)
{
vst1q_f32(outptr + m*matrix_stride, U[i][j]);
}
@@ -165,13 +143,13 @@ const int pad_top = Specialized ? PadTop : _pad_top;
for (; channels_remaining >= 2; channels_remaining -= 2)
{
// Matrices used/computed in this kernel.
- float32x2_t x[inner_tile_i][inner_tile_j];
- float32x2_t XTx[inner_tile_i][inner_tile_j];
- float32x2_t U[inner_tile_i][inner_tile_j];
+ float32x2_t x[inner_tile_rows][inner_tile_cols];
+ float32x2_t XTx[inner_tile_rows][inner_tile_cols];
+ float32x2_t U[inner_tile_rows][inner_tile_cols];
- for (int i = 0; i < inner_tile_i; i++)
+ for (int i = 0; i < inner_tile_rows; i++)
{
- for (int j = 0; j < inner_tile_j; j++)
+ for (int j = 0; j < inner_tile_cols; j++)
{
x[i][j] = vdup_n_f32(0.0f);
XTx[i][j] = vdup_n_f32(0.0f);
@@ -179,9 +157,9 @@ const int pad_top = Specialized ? PadTop : _pad_top;
}
// Load x
- for (int i = pad_top; i < cells_i; i++)
+ for (int i = 0; i < inner_tile_rows; i++)
{
- for (int j = pad_left; j < cells_j; j++)
+ for (int j = 0; j < inner_tile_cols; j++)
{
x[i][j] = vld1_f32(x_ptrs[i][j]);
x_ptrs[i][j] += 2;
@@ -189,7 +167,7 @@ const int pad_top = Specialized ? PadTop : _pad_top;
}
// Compute XT . x
- for (int j = pad_left; j < cells_j; j++)
+ for (int j = 0; j < inner_tile_cols; j++)
{
// XTx[0][j] = x[0][j] - x[2][j];
XTx[0][j] = vsub_f32(x[0][j], x[2][j]);
@@ -205,7 +183,7 @@ const int pad_top = Specialized ? PadTop : _pad_top;
}
// Compute U = XT . x . X
- for (int i = 0; i < inner_tile_i; i++)
+ for (int i = 0; i < inner_tile_rows; i++)
{
// U[i][0] = XTx[i][0] - XTx[i][2];
U[i][0] = vsub_f32(XTx[i][0], XTx[i][2]);
@@ -221,9 +199,9 @@ const int pad_top = Specialized ? PadTop : _pad_top;
}
// Store the transformed matrix
- for (int i = 0, m = 0; i < inner_tile_i; i++)
+ for (int i = 0, m = 0; i < inner_tile_rows; i++)
{
- for (int j = 0; j < inner_tile_j; j++, m++)
+ for (int j = 0; j < inner_tile_cols; j++, m++)
{
vst1_f32(outptr + m*matrix_stride, U[i][j]);
}
@@ -234,16 +212,16 @@ const int pad_top = Specialized ? PadTop : _pad_top;
for (; channels_remaining; channels_remaining--)
{
// Load x
- for (int i = pad_top; i < cells_i; i++)
+ for (int i = 0; i < inner_tile_rows; i++)
{
- for (int j = pad_left; j < cells_j; j++)
+ for (int j = 0; j < inner_tile_cols; j++)
{
x[i][j] = *(x_ptrs[i][j]++);
}
}
// Compute XT . x
- for (int j = pad_left; j < cells_j; j++)
+ for (int j = 0; j < inner_tile_cols; j++)
{
XTx[0][j] = x[0][j] - x[2][j];
XTx[1][j] = x[1][j] + x[2][j];
@@ -252,7 +230,7 @@ const int pad_top = Specialized ? PadTop : _pad_top;
}
// Compute U = XT . x . X
- for (int i = 0; i < inner_tile_i; i++)
+ for (int i = 0; i < inner_tile_rows; i++)
{
U[i][0] = XTx[i][0] - XTx[i][2];
U[i][1] = XTx[i][1] + XTx[i][2];
@@ -261,9 +239,9 @@ const int pad_top = Specialized ? PadTop : _pad_top;
}
// Store the transformed matrix
- for (int i = 0, m = 0; i < inner_tile_i; i++)
+ for (int i = 0, m = 0; i < inner_tile_rows; i++)
{
- for (int j = 0; j < inner_tile_j; j++, m++)
+ for (int j = 0; j < inner_tile_cols; j++, m++)
{
*(outptr + m*matrix_stride) = U[i][j];
}
@@ -272,40 +250,6 @@ const int pad_top = Specialized ? PadTop : _pad_top;
}
}
-} // namespace (anonymous)
-
-template <>
-const Tiles::TileFn Tiles::tilefn_generic = winograd_input_transform_4x4_fp32_process_tile<false>;
+template class InputTransform<4, 4, float, float, WinogradRoots::Integers>;
-template <>
-const Tiles::TileFn Tiles::tilefn_unpadded = winograd_input_transform_4x4_fp32_process_tile<true>;
-
-
-template <>
-const Tiles::TileFn Tiles::tilefn_top_padded[n_pad_top] = {
- winograd_input_transform_4x4_fp32_process_tile<true, 1, 0, 0, 0>,
-};
-
-template <>
-const Tiles::TileFn Tiles::tilefn_left_padded[n_pad_left] = {
- winograd_input_transform_4x4_fp32_process_tile<true, 0, 1, 0, 0>,
-};
-
-template <>
-const Tiles::TileFn Tiles::tilefn_bottom_padded[n_pad_bottom] = {
- winograd_input_transform_4x4_fp32_process_tile<true, 0, 0, 1, 0>,
- winograd_input_transform_4x4_fp32_process_tile<true, 0, 0, 2, 0>,
- winograd_input_transform_4x4_fp32_process_tile<true, 0, 0, 3, 0>,
- winograd_input_transform_4x4_fp32_process_tile<true, 0, 0, 4, 0>,
-};
-
-template <>
-const Tiles::TileFn Tiles::tilefn_right_padded[n_pad_right] = {
- winograd_input_transform_4x4_fp32_process_tile<true, 0, 0, 0, 1>,
- winograd_input_transform_4x4_fp32_process_tile<true, 0, 0, 0, 2>,
- winograd_input_transform_4x4_fp32_process_tile<true, 0, 0, 0, 3>,
- winograd_input_transform_4x4_fp32_process_tile<true, 0, 0, 0, 4>,
-};
-
-template class InputTransform<3, 3, 4, 4, float>;
-} // namespace winograd
+} // namespace winograd
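
The scalar tail loop above spells out the 4x4 input transform: U = B^T x B, with the B^T rows implied by the XTx expressions. Collected into one function for reference below; the two rows elided by the hunk boundaries are filled in from the standard F(2x2, 3x3) transform (an assumption, flagged in the comments), and transform_input_tile_4x4 is an illustrative name.

#include <cstdio>

static void transform_input_tile_4x4(const float x[4][4], float U[4][4])
{
    float XTx[4][4];
    for (int j = 0; j < 4; j++)
    {
        XTx[0][j] = x[0][j] - x[2][j];
        XTx[1][j] = x[1][j] + x[2][j];
        XTx[2][j] = x[2][j] - x[1][j]; // row cut from the hunk; standard B^T row
        XTx[3][j] = x[1][j] - x[3][j]; // row cut from the hunk; standard B^T row
    }
    for (int i = 0; i < 4; i++)
    {
        U[i][0] = XTx[i][0] - XTx[i][2];
        U[i][1] = XTx[i][1] + XTx[i][2];
        U[i][2] = XTx[i][2] - XTx[i][1]; // as above
        U[i][3] = XTx[i][1] - XTx[i][3]; // as above
    }
}

int main()
{
    float x[4][4], U[4][4];
    for (int i = 0; i < 4; i++)
        for (int j = 0; j < 4; j++)
            x[i][j] = (float)(i * 4 + j);
    transform_input_tile_4x4(x, U);
    std::printf("U[0][0] = %f\n", U[0][0]); // (x00-x20) - (x02-x22) = 0 here
    return 0;
}
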
diff --git a/src/core/NEON/kernels/convolution/winograd/winograd_transforms/input_6x6_fp32_fp32_integers.cpp b/src/core/NEON/kernels/convolution/winograd/winograd_transforms/input_6x6_fp32_fp32_integers.cpp
new file mode 100644
index 0000000000..908fc8292a
--- /dev/null
+++ b/src/core/NEON/kernels/convolution/winograd/winograd_transforms/input_6x6_fp32_fp32_integers.cpp
@@ -0,0 +1,1308 @@
+/*
+ * Copyright (c) 2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "arm.hpp"
+#include "input.hpp"
+
+namespace winograd
+{
+
+#ifdef __aarch64__
+
+template <>
+void InputTransform<6, 6, float, float, WinogradRoots::Integers>::transform_tile(
+ int n_channels,
+ const float* input_base,
+ const int input_row_stride,
+ const int input_col_stride,
+ float* matrix_base,
+ const int matrix_stride
+)
+{
+ const float pcoeffs[4] = {1.0f, 2.0f, 4.0f, 5.0f};
+ __asm__ __volatile__(
+ "ldr q0, [%[pcoeffs]]\n"
+ "add x25, %[inptr0], %[input_row_stride]\n"
+ "add x18, %[input_col_stride1], %[input_col_stride1]\n"
+ "add x16, x25, %[input_row_stride]\n"
+ "add x19, x18, %[input_col_stride1]\n"
+ "add x26, x16, %[input_row_stride]\n"
+ "add x20, x19, %[input_col_stride1]\n"
+ "add x17, x26, %[input_row_stride]\n"
+ "add x21, x20, %[input_col_stride1]\n"
+ "add x27, x17, %[input_row_stride]\n"
+ "add x28, %[outptr0], %[output_row_stride]\n"
+ "add x11, %[output_col_stride1], %[output_col_stride1]\n"
+ "add x22, x28, %[output_row_stride]\n"
+ "add x13, x11, %[output_col_stride1]\n"
+ "add x12, x22, %[output_row_stride]\n"
+ "add x23, x13, %[output_col_stride1]\n"
+ "add x14, x12, %[output_row_stride]\n"
+ "add x15, x23, %[output_col_stride1]\n"
+ "add x24, x14, %[output_row_stride]\n"
+ "cmp %w[n_channels], #4\n"
+ "blt 2f\n"
+ "1:\n"
+ "ldr q8, [%[inptr0], x20]\n"
+ "ldr q2, [%[inptr0], x18]\n"
+ "mov v14.16b, v8.16b\n"
+ "ldr q9, [%[inptr0]]\n"
+ "mov v10.16b, v8.16b\n"
+ "ldr q1, [%[inptr0], x21]\n"
+ "fmla v14.4s, v9.4s, v0.s[2]\n"
+ "ldr q4, [%[inptr0], x19]\n"
+ "mov v9.16b, v8.16b\n"
+ "ldr q12, [%[inptr0], %[input_col_stride1]]\n"
+ "fmls v10.4s, v12.4s, v0.s[2]\n"
+ "ldr q5, [x16, x20]\n"
+ "fmls v14.4s, v2.4s, v0.s[3]\n"
+ "ldr q20, [x16, x18]\n"
+ "fmla v9.4s, v12.4s, v0.s[2]\n"
+ "ldr q3, [x16]\n"
+ "fmls v10.4s, v2.4s, v0.s[2]\n"
+ "ldr q6, [x16, x21]\n"
+ "mov v7.16b, v8.16b\n"
+ "ldr q16, [x16, x19]\n"
+ "fmls v9.4s, v2.4s, v0.s[2]\n"
+ "ldr q22, [x16, %[input_col_stride1]]\n"
+ "fadd v10.4s, v10.4s, v4.4s\n"
+ "ldr q17, [x17, x20]\n"
+ "fmls v7.4s, v12.4s, v0.s[1]\n"
+ "ldr q15, [x17, x18]\n"
+ "fsub v9.4s, v9.4s, v4.4s\n"
+ "ldr q19, [x17]\n"
+ "mov v8.16b, v8.16b\n"
+ "ldr q18, [x17, x21]\n"
+ "fsub v7.4s, v7.4s, v2.4s\n"
+ "ldr q13, [x17, x19]\n"
+ "fmla v7.4s, v4.4s, v0.s[1]\n"
+ "ldr q21, [x17, %[input_col_stride1]]\n"
+ "fmla v8.4s, v12.4s, v0.s[1]\n"
+ "add %[inptr0], %[inptr0], #16\n"
+ "mov v11.16b, v1.16b\n"
+ "add x16, x16, #16\n"
+ "mov v1.16b, v5.16b\n"
+ "add x17, x17, #16\n"
+ "fsub v8.4s, v8.4s, v2.4s\n"
+ "fmla v11.4s, v12.4s, v0.s[2]\n"
+ "fmls v8.4s, v4.4s, v0.s[1]\n"
+ "fmla v1.4s, v3.4s, v0.s[2]\n"
+ "mov v2.16b, v5.16b\n"
+ "mov v3.16b, v5.16b\n"
+ "fmls v11.4s, v4.4s, v0.s[3]\n"
+ "mov v4.16b, v5.16b\n"
+ "fmls v1.4s, v20.4s, v0.s[3]\n"
+ "fmls v2.4s, v22.4s, v0.s[2]\n"
+ "fmla v3.4s, v22.4s, v0.s[2]\n"
+ "fmls v4.4s, v22.4s, v0.s[1]\n"
+ "mov v5.16b, v5.16b\n"
+ "mov v6.16b, v6.16b\n"
+ "fmls v2.4s, v20.4s, v0.s[2]\n"
+ "mov v12.16b, v17.16b\n"
+ "fmls v3.4s, v20.4s, v0.s[2]\n"
+ "fsub v4.4s, v4.4s, v20.4s\n"
+ "fmla v4.4s, v16.4s, v0.s[1]\n"
+ "fmla v5.4s, v22.4s, v0.s[1]\n"
+ "fadd v2.4s, v2.4s, v16.4s\n"
+ "fmla v6.4s, v22.4s, v0.s[2]\n"
+ "fsub v3.4s, v3.4s, v16.4s\n"
+ "fmla v12.4s, v19.4s, v0.s[2]\n"
+ "fsub v5.4s, v5.4s, v20.4s\n"
+ "mov v19.16b, v17.16b\n"
+ "fmls v5.4s, v16.4s, v0.s[1]\n"
+ "fmls v6.4s, v16.4s, v0.s[3]\n"
+ "fmls v12.4s, v15.4s, v0.s[3]\n"
+ "fmls v19.4s, v21.4s, v0.s[2]\n"
+ "mov v20.16b, v17.16b\n"
+ "mov v16.16b, v17.16b\n"
+ "mov v17.16b, v17.16b\n"
+ "mov v18.16b, v18.16b\n"
+ "fmls v19.4s, v15.4s, v0.s[2]\n"
+ "fmla v20.4s, v21.4s, v0.s[2]\n"
+ "fmls v16.4s, v21.4s, v0.s[1]\n"
+ "fmla v17.4s, v21.4s, v0.s[1]\n"
+ "fmla v18.4s, v21.4s, v0.s[2]\n"
+ "mov v23.16b, v12.16b\n"
+ "fadd v19.4s, v19.4s, v13.4s\n"
+ "fmls v20.4s, v15.4s, v0.s[2]\n"
+ "fsub v16.4s, v16.4s, v15.4s\n"
+ "fsub v17.4s, v17.4s, v15.4s\n"
+ "fmla v16.4s, v13.4s, v0.s[1]\n"
+ "fmls v17.4s, v13.4s, v0.s[1]\n"
+ "fsub v20.4s, v20.4s, v13.4s\n"
+ "fmls v18.4s, v13.4s, v0.s[3]\n"
+ "fmla v23.4s, v14.4s, v0.s[2]\n"
+ "mov v15.16b, v19.16b\n"
+ "mov v14.16b, v20.16b\n"
+ "mov v24.16b, v16.16b\n"
+ "fmla v15.4s, v10.4s, v0.s[2]\n"
+ "mov v10.16b, v17.16b\n"
+ "fmls v23.4s, v1.4s, v0.s[3]\n"
+ "fmla v14.4s, v9.4s, v0.s[2]\n"
+ "fmla v24.4s, v7.4s, v0.s[2]\n"
+ "fmla v10.4s, v8.4s, v0.s[2]\n"
+ "fmls v15.4s, v2.4s, v0.s[3]\n"
+ "mov v7.16b, v18.16b\n"
+ "str q23, [%[outptr0]]\n"
+ "fmls v14.4s, v3.4s, v0.s[3]\n"
+ "fmls v24.4s, v4.4s, v0.s[3]\n"
+ "fmls v10.4s, v5.4s, v0.s[3]\n"
+ "str q15, [%[outptr0], %[output_col_stride1]]\n"
+ "fmla v7.4s, v11.4s, v0.s[2]\n"
+ "str q14, [%[outptr0], x11]\n"
+ "str q24, [%[outptr0], x13]\n"
+ "str q10, [%[outptr0], x23]\n"
+ "fmls v7.4s, v6.4s, v0.s[3]\n"
+ "str q7, [%[outptr0], x15]\n"
+ "add %[outptr0], %[outptr0], #16\n"
+ "mov v26.16b, v12.16b\n"
+ "mov v25.16b, v19.16b\n"
+ "ldr q11, [x25, x20]\n"
+ "mov v10.16b, v11.16b\n"
+ "ldr q23, [x25, x18]\n"
+ "mov v9.16b, v11.16b\n"
+ "ldr q7, [x25]\n"
+ "fmla v10.4s, v7.4s, v0.s[2]\n"
+ "ldr q13, [x25, x21]\n"
+ "mov v7.16b, v11.16b\n"
+ "ldr q31, [x25, x19]\n"
+ "mov v8.16b, v11.16b\n"
+ "ldr q21, [x25, %[input_col_stride1]]\n"
+ "fmls v10.4s, v23.4s, v0.s[3]\n"
+ "ldr q30, [x26, x20]\n"
+ "fmls v9.4s, v21.4s, v0.s[2]\n"
+ "ldr q29, [x26, x18]\n"
+ "fmla v7.4s, v21.4s, v0.s[2]\n"
+ "ldr q22, [x26]\n"
+ "fmls v8.4s, v21.4s, v0.s[1]\n"
+ "ldr q24, [x26, x21]\n"
+ "fmls v9.4s, v23.4s, v0.s[2]\n"
+ "ldr q27, [x26, x19]\n"
+ "fmls v7.4s, v23.4s, v0.s[2]\n"
+ "ldr q28, [x26, %[input_col_stride1]]\n"
+ "fsub v8.4s, v8.4s, v23.4s\n"
+ "add x25, x25, #16\n"
+ "fadd v9.4s, v9.4s, v31.4s\n"
+ "add x26, x26, #16\n"
+ "fsub v7.4s, v7.4s, v31.4s\n"
+ "fmla v8.4s, v31.4s, v0.s[1]\n"
+ "mov v11.16b, v11.16b\n"
+ "mov v15.16b, v13.16b\n"
+ "mov v14.16b, v30.16b\n"
+ "mov v13.16b, v30.16b\n"
+ "fmla v11.4s, v21.4s, v0.s[1]\n"
+ "fmla v15.4s, v21.4s, v0.s[2]\n"
+ "fmla v14.4s, v22.4s, v0.s[2]\n"
+ "fmls v13.4s, v28.4s, v0.s[2]\n"
+ "mov v21.16b, v30.16b\n"
+ "mov v22.16b, v30.16b\n"
+ "fsub v11.4s, v11.4s, v23.4s\n"
+ "fmls v15.4s, v31.4s, v0.s[3]\n"
+ "fmls v11.4s, v31.4s, v0.s[1]\n"
+ "fmls v14.4s, v29.4s, v0.s[3]\n"
+ "fmls v13.4s, v29.4s, v0.s[2]\n"
+ "fmla v21.4s, v28.4s, v0.s[2]\n"
+ "fmls v22.4s, v28.4s, v0.s[1]\n"
+ "mov v23.16b, v30.16b\n"
+ "mov v24.16b, v24.16b\n"
+ "fmls v26.4s, v10.4s, v0.s[2]\n"
+ "fadd v13.4s, v13.4s, v27.4s\n"
+ "fmls v21.4s, v29.4s, v0.s[2]\n"
+ "fsub v22.4s, v22.4s, v29.4s\n"
+ "fmla v23.4s, v28.4s, v0.s[1]\n"
+ "fmla v22.4s, v27.4s, v0.s[1]\n"
+ "fmla v24.4s, v28.4s, v0.s[2]\n"
+ "fsub v21.4s, v21.4s, v27.4s\n"
+ "fmls v26.4s, v1.4s, v0.s[2]\n"
+ "fsub v23.4s, v23.4s, v29.4s\n"
+ "fmls v25.4s, v9.4s, v0.s[2]\n"
+ "fmls v23.4s, v27.4s, v0.s[1]\n"
+ "fmls v24.4s, v27.4s, v0.s[3]\n"
+ "fadd v26.4s, v26.4s, v14.4s\n"
+ "mov v27.16b, v20.16b\n"
+ "str q26, [x28]\n"
+ "fmls v25.4s, v2.4s, v0.s[2]\n"
+ "fmls v27.4s, v7.4s, v0.s[2]\n"
+ "mov v31.16b, v16.16b\n"
+ "mov v30.16b, v17.16b\n"
+ "mov v29.16b, v18.16b\n"
+ "fadd v25.4s, v25.4s, v13.4s\n"
+ "fmls v31.4s, v8.4s, v0.s[2]\n"
+ "str q25, [x28, %[output_col_stride1]]\n"
+ "fmls v27.4s, v3.4s, v0.s[2]\n"
+ "fmls v30.4s, v11.4s, v0.s[2]\n"
+ "fmls v29.4s, v15.4s, v0.s[2]\n"
+ "fmls v31.4s, v4.4s, v0.s[2]\n"
+ "mov v26.16b, v12.16b\n"
+ "fadd v27.4s, v27.4s, v21.4s\n"
+ "mov v25.16b, v19.16b\n"
+ "str q27, [x28, x11]\n"
+ "fmls v30.4s, v5.4s, v0.s[2]\n"
+ "fadd v31.4s, v31.4s, v22.4s\n"
+ "fmls v29.4s, v6.4s, v0.s[2]\n"
+ "str q31, [x28, x13]\n"
+ "fmla v26.4s, v10.4s, v0.s[2]\n"
+ "fadd v30.4s, v30.4s, v23.4s\n"
+ "fmla v25.4s, v9.4s, v0.s[2]\n"
+ "str q30, [x28, x23]\n"
+ "fadd v29.4s, v29.4s, v24.4s\n"
+ "str q29, [x28, x15]\n"
+ "fmls v26.4s, v1.4s, v0.s[2]\n"
+ "fmls v25.4s, v2.4s, v0.s[2]\n"
+ "add x28, x28, #16\n"
+ "mov v30.16b, v20.16b\n"
+ "mov v29.16b, v16.16b\n"
+ "fsub v26.4s, v26.4s, v14.4s\n"
+ "mov v28.16b, v17.16b\n"
+ "str q26, [x22]\n"
+ "fsub v25.4s, v25.4s, v13.4s\n"
+ "str q25, [x22, %[output_col_stride1]]\n"
+ "fmla v30.4s, v7.4s, v0.s[2]\n"
+ "fmla v29.4s, v8.4s, v0.s[2]\n"
+ "fmla v28.4s, v11.4s, v0.s[2]\n"
+ "mov v26.16b, v18.16b\n"
+ "mov v25.16b, v12.16b\n"
+ "fmls v30.4s, v3.4s, v0.s[2]\n"
+ "mov v31.16b, v19.16b\n"
+ "fmls v29.4s, v4.4s, v0.s[2]\n"
+ "fmls v28.4s, v5.4s, v0.s[2]\n"
+ "fmla v26.4s, v15.4s, v0.s[2]\n"
+ "fmls v25.4s, v10.4s, v0.s[1]\n"
+ "fsub v30.4s, v30.4s, v21.4s\n"
+ "fmls v31.4s, v9.4s, v0.s[1]\n"
+ "str q30, [x22, x11]\n"
+ "fsub v29.4s, v29.4s, v22.4s\n"
+ "str q29, [x22, x13]\n"
+ "fsub v28.4s, v28.4s, v23.4s\n"
+ "str q28, [x22, x23]\n"
+ "fmls v26.4s, v6.4s, v0.s[2]\n"
+ "fsub v25.4s, v25.4s, v1.4s\n"
+ "fsub v31.4s, v31.4s, v2.4s\n"
+ "fmla v25.4s, v14.4s, v0.s[1]\n"
+ "fmla v31.4s, v13.4s, v0.s[1]\n"
+ "fsub v26.4s, v26.4s, v24.4s\n"
+ "mov v27.16b, v20.16b\n"
+ "str q26, [x22, x15]\n"
+ "mov v26.16b, v16.16b\n"
+ "str q25, [x12]\n"
+ "fmls v27.4s, v7.4s, v0.s[1]\n"
+ "str q31, [x12, %[output_col_stride1]]\n"
+ "fmls v26.4s, v8.4s, v0.s[1]\n"
+ "mov v25.16b, v17.16b\n"
+ "add x22, x22, #16\n"
+ "fsub v27.4s, v27.4s, v3.4s\n"
+ "mov v28.16b, v18.16b\n"
+ "fmla v27.4s, v21.4s, v0.s[1]\n"
+ "fsub v26.4s, v26.4s, v4.4s\n"
+ "fmla v26.4s, v22.4s, v0.s[1]\n"
+ "fmls v25.4s, v11.4s, v0.s[1]\n"
+ "fmls v28.4s, v15.4s, v0.s[1]\n"
+ "mov v12.16b, v12.16b\n"
+ "str q27, [x12, x11]\n"
+ "mov v19.16b, v19.16b\n"
+ "str q26, [x12, x13]\n"
+ "fsub v25.4s, v25.4s, v5.4s\n"
+ "fmla v25.4s, v23.4s, v0.s[1]\n"
+ "fsub v28.4s, v28.4s, v6.4s\n"
+ "fmla v28.4s, v24.4s, v0.s[1]\n"
+ "fmla v12.4s, v10.4s, v0.s[1]\n"
+ "fmla v19.4s, v9.4s, v0.s[1]\n"
+ "mov v20.16b, v20.16b\n"
+ "str q25, [x12, x23]\n"
+ "mov v16.16b, v16.16b\n"
+ "str q28, [x12, x15]\n"
+ "fsub v12.4s, v12.4s, v1.4s\n"
+ "fmls v12.4s, v14.4s, v0.s[1]\n"
+ "add x12, x12, #16\n"
+ "fsub v19.4s, v19.4s, v2.4s\n"
+ "fmla v20.4s, v7.4s, v0.s[1]\n"
+ "fmls v19.4s, v13.4s, v0.s[1]\n"
+ "fmla v16.4s, v8.4s, v0.s[1]\n"
+ "str q12, [x14]\n"
+ "mov v1.16b, v17.16b\n"
+ "fsub v20.4s, v20.4s, v3.4s\n"
+ "mov v17.16b, v18.16b\n"
+ "str q19, [x14, %[output_col_stride1]]\n"
+ "fmls v20.4s, v21.4s, v0.s[1]\n"
+ "fsub v16.4s, v16.4s, v4.4s\n"
+ "fmla v1.4s, v11.4s, v0.s[1]\n"
+ "fmls v16.4s, v22.4s, v0.s[1]\n"
+ "fmla v17.4s, v15.4s, v0.s[1]\n"
+ "str q20, [x14, x11]\n"
+ "fsub v1.4s, v1.4s, v5.4s\n"
+ "str q16, [x14, x13]\n"
+ "fmls v1.4s, v23.4s, v0.s[1]\n"
+ "fsub v17.4s, v17.4s, v6.4s\n"
+ "fmls v17.4s, v24.4s, v0.s[1]\n"
+ "str q1, [x14, x23]\n"
+ "str q17, [x14, x15]\n"
+ "add x14, x14, #16\n"
+ "ldr q2, [x27, x20]\n"
+ "mov v4.16b, v2.16b\n"
+ "ldr q17, [x27, x18]\n"
+ "mov v12.16b, v2.16b\n"
+ "ldr q18, [x27]\n"
+ "fmla v4.4s, v18.4s, v0.s[2]\n"
+ "ldr q3, [x27, x21]\n"
+ "mov v6.16b, v2.16b\n"
+ "ldr q5, [x27, x19]\n"
+ "mov v1.16b, v2.16b\n"
+ "ldr q18, [x27, %[input_col_stride1]]\n"
+ "fmls v4.4s, v17.4s, v0.s[3]\n"
+ "add x27, x27, #16\n"
+ "fmls v12.4s, v18.4s, v0.s[2]\n"
+ "sub %w[n_channels], %w[n_channels], #4\n"
+ "fmla v6.4s, v18.4s, v0.s[2]\n"
+ "cmp %w[n_channels], #4\n"
+ "fmls v1.4s, v18.4s, v0.s[1]\n"
+ "mov v2.16b, v2.16b\n"
+ "fmls v12.4s, v17.4s, v0.s[2]\n"
+ "mov v3.16b, v3.16b\n"
+ "fmls v6.4s, v17.4s, v0.s[2]\n"
+ "fmla v2.4s, v18.4s, v0.s[1]\n"
+ "fsub v1.4s, v1.4s, v17.4s\n"
+ "fmla v3.4s, v18.4s, v0.s[2]\n"
+ "fadd v12.4s, v12.4s, v5.4s\n"
+ "fmla v1.4s, v5.4s, v0.s[1]\n"
+ "fsub v6.4s, v6.4s, v5.4s\n"
+ "fsub v2.4s, v2.4s, v17.4s\n"
+ "fmls v2.4s, v5.4s, v0.s[1]\n"
+ "fmls v3.4s, v5.4s, v0.s[3]\n"
+ "mov v4.16b, v4.16b\n"
+ "mov v16.16b, v12.16b\n"
+ "mov v5.16b, v6.16b\n"
+ "mov v6.16b, v1.16b\n"
+ "fmla v4.4s, v10.4s, v0.s[2]\n"
+ "fmla v16.4s, v9.4s, v0.s[2]\n"
+ "fmla v5.4s, v7.4s, v0.s[2]\n"
+ "fmla v6.4s, v8.4s, v0.s[2]\n"
+ "mov v9.16b, v2.16b\n"
+ "mov v10.16b, v3.16b\n"
+ "fmls v4.4s, v14.4s, v0.s[3]\n"
+ "fmls v16.4s, v13.4s, v0.s[3]\n"
+ "fmls v5.4s, v21.4s, v0.s[3]\n"
+ "fmls v6.4s, v22.4s, v0.s[3]\n"
+ "fmla v9.4s, v11.4s, v0.s[2]\n"
+ "fmla v10.4s, v15.4s, v0.s[2]\n"
+ "str q4, [x24]\n"
+ "str q16, [x24, %[output_col_stride1]]\n"
+ "str q5, [x24, x11]\n"
+ "str q6, [x24, x13]\n"
+ "fmls v9.4s, v23.4s, v0.s[3]\n"
+ "fmls v10.4s, v24.4s, v0.s[3]\n"
+ "str q9, [x24, x23]\n"
+ "str q10, [x24, x15]\n"
+ "add x24, x24, #16\n"
+ "bge 1b\n"
+ "2:\n"
+ "cmp %w[n_channels], #2\n"
+ "blt 3f\n"
+ "ldr d8, [%[inptr0], x20]\n"
+ "mov v14.16b, v8.16b\n"
+ "ldr d2, [%[inptr0], x18]\n"
+ "mov v10.16b, v8.16b\n"
+ "ldr d9, [%[inptr0]]\n"
+ "fmla v14.4s, v9.4s, v0.s[2]\n"
+ "ldr d1, [%[inptr0], x21]\n"
+ "mov v9.16b, v8.16b\n"
+ "ldr d4, [%[inptr0], x19]\n"
+ "mov v7.16b, v8.16b\n"
+ "ldr d12, [%[inptr0], %[input_col_stride1]]\n"
+ "fmls v14.4s, v2.4s, v0.s[3]\n"
+ "ldr d5, [x16, x20]\n"
+ "fmls v10.4s, v12.4s, v0.s[2]\n"
+ "ldr d20, [x16, x18]\n"
+ "fmla v9.4s, v12.4s, v0.s[2]\n"
+ "ldr d3, [x16]\n"
+ "fmls v7.4s, v12.4s, v0.s[1]\n"
+ "ldr d6, [x16, x21]\n"
+ "fmls v10.4s, v2.4s, v0.s[2]\n"
+ "ldr d16, [x16, x19]\n"
+ "fmls v9.4s, v2.4s, v0.s[2]\n"
+ "ldr d22, [x16, %[input_col_stride1]]\n"
+ "fsub v7.4s, v7.4s, v2.4s\n"
+ "ldr d17, [x17, x20]\n"
+ "fadd v10.4s, v10.4s, v4.4s\n"
+ "ldr d15, [x17, x18]\n"
+ "fsub v9.4s, v9.4s, v4.4s\n"
+ "ldr d19, [x17]\n"
+ "fmla v7.4s, v4.4s, v0.s[1]\n"
+ "ldr d18, [x17, x21]\n"
+ "mov v8.16b, v8.16b\n"
+ "ldr d13, [x17, x19]\n"
+ "mov v11.16b, v1.16b\n"
+ "ldr d21, [x17, %[input_col_stride1]]\n"
+ "fmla v8.4s, v12.4s, v0.s[1]\n"
+ "add %[inptr0], %[inptr0], #8\n"
+ "fmla v11.4s, v12.4s, v0.s[2]\n"
+ "add x16, x16, #8\n"
+ "mov v1.16b, v5.16b\n"
+ "add x17, x17, #8\n"
+ "fsub v8.4s, v8.4s, v2.4s\n"
+ "mov v2.16b, v5.16b\n"
+ "fmls v8.4s, v4.4s, v0.s[1]\n"
+ "fmls v11.4s, v4.4s, v0.s[3]\n"
+ "fmla v1.4s, v3.4s, v0.s[2]\n"
+ "fmls v2.4s, v22.4s, v0.s[2]\n"
+ "mov v3.16b, v5.16b\n"
+ "mov v4.16b, v5.16b\n"
+ "mov v5.16b, v5.16b\n"
+ "mov v6.16b, v6.16b\n"
+ "fmls v1.4s, v20.4s, v0.s[3]\n"
+ "fmls v2.4s, v20.4s, v0.s[2]\n"
+ "fmla v3.4s, v22.4s, v0.s[2]\n"
+ "fmls v4.4s, v22.4s, v0.s[1]\n"
+ "fmla v5.4s, v22.4s, v0.s[1]\n"
+ "fmla v6.4s, v22.4s, v0.s[2]\n"
+ "fadd v2.4s, v2.4s, v16.4s\n"
+ "mov v12.16b, v17.16b\n"
+ "fmls v3.4s, v20.4s, v0.s[2]\n"
+ "fsub v4.4s, v4.4s, v20.4s\n"
+ "fmla v4.4s, v16.4s, v0.s[1]\n"
+ "fsub v5.4s, v5.4s, v20.4s\n"
+ "fmls v5.4s, v16.4s, v0.s[1]\n"
+ "fmls v6.4s, v16.4s, v0.s[3]\n"
+ "fsub v3.4s, v3.4s, v16.4s\n"
+ "fmla v12.4s, v19.4s, v0.s[2]\n"
+ "mov v19.16b, v17.16b\n"
+ "mov v20.16b, v17.16b\n"
+ "mov v16.16b, v17.16b\n"
+ "mov v17.16b, v17.16b\n"
+ "fmls v12.4s, v15.4s, v0.s[3]\n"
+ "fmls v19.4s, v21.4s, v0.s[2]\n"
+ "fmla v20.4s, v21.4s, v0.s[2]\n"
+ "fmls v16.4s, v21.4s, v0.s[1]\n"
+ "fmla v17.4s, v21.4s, v0.s[1]\n"
+ "mov v18.16b, v18.16b\n"
+ "fmls v19.4s, v15.4s, v0.s[2]\n"
+ "mov v23.16b, v12.16b\n"
+ "fmls v20.4s, v15.4s, v0.s[2]\n"
+ "fsub v16.4s, v16.4s, v15.4s\n"
+ "fmla v16.4s, v13.4s, v0.s[1]\n"
+ "fsub v17.4s, v17.4s, v15.4s\n"
+ "fadd v19.4s, v19.4s, v13.4s\n"
+ "fmls v17.4s, v13.4s, v0.s[1]\n"
+ "fsub v20.4s, v20.4s, v13.4s\n"
+ "fmla v18.4s, v21.4s, v0.s[2]\n"
+ "fmla v23.4s, v14.4s, v0.s[2]\n"
+ "mov v15.16b, v19.16b\n"
+ "mov v14.16b, v20.16b\n"
+ "mov v24.16b, v16.16b\n"
+ "fmls v18.4s, v13.4s, v0.s[3]\n"
+ "fmla v15.4s, v10.4s, v0.s[2]\n"
+ "fmls v23.4s, v1.4s, v0.s[3]\n"
+ "fmla v14.4s, v9.4s, v0.s[2]\n"
+ "fmla v24.4s, v7.4s, v0.s[2]\n"
+ "mov v10.16b, v17.16b\n"
+ "fmls v15.4s, v2.4s, v0.s[3]\n"
+ "mov v7.16b, v18.16b\n"
+ "str d23, [%[outptr0]]\n"
+ "fmls v14.4s, v3.4s, v0.s[3]\n"
+ "fmls v24.4s, v4.4s, v0.s[3]\n"
+ "fmla v10.4s, v8.4s, v0.s[2]\n"
+ "str d15, [%[outptr0], %[output_col_stride1]]\n"
+ "fmla v7.4s, v11.4s, v0.s[2]\n"
+ "str d14, [%[outptr0], x11]\n"
+ "fmls v10.4s, v5.4s, v0.s[3]\n"
+ "str d24, [%[outptr0], x13]\n"
+ "fmls v7.4s, v6.4s, v0.s[3]\n"
+ "str d10, [%[outptr0], x23]\n"
+ "str d7, [%[outptr0], x15]\n"
+ "add %[outptr0], %[outptr0], #8\n"
+ "mov v26.16b, v12.16b\n"
+ "mov v25.16b, v19.16b\n"
+ "ldr d11, [x25, x20]\n"
+ "mov v10.16b, v11.16b\n"
+ "ldr d23, [x25, x18]\n"
+ "mov v9.16b, v11.16b\n"
+ "ldr d7, [x25]\n"
+ "fmla v10.4s, v7.4s, v0.s[2]\n"
+ "ldr d13, [x25, x21]\n"
+ "mov v7.16b, v11.16b\n"
+ "ldr d31, [x25, x19]\n"
+ "mov v8.16b, v11.16b\n"
+ "ldr d21, [x25, %[input_col_stride1]]\n"
+ "fmls v10.4s, v23.4s, v0.s[3]\n"
+ "ldr d30, [x26, x20]\n"
+ "fmls v9.4s, v21.4s, v0.s[2]\n"
+ "ldr d29, [x26, x18]\n"
+ "fmla v7.4s, v21.4s, v0.s[2]\n"
+ "ldr d22, [x26]\n"
+ "fmls v8.4s, v21.4s, v0.s[1]\n"
+ "ldr d24, [x26, x21]\n"
+ "fmls v9.4s, v23.4s, v0.s[2]\n"
+ "ldr d27, [x26, x19]\n"
+ "fmls v7.4s, v23.4s, v0.s[2]\n"
+ "ldr d28, [x26, %[input_col_stride1]]\n"
+ "fsub v8.4s, v8.4s, v23.4s\n"
+ "add x25, x25, #8\n"
+ "fadd v9.4s, v9.4s, v31.4s\n"
+ "add x26, x26, #8\n"
+ "fsub v7.4s, v7.4s, v31.4s\n"
+ "fmla v8.4s, v31.4s, v0.s[1]\n"
+ "mov v11.16b, v11.16b\n"
+ "mov v15.16b, v13.16b\n"
+ "mov v14.16b, v30.16b\n"
+ "mov v13.16b, v30.16b\n"
+ "fmla v11.4s, v21.4s, v0.s[1]\n"
+ "fmla v15.4s, v21.4s, v0.s[2]\n"
+ "fmla v14.4s, v22.4s, v0.s[2]\n"
+ "fmls v13.4s, v28.4s, v0.s[2]\n"
+ "mov v21.16b, v30.16b\n"
+ "mov v22.16b, v30.16b\n"
+ "fsub v11.4s, v11.4s, v23.4s\n"
+ "fmls v15.4s, v31.4s, v0.s[3]\n"
+ "fmls v11.4s, v31.4s, v0.s[1]\n"
+ "fmls v14.4s, v29.4s, v0.s[3]\n"
+ "fmls v13.4s, v29.4s, v0.s[2]\n"
+ "fmla v21.4s, v28.4s, v0.s[2]\n"
+ "fmls v22.4s, v28.4s, v0.s[1]\n"
+ "mov v23.16b, v30.16b\n"
+ "mov v24.16b, v24.16b\n"
+ "fmls v26.4s, v10.4s, v0.s[2]\n"
+ "fadd v13.4s, v13.4s, v27.4s\n"
+ "fmls v21.4s, v29.4s, v0.s[2]\n"
+ "fsub v22.4s, v22.4s, v29.4s\n"
+ "fmla v23.4s, v28.4s, v0.s[1]\n"
+ "fmla v22.4s, v27.4s, v0.s[1]\n"
+ "fmla v24.4s, v28.4s, v0.s[2]\n"
+ "fsub v21.4s, v21.4s, v27.4s\n"
+ "fmls v26.4s, v1.4s, v0.s[2]\n"
+ "fsub v23.4s, v23.4s, v29.4s\n"
+ "fmls v25.4s, v9.4s, v0.s[2]\n"
+ "fmls v23.4s, v27.4s, v0.s[1]\n"
+ "fmls v24.4s, v27.4s, v0.s[3]\n"
+ "fadd v26.4s, v26.4s, v14.4s\n"
+ "mov v27.16b, v20.16b\n"
+ "str d26, [x28]\n"
+ "fmls v25.4s, v2.4s, v0.s[2]\n"
+ "fmls v27.4s, v7.4s, v0.s[2]\n"
+ "mov v31.16b, v16.16b\n"
+ "mov v30.16b, v17.16b\n"
+ "mov v29.16b, v18.16b\n"
+ "fadd v25.4s, v25.4s, v13.4s\n"
+ "fmls v31.4s, v8.4s, v0.s[2]\n"
+ "str d25, [x28, %[output_col_stride1]]\n"
+ "fmls v27.4s, v3.4s, v0.s[2]\n"
+ "fmls v30.4s, v11.4s, v0.s[2]\n"
+ "fmls v29.4s, v15.4s, v0.s[2]\n"
+ "fmls v31.4s, v4.4s, v0.s[2]\n"
+ "mov v26.16b, v12.16b\n"
+ "fadd v27.4s, v27.4s, v21.4s\n"
+ "mov v25.16b, v19.16b\n"
+ "str d27, [x28, x11]\n"
+ "fmls v30.4s, v5.4s, v0.s[2]\n"
+ "fadd v31.4s, v31.4s, v22.4s\n"
+ "fmls v29.4s, v6.4s, v0.s[2]\n"
+ "str d31, [x28, x13]\n"
+ "fmla v26.4s, v10.4s, v0.s[2]\n"
+ "fadd v30.4s, v30.4s, v23.4s\n"
+ "fmla v25.4s, v9.4s, v0.s[2]\n"
+ "str d30, [x28, x23]\n"
+ "fadd v29.4s, v29.4s, v24.4s\n"
+ "str d29, [x28, x15]\n"
+ "fmls v26.4s, v1.4s, v0.s[2]\n"
+ "fmls v25.4s, v2.4s, v0.s[2]\n"
+ "add x28, x28, #8\n"
+ "mov v30.16b, v20.16b\n"
+ "mov v29.16b, v16.16b\n"
+ "fsub v26.4s, v26.4s, v14.4s\n"
+ "mov v28.16b, v17.16b\n"
+ "str d26, [x22]\n"
+ "fsub v25.4s, v25.4s, v13.4s\n"
+ "str d25, [x22, %[output_col_stride1]]\n"
+ "fmla v30.4s, v7.4s, v0.s[2]\n"
+ "fmla v29.4s, v8.4s, v0.s[2]\n"
+ "fmla v28.4s, v11.4s, v0.s[2]\n"
+ "mov v26.16b, v18.16b\n"
+ "mov v25.16b, v12.16b\n"
+ "fmls v30.4s, v3.4s, v0.s[2]\n"
+ "mov v31.16b, v19.16b\n"
+ "fmls v29.4s, v4.4s, v0.s[2]\n"
+ "fmls v28.4s, v5.4s, v0.s[2]\n"
+ "fmla v26.4s, v15.4s, v0.s[2]\n"
+ "fmls v25.4s, v10.4s, v0.s[1]\n"
+ "fsub v30.4s, v30.4s, v21.4s\n"
+ "fmls v31.4s, v9.4s, v0.s[1]\n"
+ "str d30, [x22, x11]\n"
+ "fsub v29.4s, v29.4s, v22.4s\n"
+ "str d29, [x22, x13]\n"
+ "fsub v28.4s, v28.4s, v23.4s\n"
+ "str d28, [x22, x23]\n"
+ "fmls v26.4s, v6.4s, v0.s[2]\n"
+ "fsub v25.4s, v25.4s, v1.4s\n"
+ "fsub v31.4s, v31.4s, v2.4s\n"
+ "fmla v25.4s, v14.4s, v0.s[1]\n"
+ "fmla v31.4s, v13.4s, v0.s[1]\n"
+ "fsub v26.4s, v26.4s, v24.4s\n"
+ "mov v27.16b, v20.16b\n"
+ "str d26, [x22, x15]\n"
+ "mov v26.16b, v16.16b\n"
+ "str d25, [x12]\n"
+ "fmls v27.4s, v7.4s, v0.s[1]\n"
+ "str d31, [x12, %[output_col_stride1]]\n"
+ "fmls v26.4s, v8.4s, v0.s[1]\n"
+ "mov v25.16b, v17.16b\n"
+ "add x22, x22, #8\n"
+ "fsub v27.4s, v27.4s, v3.4s\n"
+ "mov v28.16b, v18.16b\n"
+ "fmla v27.4s, v21.4s, v0.s[1]\n"
+ "fsub v26.4s, v26.4s, v4.4s\n"
+ "fmla v26.4s, v22.4s, v0.s[1]\n"
+ "fmls v25.4s, v11.4s, v0.s[1]\n"
+ "fmls v28.4s, v15.4s, v0.s[1]\n"
+ "mov v12.16b, v12.16b\n"
+ "str d27, [x12, x11]\n"
+ "mov v19.16b, v19.16b\n"
+ "str d26, [x12, x13]\n"
+ "fsub v25.4s, v25.4s, v5.4s\n"
+ "fmla v25.4s, v23.4s, v0.s[1]\n"
+ "fsub v28.4s, v28.4s, v6.4s\n"
+ "fmla v28.4s, v24.4s, v0.s[1]\n"
+ "fmla v12.4s, v10.4s, v0.s[1]\n"
+ "fmla v19.4s, v9.4s, v0.s[1]\n"
+ "mov v20.16b, v20.16b\n"
+ "str d25, [x12, x23]\n"
+ "mov v16.16b, v16.16b\n"
+ "str d28, [x12, x15]\n"
+ "fsub v12.4s, v12.4s, v1.4s\n"
+ "fmls v12.4s, v14.4s, v0.s[1]\n"
+ "add x12, x12, #8\n"
+ "fsub v19.4s, v19.4s, v2.4s\n"
+ "fmla v20.4s, v7.4s, v0.s[1]\n"
+ "fmls v19.4s, v13.4s, v0.s[1]\n"
+ "fmla v16.4s, v8.4s, v0.s[1]\n"
+ "str d12, [x14]\n"
+ "mov v1.16b, v17.16b\n"
+ "fsub v20.4s, v20.4s, v3.4s\n"
+ "mov v17.16b, v18.16b\n"
+ "str d19, [x14, %[output_col_stride1]]\n"
+ "fmls v20.4s, v21.4s, v0.s[1]\n"
+ "fsub v16.4s, v16.4s, v4.4s\n"
+ "fmla v1.4s, v11.4s, v0.s[1]\n"
+ "fmls v16.4s, v22.4s, v0.s[1]\n"
+ "fmla v17.4s, v15.4s, v0.s[1]\n"
+ "str d20, [x14, x11]\n"
+ "fsub v1.4s, v1.4s, v5.4s\n"
+ "str d16, [x14, x13]\n"
+ "fmls v1.4s, v23.4s, v0.s[1]\n"
+ "fsub v17.4s, v17.4s, v6.4s\n"
+ "fmls v17.4s, v24.4s, v0.s[1]\n"
+ "str d1, [x14, x23]\n"
+ "str d17, [x14, x15]\n"
+ "add x14, x14, #8\n"
+ "ldr d2, [x27, x20]\n"
+ "mov v4.16b, v2.16b\n"
+ "ldr d17, [x27, x18]\n"
+ "mov v12.16b, v2.16b\n"
+ "ldr d18, [x27]\n"
+ "fmla v4.4s, v18.4s, v0.s[2]\n"
+ "ldr d3, [x27, x21]\n"
+ "mov v6.16b, v2.16b\n"
+ "ldr d5, [x27, x19]\n"
+ "mov v1.16b, v2.16b\n"
+ "ldr d18, [x27, %[input_col_stride1]]\n"
+ "fmls v4.4s, v17.4s, v0.s[3]\n"
+ "add x27, x27, #8\n"
+ "fmls v12.4s, v18.4s, v0.s[2]\n"
+ "sub %w[n_channels], %w[n_channels], #2\n"
+ "fmla v6.4s, v18.4s, v0.s[2]\n"
+ "fmls v1.4s, v18.4s, v0.s[1]\n"
+ "mov v2.16b, v2.16b\n"
+ "mov v3.16b, v3.16b\n"
+ "fmls v12.4s, v17.4s, v0.s[2]\n"
+ "mov v4.16b, v4.16b\n"
+ "fmls v6.4s, v17.4s, v0.s[2]\n"
+ "fsub v1.4s, v1.4s, v17.4s\n"
+ "fmla v1.4s, v5.4s, v0.s[1]\n"
+ "fmla v2.4s, v18.4s, v0.s[1]\n"
+ "fadd v12.4s, v12.4s, v5.4s\n"
+ "fmla v3.4s, v18.4s, v0.s[2]\n"
+ "fsub v6.4s, v6.4s, v5.4s\n"
+ "fmla v4.4s, v10.4s, v0.s[2]\n"
+ "fsub v2.4s, v2.4s, v17.4s\n"
+ "mov v16.16b, v12.16b\n"
+ "fmls v2.4s, v5.4s, v0.s[1]\n"
+ "fmls v3.4s, v5.4s, v0.s[3]\n"
+ "fmls v4.4s, v14.4s, v0.s[3]\n"
+ "fmla v16.4s, v9.4s, v0.s[2]\n"
+ "mov v5.16b, v6.16b\n"
+ "mov v6.16b, v1.16b\n"
+ "mov v9.16b, v2.16b\n"
+ "mov v10.16b, v3.16b\n"
+ "str d4, [x24]\n"
+ "fmls v16.4s, v13.4s, v0.s[3]\n"
+ "fmla v5.4s, v7.4s, v0.s[2]\n"
+ "fmla v6.4s, v8.4s, v0.s[2]\n"
+ "fmla v9.4s, v11.4s, v0.s[2]\n"
+ "fmla v10.4s, v15.4s, v0.s[2]\n"
+ "str d16, [x24, %[output_col_stride1]]\n"
+ "fmls v5.4s, v21.4s, v0.s[3]\n"
+ "fmls v6.4s, v22.4s, v0.s[3]\n"
+ "fmls v9.4s, v23.4s, v0.s[3]\n"
+ "fmls v10.4s, v24.4s, v0.s[3]\n"
+ "str d5, [x24, x11]\n"
+ "str d6, [x24, x13]\n"
+ "str d9, [x24, x23]\n"
+ "str d10, [x24, x15]\n"
+ "add x24, x24, #8\n"
+ "3:\n"
+ "cbz %w[n_channels], 4f\n"
+ "ldr s8, [%[inptr0], x20]\n"
+ "mov v14.16b, v8.16b\n"
+ "ldr s2, [%[inptr0], x18]\n"
+ "mov v10.16b, v8.16b\n"
+ "ldr s9, [%[inptr0]]\n"
+ "fmla v14.4s, v9.4s, v0.s[2]\n"
+ "ldr s1, [%[inptr0], x21]\n"
+ "mov v9.16b, v8.16b\n"
+ "ldr s4, [%[inptr0], x19]\n"
+ "mov v7.16b, v8.16b\n"
+ "ldr s12, [%[inptr0], %[input_col_stride1]]\n"
+ "fmls v14.4s, v2.4s, v0.s[3]\n"
+ "ldr s5, [x16, x20]\n"
+ "fmls v10.4s, v12.4s, v0.s[2]\n"
+ "ldr s20, [x16, x18]\n"
+ "fmla v9.4s, v12.4s, v0.s[2]\n"
+ "ldr s3, [x16]\n"
+ "fmls v7.4s, v12.4s, v0.s[1]\n"
+ "ldr s6, [x16, x21]\n"
+ "fmls v10.4s, v2.4s, v0.s[2]\n"
+ "ldr s16, [x16, x19]\n"
+ "fmls v9.4s, v2.4s, v0.s[2]\n"
+ "ldr s22, [x16, %[input_col_stride1]]\n"
+ "fsub v7.4s, v7.4s, v2.4s\n"
+ "ldr s17, [x17, x20]\n"
+ "fadd v10.4s, v10.4s, v4.4s\n"
+ "ldr s15, [x17, x18]\n"
+ "fsub v9.4s, v9.4s, v4.4s\n"
+ "ldr s19, [x17]\n"
+ "fmla v7.4s, v4.4s, v0.s[1]\n"
+ "ldr s18, [x17, x21]\n"
+ "mov v8.16b, v8.16b\n"
+ "ldr s13, [x17, x19]\n"
+ "mov v11.16b, v1.16b\n"
+ "ldr s21, [x17, %[input_col_stride1]]\n"
+ "fmla v8.4s, v12.4s, v0.s[1]\n"
+ "add %[inptr0], %[inptr0], #4\n"
+ "fmla v11.4s, v12.4s, v0.s[2]\n"
+ "add x16, x16, #4\n"
+ "mov v1.16b, v5.16b\n"
+ "add x17, x17, #4\n"
+ "fsub v8.4s, v8.4s, v2.4s\n"
+ "mov v2.16b, v5.16b\n"
+ "fmls v8.4s, v4.4s, v0.s[1]\n"
+ "fmls v11.4s, v4.4s, v0.s[3]\n"
+ "fmla v1.4s, v3.4s, v0.s[2]\n"
+ "fmls v2.4s, v22.4s, v0.s[2]\n"
+ "mov v3.16b, v5.16b\n"
+ "mov v4.16b, v5.16b\n"
+ "mov v5.16b, v5.16b\n"
+ "mov v6.16b, v6.16b\n"
+ "fmls v1.4s, v20.4s, v0.s[3]\n"
+ "fmls v2.4s, v20.4s, v0.s[2]\n"
+ "fmla v3.4s, v22.4s, v0.s[2]\n"
+ "fmls v4.4s, v22.4s, v0.s[1]\n"
+ "fmla v5.4s, v22.4s, v0.s[1]\n"
+ "fmla v6.4s, v22.4s, v0.s[2]\n"
+ "fadd v2.4s, v2.4s, v16.4s\n"
+ "mov v12.16b, v17.16b\n"
+ "fmls v3.4s, v20.4s, v0.s[2]\n"
+ "fsub v4.4s, v4.4s, v20.4s\n"
+ "fmla v4.4s, v16.4s, v0.s[1]\n"
+ "fsub v5.4s, v5.4s, v20.4s\n"
+ "fmls v5.4s, v16.4s, v0.s[1]\n"
+ "fmls v6.4s, v16.4s, v0.s[3]\n"
+ "fsub v3.4s, v3.4s, v16.4s\n"
+ "fmla v12.4s, v19.4s, v0.s[2]\n"
+ "mov v19.16b, v17.16b\n"
+ "mov v20.16b, v17.16b\n"
+ "mov v16.16b, v17.16b\n"
+ "mov v17.16b, v17.16b\n"
+ "fmls v12.4s, v15.4s, v0.s[3]\n"
+ "fmls v19.4s, v21.4s, v0.s[2]\n"
+ "fmla v20.4s, v21.4s, v0.s[2]\n"
+ "fmls v16.4s, v21.4s, v0.s[1]\n"
+ "fmla v17.4s, v21.4s, v0.s[1]\n"
+ "mov v18.16b, v18.16b\n"
+ "fmls v19.4s, v15.4s, v0.s[2]\n"
+ "mov v23.16b, v12.16b\n"
+ "fmls v20.4s, v15.4s, v0.s[2]\n"
+ "fsub v16.4s, v16.4s, v15.4s\n"
+ "fmla v16.4s, v13.4s, v0.s[1]\n"
+ "fsub v17.4s, v17.4s, v15.4s\n"
+ "fadd v19.4s, v19.4s, v13.4s\n"
+ "fmls v17.4s, v13.4s, v0.s[1]\n"
+ "fsub v20.4s, v20.4s, v13.4s\n"
+ "fmla v18.4s, v21.4s, v0.s[2]\n"
+ "fmla v23.4s, v14.4s, v0.s[2]\n"
+ "mov v15.16b, v19.16b\n"
+ "mov v14.16b, v20.16b\n"
+ "mov v24.16b, v16.16b\n"
+ "fmls v18.4s, v13.4s, v0.s[3]\n"
+ "fmla v15.4s, v10.4s, v0.s[2]\n"
+ "fmls v23.4s, v1.4s, v0.s[3]\n"
+ "fmla v14.4s, v9.4s, v0.s[2]\n"
+ "fmla v24.4s, v7.4s, v0.s[2]\n"
+ "mov v10.16b, v17.16b\n"
+ "fmls v15.4s, v2.4s, v0.s[3]\n"
+ "mov v7.16b, v18.16b\n"
+ "str s23, [%[outptr0]]\n"
+ "fmls v14.4s, v3.4s, v0.s[3]\n"
+ "fmls v24.4s, v4.4s, v0.s[3]\n"
+ "fmla v10.4s, v8.4s, v0.s[2]\n"
+ "str s15, [%[outptr0], %[output_col_stride1]]\n"
+ "fmla v7.4s, v11.4s, v0.s[2]\n"
+ "str s14, [%[outptr0], x11]\n"
+ "fmls v10.4s, v5.4s, v0.s[3]\n"
+ "str s24, [%[outptr0], x13]\n"
+ "fmls v7.4s, v6.4s, v0.s[3]\n"
+ "str s10, [%[outptr0], x23]\n"
+ "str s7, [%[outptr0], x15]\n"
+ "add %[outptr0], %[outptr0], #4\n"
+ "mov v26.16b, v12.16b\n"
+ "mov v25.16b, v19.16b\n"
+ "ldr s11, [x25, x20]\n"
+ "mov v10.16b, v11.16b\n"
+ "ldr s23, [x25, x18]\n"
+ "mov v9.16b, v11.16b\n"
+ "ldr s7, [x25]\n"
+ "fmla v10.4s, v7.4s, v0.s[2]\n"
+ "ldr s13, [x25, x21]\n"
+ "mov v7.16b, v11.16b\n"
+ "ldr s31, [x25, x19]\n"
+ "mov v8.16b, v11.16b\n"
+ "ldr s21, [x25, %[input_col_stride1]]\n"
+ "fmls v10.4s, v23.4s, v0.s[3]\n"
+ "ldr s30, [x26, x20]\n"
+ "fmls v9.4s, v21.4s, v0.s[2]\n"
+ "ldr s29, [x26, x18]\n"
+ "fmla v7.4s, v21.4s, v0.s[2]\n"
+ "ldr s22, [x26]\n"
+ "fmls v8.4s, v21.4s, v0.s[1]\n"
+ "ldr s24, [x26, x21]\n"
+ "fmls v9.4s, v23.4s, v0.s[2]\n"
+ "ldr s27, [x26, x19]\n"
+ "fmls v7.4s, v23.4s, v0.s[2]\n"
+ "ldr s28, [x26, %[input_col_stride1]]\n"
+ "fsub v8.4s, v8.4s, v23.4s\n"
+ "add x25, x25, #4\n"
+ "fadd v9.4s, v9.4s, v31.4s\n"
+ "add x26, x26, #4\n"
+ "fsub v7.4s, v7.4s, v31.4s\n"
+ "fmla v8.4s, v31.4s, v0.s[1]\n"
+ "mov v11.16b, v11.16b\n"
+ "mov v15.16b, v13.16b\n"
+ "mov v14.16b, v30.16b\n"
+ "mov v13.16b, v30.16b\n"
+ "fmla v11.4s, v21.4s, v0.s[1]\n"
+ "fmla v15.4s, v21.4s, v0.s[2]\n"
+ "fmla v14.4s, v22.4s, v0.s[2]\n"
+ "fmls v13.4s, v28.4s, v0.s[2]\n"
+ "mov v21.16b, v30.16b\n"
+ "mov v22.16b, v30.16b\n"
+ "fsub v11.4s, v11.4s, v23.4s\n"
+ "fmls v15.4s, v31.4s, v0.s[3]\n"
+ "fmls v11.4s, v31.4s, v0.s[1]\n"
+ "fmls v14.4s, v29.4s, v0.s[3]\n"
+ "fmls v13.4s, v29.4s, v0.s[2]\n"
+ "fmla v21.4s, v28.4s, v0.s[2]\n"
+ "fmls v22.4s, v28.4s, v0.s[1]\n"
+ "mov v23.16b, v30.16b\n"
+ "mov v24.16b, v24.16b\n"
+ "fmls v26.4s, v10.4s, v0.s[2]\n"
+ "fadd v13.4s, v13.4s, v27.4s\n"
+ "fmls v21.4s, v29.4s, v0.s[2]\n"
+ "fsub v22.4s, v22.4s, v29.4s\n"
+ "fmla v23.4s, v28.4s, v0.s[1]\n"
+ "fmla v22.4s, v27.4s, v0.s[1]\n"
+ "fmla v24.4s, v28.4s, v0.s[2]\n"
+ "fsub v21.4s, v21.4s, v27.4s\n"
+ "fmls v26.4s, v1.4s, v0.s[2]\n"
+ "fsub v23.4s, v23.4s, v29.4s\n"
+ "fmls v25.4s, v9.4s, v0.s[2]\n"
+ "fmls v23.4s, v27.4s, v0.s[1]\n"
+ "fmls v24.4s, v27.4s, v0.s[3]\n"
+ "fadd v26.4s, v26.4s, v14.4s\n"
+ "mov v27.16b, v20.16b\n"
+ "str s26, [x28]\n"
+ "fmls v25.4s, v2.4s, v0.s[2]\n"
+ "fmls v27.4s, v7.4s, v0.s[2]\n"
+ "mov v31.16b, v16.16b\n"
+ "mov v30.16b, v17.16b\n"
+ "mov v29.16b, v18.16b\n"
+ "fadd v25.4s, v25.4s, v13.4s\n"
+ "fmls v31.4s, v8.4s, v0.s[2]\n"
+ "str s25, [x28, %[output_col_stride1]]\n"
+ "fmls v27.4s, v3.4s, v0.s[2]\n"
+ "fmls v30.4s, v11.4s, v0.s[2]\n"
+ "fmls v29.4s, v15.4s, v0.s[2]\n"
+ "fmls v31.4s, v4.4s, v0.s[2]\n"
+ "mov v26.16b, v12.16b\n"
+ "fadd v27.4s, v27.4s, v21.4s\n"
+ "mov v25.16b, v19.16b\n"
+ "str s27, [x28, x11]\n"
+ "fmls v30.4s, v5.4s, v0.s[2]\n"
+ "fadd v31.4s, v31.4s, v22.4s\n"
+ "fmls v29.4s, v6.4s, v0.s[2]\n"
+ "str s31, [x28, x13]\n"
+ "fmla v26.4s, v10.4s, v0.s[2]\n"
+ "fadd v30.4s, v30.4s, v23.4s\n"
+ "fmla v25.4s, v9.4s, v0.s[2]\n"
+ "str s30, [x28, x23]\n"
+ "fadd v29.4s, v29.4s, v24.4s\n"
+ "str s29, [x28, x15]\n"
+ "fmls v26.4s, v1.4s, v0.s[2]\n"
+ "fmls v25.4s, v2.4s, v0.s[2]\n"
+ "add x28, x28, #4\n"
+ "mov v30.16b, v20.16b\n"
+ "mov v29.16b, v16.16b\n"
+ "fsub v26.4s, v26.4s, v14.4s\n"
+ "mov v28.16b, v17.16b\n"
+ "str s26, [x22]\n"
+ "fsub v25.4s, v25.4s, v13.4s\n"
+ "str s25, [x22, %[output_col_stride1]]\n"
+ "fmla v30.4s, v7.4s, v0.s[2]\n"
+ "fmla v29.4s, v8.4s, v0.s[2]\n"
+ "fmla v28.4s, v11.4s, v0.s[2]\n"
+ "mov v26.16b, v18.16b\n"
+ "mov v25.16b, v12.16b\n"
+ "fmls v30.4s, v3.4s, v0.s[2]\n"
+ "mov v31.16b, v19.16b\n"
+ "fmls v29.4s, v4.4s, v0.s[2]\n"
+ "fmls v28.4s, v5.4s, v0.s[2]\n"
+ "fmla v26.4s, v15.4s, v0.s[2]\n"
+ "fmls v25.4s, v10.4s, v0.s[1]\n"
+ "fsub v30.4s, v30.4s, v21.4s\n"
+ "fmls v31.4s, v9.4s, v0.s[1]\n"
+ "str s30, [x22, x11]\n"
+ "fsub v29.4s, v29.4s, v22.4s\n"
+ "str s29, [x22, x13]\n"
+ "fsub v28.4s, v28.4s, v23.4s\n"
+ "str s28, [x22, x23]\n"
+ "fmls v26.4s, v6.4s, v0.s[2]\n"
+ "fsub v25.4s, v25.4s, v1.4s\n"
+ "fsub v31.4s, v31.4s, v2.4s\n"
+ "fmla v25.4s, v14.4s, v0.s[1]\n"
+ "fmla v31.4s, v13.4s, v0.s[1]\n"
+ "fsub v26.4s, v26.4s, v24.4s\n"
+ "mov v27.16b, v20.16b\n"
+ "str s26, [x22, x15]\n"
+ "mov v26.16b, v16.16b\n"
+ "str s25, [x12]\n"
+ "fmls v27.4s, v7.4s, v0.s[1]\n"
+ "str s31, [x12, %[output_col_stride1]]\n"
+ "fmls v26.4s, v8.4s, v0.s[1]\n"
+ "mov v25.16b, v17.16b\n"
+ "add x22, x22, #4\n"
+ "fsub v27.4s, v27.4s, v3.4s\n"
+ "mov v28.16b, v18.16b\n"
+ "fmla v27.4s, v21.4s, v0.s[1]\n"
+ "fsub v26.4s, v26.4s, v4.4s\n"
+ "fmla v26.4s, v22.4s, v0.s[1]\n"
+ "fmls v25.4s, v11.4s, v0.s[1]\n"
+ "fmls v28.4s, v15.4s, v0.s[1]\n"
+ "mov v12.16b, v12.16b\n"
+ "str s27, [x12, x11]\n"
+ "mov v19.16b, v19.16b\n"
+ "str s26, [x12, x13]\n"
+ "fsub v25.4s, v25.4s, v5.4s\n"
+ "fmla v25.4s, v23.4s, v0.s[1]\n"
+ "fsub v28.4s, v28.4s, v6.4s\n"
+ "fmla v28.4s, v24.4s, v0.s[1]\n"
+ "fmla v12.4s, v10.4s, v0.s[1]\n"
+ "fmla v19.4s, v9.4s, v0.s[1]\n"
+ "mov v20.16b, v20.16b\n"
+ "str s25, [x12, x23]\n"
+ "mov v16.16b, v16.16b\n"
+ "str s28, [x12, x15]\n"
+ "fsub v12.4s, v12.4s, v1.4s\n"
+ "fmls v12.4s, v14.4s, v0.s[1]\n"
+ "add x12, x12, #4\n"
+ "fsub v19.4s, v19.4s, v2.4s\n"
+ "fmla v20.4s, v7.4s, v0.s[1]\n"
+ "fmls v19.4s, v13.4s, v0.s[1]\n"
+ "fmla v16.4s, v8.4s, v0.s[1]\n"
+ "str s12, [x14]\n"
+ "mov v1.16b, v17.16b\n"
+ "fsub v20.4s, v20.4s, v3.4s\n"
+ "mov v17.16b, v18.16b\n"
+ "str s19, [x14, %[output_col_stride1]]\n"
+ "fmls v20.4s, v21.4s, v0.s[1]\n"
+ "fsub v16.4s, v16.4s, v4.4s\n"
+ "fmla v1.4s, v11.4s, v0.s[1]\n"
+ "fmls v16.4s, v22.4s, v0.s[1]\n"
+ "fmla v17.4s, v15.4s, v0.s[1]\n"
+ "str s20, [x14, x11]\n"
+ "fsub v1.4s, v1.4s, v5.4s\n"
+ "str s16, [x14, x13]\n"
+ "fmls v1.4s, v23.4s, v0.s[1]\n"
+ "fsub v17.4s, v17.4s, v6.4s\n"
+ "fmls v17.4s, v24.4s, v0.s[1]\n"
+ "str s1, [x14, x23]\n"
+ "str s17, [x14, x15]\n"
+ "add x14, x14, #4\n"
+ "ldr s2, [x27, x20]\n"
+ "mov v4.16b, v2.16b\n"
+ "ldr s17, [x27, x18]\n"
+ "mov v12.16b, v2.16b\n"
+ "ldr s18, [x27]\n"
+ "fmla v4.4s, v18.4s, v0.s[2]\n"
+ "ldr s3, [x27, x21]\n"
+ "mov v6.16b, v2.16b\n"
+ "ldr s5, [x27, x19]\n"
+ "mov v1.16b, v2.16b\n"
+ "ldr s18, [x27, %[input_col_stride1]]\n"
+ "fmls v4.4s, v17.4s, v0.s[3]\n"
+ "add x27, x27, #4\n"
+ "fmls v12.4s, v18.4s, v0.s[2]\n"
+ "fmla v6.4s, v18.4s, v0.s[2]\n"
+ "fmls v1.4s, v18.4s, v0.s[1]\n"
+ "mov v2.16b, v2.16b\n"
+ "mov v3.16b, v3.16b\n"
+ "mov v4.16b, v4.16b\n"
+ "fmls v12.4s, v17.4s, v0.s[2]\n"
+ "fmls v6.4s, v17.4s, v0.s[2]\n"
+ "fsub v1.4s, v1.4s, v17.4s\n"
+ "fmla v2.4s, v18.4s, v0.s[1]\n"
+ "fmla v1.4s, v5.4s, v0.s[1]\n"
+ "fmla v3.4s, v18.4s, v0.s[2]\n"
+ "fadd v12.4s, v12.4s, v5.4s\n"
+ "fsub v6.4s, v6.4s, v5.4s\n"
+ "fsub v2.4s, v2.4s, v17.4s\n"
+ "fmla v4.4s, v10.4s, v0.s[2]\n"
+ "fmls v2.4s, v5.4s, v0.s[1]\n"
+ "fmls v3.4s, v5.4s, v0.s[3]\n"
+ "mov v16.16b, v12.16b\n"
+ "mov v5.16b, v6.16b\n"
+ "fmls v4.4s, v14.4s, v0.s[3]\n"
+ "mov v6.16b, v1.16b\n"
+ "fmla v16.4s, v9.4s, v0.s[2]\n"
+ "fmla v5.4s, v7.4s, v0.s[2]\n"
+ "fmla v6.4s, v8.4s, v0.s[2]\n"
+ "mov v9.16b, v2.16b\n"
+ "str s4, [x24]\n"
+ "mov v10.16b, v3.16b\n"
+ "fmls v16.4s, v13.4s, v0.s[3]\n"
+ "fmls v5.4s, v21.4s, v0.s[3]\n"
+ "fmls v6.4s, v22.4s, v0.s[3]\n"
+ "fmla v9.4s, v11.4s, v0.s[2]\n"
+ "fmla v10.4s, v15.4s, v0.s[2]\n"
+ "str s16, [x24, %[output_col_stride1]]\n"
+ "str s5, [x24, x11]\n"
+ "fmls v9.4s, v23.4s, v0.s[3]\n"
+ "str s6, [x24, x13]\n"
+ "fmls v10.4s, v24.4s, v0.s[3]\n"
+ "str s9, [x24, x23]\n"
+ "str s10, [x24, x15]\n"
+ "add x24, x24, #4\n"
+ "4:\n"
+ : [outptr0] "+r" (matrix_base),
+ [n_channels] "+r" (n_channels),
+ [inptr0] "+r" (input_base)
+ : [pcoeffs] "r" (pcoeffs),
+ [output_row_stride] "r" (6 * matrix_stride * sizeof(float)),
+ [output_col_stride1] "r" (matrix_stride * sizeof(float)),
+ [input_row_stride] "r" (input_row_stride * sizeof(float)),
+ [input_col_stride1] "r" (input_col_stride * sizeof(float))
+ : "cc", "v0", "v1", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17",
+ "v18", "v19", "v2", "v20", "v21", "v22", "v23", "v24", "v25", "v26",
+ "v27", "v28", "v29", "v3", "v30", "v31", "v4", "v5", "v6", "v7", "v8",
+ "v9", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x18", "x19",
+ "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "memory"
+ );
+}
+
+#else // __arm__, not __aarch64__: fall back to NEON intrinsics and scalar code
+
+template <>
+void InputTransform<6, 6, float, float, WinogradRoots::Integers>::transform_tile(
+ const int n_channels,
+ const float* const input_base,
+ const int input_row_stride,
+ const int input_col_stride,
+ float* outptr,
+ const int matrix_stride
+)
+{
+ constexpr int inner_tile_rows = 6;
+ constexpr int inner_tile_cols = 6;
+
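+  // This tile transform computes U = B^T x B for F(4x4, 3x3) with the integer
+  // interpolation points {0, +/-1, +/-2}. Reading the coefficients off the
+  // arithmetic below:
+  //
+  //         [4  0 -5  0  1  0]
+  //         [0 -4 -4  1  1  0]
+  //   B^T = [0  4 -4 -1  1  0]
+  //         [0 -2 -1  2  1  0]
+  //         [0  2 -1 -2  1  0]
+  //         [0  4  0 -5  0  1]
+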
+ // Get pointers into the input tile
+ const float *x_ptrs[inner_tile_rows][inner_tile_cols];
+ for (int i = 0, xi = 0; i < inner_tile_rows; i++, xi++)
+ {
+ // Get a pointer into the row
+ const float* const row_ptr = input_base + xi*input_row_stride;
+
+ for (int j = 0, xj = 0; j < inner_tile_cols; j++, xj++)
+ {
+ x_ptrs[i][j] = row_ptr + xj*input_col_stride;
+ }
+ }
+
+ // Matrices used/computed in this kernel.
+ float x[inner_tile_rows][inner_tile_cols];
+ float XTx[inner_tile_rows][inner_tile_cols];
+ float U[inner_tile_rows][inner_tile_cols];
+ for (int i = 0; i < inner_tile_rows; i++)
+ {
+ for (int j = 0; j < inner_tile_cols; j++)
+ {
+ x[i][j] = XTx[i][j] = 0.0f;
+ }
+ }
+
+ // Perform the Winograd input transformation for each channel in the input
+ // tensor.
+ int channels_remaining = n_channels;
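+  // Channels are independent, so process two at a time with 64-bit NEON
+  // vectors and handle any remaining channel in the scalar tail below.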
+ for (; channels_remaining >= 2; channels_remaining -= 2)
+ {
+ // Matrices used/computed in this kernel
+ float32x2_t x[inner_tile_rows][inner_tile_cols];
+ float32x2_t XTx[inner_tile_rows][inner_tile_cols];
+ float32x2_t U[inner_tile_rows][inner_tile_cols];
+ for (int i = 0; i < inner_tile_rows; i++)
+ {
+ for (int j = 0; j < inner_tile_cols; j++)
+ {
+ x[i][j] = vdup_n_f32(0.0f);
+ XTx[i][j] = vdup_n_f32(0.0f);
+ }
+ }
+
+    // Read a 6x6 tile of the input tensor
+ for (int i = 0; i < inner_tile_rows; i++)
+ {
+ for (int j = 0; j < inner_tile_cols; j++)
+ {
+ x[i][j] = vld1_f32(x_ptrs[i][j]);
+ x_ptrs[i][j] += 2;
+ }
+ }
+
+ // Compute XT . x
+ for (int j = 0; j < inner_tile_cols; j++)
+ {
+ // XTx[0][j] = 4*x[0][j] + -5*x[2][j] + 1*x[4][j];
+ XTx[0][j] = vmls_n_f32(vmla_n_f32(x[4][j], x[0][j], 4.0f), x[2][j], 5.0f);
+
+ // XTx[1][j] = -4*x[1][j] + -4*x[2][j] + 1*x[3][j] + 1*x[4][j];
+ XTx[1][j] = vmls_n_f32(vadd_f32(x[3][j], x[4][j]), vadd_f32(x[1][j], x[2][j]), 4.0f);
+
+ // XTx[2][j] = 4*x[1][j] + -4*x[2][j] + -1*x[3][j] + 1*x[4][j];
+ XTx[2][j] = vmla_n_f32(vsub_f32(x[4][j], x[3][j]), vsub_f32(x[1][j], x[2][j]), 4.0f);
+
+ // XTx[3][j] = -2*x[1][j] + -1*x[2][j] + 2*x[3][j] + 1*x[4][j];
+ XTx[3][j] = vmla_n_f32(vsub_f32(x[4][j], x[2][j]), vsub_f32(x[3][j], x[1][j]), 2.0f);
+
+ // XTx[4][j] = 2*x[1][j] + -1*x[2][j] + -2*x[3][j] + 1*x[4][j];
+ XTx[4][j] = vmla_n_f32(vsub_f32(x[4][j], x[2][j]), vsub_f32(x[1][j], x[3][j]), 2.0f);
+
+ // XTx[5][j] = 4*x[1][j] + -5*x[3][j] + 1*x[5][j];
+ XTx[5][j] = vmls_n_f32(vmla_n_f32(x[5][j], x[1][j], 4.0f), x[3][j], 5.0f);
+ }
+
+ // Compute U = XT . x . X
+ for (int i = 0; i < inner_tile_rows; i++)
+ {
+ // U[i][0] = 4*XTx[i][0] + -5*XTx[i][2] + 1*XTx[i][4];
+ U[i][0] = vmls_n_f32(vmla_n_f32(XTx[i][4], XTx[i][0], 4.0f), XTx[i][2], 5.0f);
+
+ // U[i][1] = -4*XTx[i][1] + -4*XTx[i][2] + 1*XTx[i][3] + 1*XTx[i][4];
+ U[i][1] = vmls_n_f32(vadd_f32(XTx[i][3], XTx[i][4]), vadd_f32(XTx[i][1], XTx[i][2]), 4.0f);
+
+ // U[i][2] = 4*XTx[i][1] + -4*XTx[i][2] + -1*XTx[i][3] + 1*XTx[i][4];
+ U[i][2] = vmla_n_f32(vsub_f32(XTx[i][4], XTx[i][3]), vsub_f32(XTx[i][1], XTx[i][2]), 4.0f);
+
+ // U[i][3] = -2*XTx[i][1] + -1*XTx[i][2] + 2*XTx[i][3] + 1*XTx[i][4];
+ U[i][3] = vmla_n_f32(vsub_f32(XTx[i][4], XTx[i][2]), vsub_f32(XTx[i][3], XTx[i][1]), 2.0f);
+
+ // U[i][4] = 2*XTx[i][1] + -1*XTx[i][2] + -2*XTx[i][3] + 1*XTx[i][4];
+ U[i][4] = vmla_n_f32(vsub_f32(XTx[i][4], XTx[i][2]), vsub_f32(XTx[i][1], XTx[i][3]), 2.0f);
+
+ // U[i][5] = 4*XTx[i][1] + -5*XTx[i][3] + 1*XTx[i][5];
+ U[i][5] = vmls_n_f32(vmla_n_f32(XTx[i][5], XTx[i][1], 4.0f), XTx[i][3], 5.0f);
+ }
+
+ // Store the transformed matrix
+ for (int i = 0, m = 0; i < inner_tile_rows; i++)
+ {
+ for (int j = 0; j < inner_tile_cols; j++, m++)
+ {
+ vst1_f32(outptr + m*matrix_stride, U[i][j]);
+ }
+ }
+ outptr += 2;
+ }
+ for (; channels_remaining; channels_remaining--)
+ {
+ // Load x
+ for (int i = 0; i < inner_tile_rows; i++)
+ {
+ for (int j = 0; j < inner_tile_cols; j++)
+ {
+ x[i][j] = *(x_ptrs[i][j]++);
+ }
+ }
+
+ // Compute XT . x
+ for (int j = 0; j < inner_tile_cols; j++)
+ {
+ XTx[0][j] = 4*x[0][j] + -5*x[2][j] + 1*x[4][j];
+ XTx[1][j] = -4*x[1][j] + -4*x[2][j] + 1*x[3][j] + 1*x[4][j];
+ XTx[2][j] = 4*x[1][j] + -4*x[2][j] + -1*x[3][j] + 1*x[4][j];
+ XTx[3][j] = -2*x[1][j] + -1*x[2][j] + 2*x[3][j] + 1*x[4][j];
+ XTx[4][j] = 2*x[1][j] + -1*x[2][j] + -2*x[3][j] + 1*x[4][j];
+ XTx[5][j] = 4*x[1][j] + -5*x[3][j] + 1*x[5][j];
+ }
+
+ // Compute U = XT . x . X
+ for (int i = 0; i < inner_tile_rows; i++)
+ {
+ U[i][0] = 4*XTx[i][0] + -5*XTx[i][2] + 1*XTx[i][4];
+ U[i][1] = -4*XTx[i][1] + -4*XTx[i][2] + 1*XTx[i][3] + 1*XTx[i][4];
+ U[i][2] = 4*XTx[i][1] + -4*XTx[i][2] + -1*XTx[i][3] + 1*XTx[i][4];
+ U[i][3] = -2*XTx[i][1] + -1*XTx[i][2] + 2*XTx[i][3] + 1*XTx[i][4];
+ U[i][4] = 2*XTx[i][1] + -1*XTx[i][2] + -2*XTx[i][3] + 1*XTx[i][4];
+ U[i][5] = 4*XTx[i][1] + -5*XTx[i][3] + 1*XTx[i][5];
+ }
+
+ // Store the transformed matrix
+ for (int i = 0, m = 0; i < inner_tile_rows; i++)
+ {
+ for (int j = 0; j < inner_tile_cols; j++, m++)
+ {
+ *(outptr + m*matrix_stride) = U[i][j];
+ }
+ }
+ outptr++;
+ }
+}
+
+#endif
+
+template class InputTransform<6, 6, float, float, WinogradRoots::Integers>;
+
+} // namespace winograd
diff --git a/arm_compute/core/NEON/kernels/convolution/winograd/transforms/kernel.hpp b/src/core/NEON/kernels/convolution/winograd/winograd_transforms/kernel.hpp
index bad3ef2249..e45f1863e3 100644
--- a/arm_compute/core/NEON/kernels/convolution/winograd/transforms/kernel.hpp
+++ b/src/core/NEON/kernels/convolution/winograd/winograd_transforms/kernel.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -22,29 +22,45 @@
* SOFTWARE.
*/
-#include "arm_compute/core/NEON/kernels/convolution/winograd/winograd_gemm.hpp"
+#pragma once
+#include "winograd.hpp"
using namespace winograd;
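+// Convenience macro: expands to the fully-qualified, templated member-function
+// signature of WeightTransform so that each definition below need not repeat
+// the full template parameter list.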
+#define MEMBERFN(RTYPE) template <\
+ int KernelRows, int KernelCols, int InnerTileRows, int InnerTileCols, typename TIn, typename TOut, WinogradRoots Roots\
+> RTYPE WeightTransform<KernelRows, KernelCols, InnerTileRows, InnerTileCols, TIn, TOut, Roots>
-template <int otr, int otc, int kr, int kc>
-template <typename T>
-WinogradGEMM<otr, otc, kr, kc>::WeightsTransform<T>::WeightsTransform(
- const T* const input,
- T* const output,
- const int matrix_stride, /** Stride across matrices in the output. */
- const int matrix_row_stride, /** Stride across rows of the matrix. */
+MEMBERFN()::WeightTransform(
const int n_output_channels,
const int n_input_channels
-) : inptr(input), outptr(output),
- matrix_stride(matrix_stride), matrix_row_stride(matrix_row_stride),
- n_output_channels(n_output_channels), n_input_channels(n_input_channels)
+) : _n_output_channels(n_output_channels), _n_input_channels(n_input_channels),
+ _matrices(nullptr), _matrix_stride(0), _matrix_row_stride(0), _weights(nullptr)
{
+
+}
+
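+// Weight and output-matrix pointers are now supplied through setters rather
+// than constructor arguments (compare the removed parameters above), so a
+// single WeightTransform can be reconfigured between runs.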
+MEMBERFN(void)::set_weight_tensor(const void * const weights)
+{
+ _weights = static_cast<const TIn *>(weights);
+}
+
+MEMBERFN(void)::set_output_matrices(void * const mptr, const int ldmatrix, const int ldrow)
+{
+ _matrices = static_cast<TOut *>(mptr);
+ _matrix_stride = ldmatrix;
+ _matrix_row_stride = ldrow;
}
+MEMBERFN(size_t)::get_working_space_size(unsigned int) const
+{
+ return 0;
+}
-template <int otr, int otc, int kr, int kc>
-template <typename T>
-unsigned int WinogradGEMM<otr, otc, kr, kc>::WeightsTransform<T>::get_window() const
+MEMBERFN(void)::set_working_space(void *)
+{
+}
+
+MEMBERFN(unsigned int)::get_window(void) const
{
// TODO When the weights transform supports multithreading, return the number
// of output channels. For now we return 1 to indicate that the weights must
@@ -53,25 +69,10 @@ unsigned int WinogradGEMM<otr, otc, kr, kc>::WeightsTransform<T>::get_window() c
return 1;
}
-
-template <int otr, int otc, int kr, int kc>
-template <typename T>
-void WinogradGEMM<otr, otc, kr, kc>::WeightsTransform<T>::run(
- const unsigned int start, const unsigned int stop
-)
+MEMBERFN(void)::run(const unsigned int, const unsigned int, unsigned int)
{
- // TODO When the weights transform supports multithreading call execute for a
- // portion of the output channels.
- (void) start;
- (void) stop;
-
- // For now, just do all of the work.
execute(
- n_output_channels,
- n_input_channels,
- inptr,
- outptr,
- matrix_stride,
- matrix_row_stride
+ _n_output_channels, _n_input_channels, _weights,
+ _matrices, _matrix_stride, _matrix_row_stride
);
}
diff --git a/src/core/NEON/kernels/convolution/winograd/winograd_transforms/output.hpp b/src/core/NEON/kernels/convolution/winograd/winograd_transforms/output.hpp
new file mode 100644
index 0000000000..d97af21a43
--- /dev/null
+++ b/src/core/NEON/kernels/convolution/winograd/winograd_transforms/output.hpp
@@ -0,0 +1,249 @@
+/*
+ * Copyright (c) 2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#pragma once
+
+#include <algorithm>
+#include "winograd.hpp"
+#include "padding.hpp"
+#include "utils.hpp"
+
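+// Convenience macros: MEMBERFN expands to the templated member-function
+// signature of the general OutputTransform, and Nx1MEMBERFN to that of the
+// partial specialisation handling Nx1 (single-column) kernels.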
+#define MEMBERFN(RTYPE) template<\
+ int KernelRows, int KernelCols, int InnerTileRows, int InnerTileCols,\
+ typename TIn, typename TOut, WinogradRoots Roots\
+> RTYPE OutputTransform<KernelRows, KernelCols, InnerTileRows, InnerTileCols, TIn, TOut, Roots>
+
+#define Nx1MEMBERFN(RTYPE) template<\
+ int KernelRows, int InnerTileRows, typename TIn, typename TOut, WinogradRoots Roots\
+> RTYPE OutputTransform<KernelRows, 1, InnerTileRows, 1, TIn, TOut, Roots>
+
+namespace winograd
+{
+
+MEMBERFN()::OutputTransform(
+ const int n_batches,
+ const int n_rows,
+ const int n_cols,
+ const int n_channels
+) : _n_batches(n_batches), _n_rows(n_rows), _n_cols(n_cols), _n_channels(n_channels),
+ _matrix_base(nullptr),
+ _biases(nullptr),
+ _matrix_stride(0), _matrix_row_stride(0), _matrix_batch_stride(0),
+ _outptr(nullptr),
+ _tiles_M(iceildiv(n_rows, output_tile_rows)),
+ _tiles_N(iceildiv(n_cols, output_tile_cols)),
+ _out_col_stride(0), _out_row_stride(0), _out_batch_stride(0),
+ _working_space_col_stride(n_channels),
+ _working_space_row_stride(output_tile_cols * _working_space_col_stride),
+ _working_space(nullptr)
+{
+}
+
+MEMBERFN(void)::set_input_matrices(const void * const mptr, const int ldmatrix, const int ldrow)
+{
+ _matrix_base = static_cast<const TIn *>(mptr);
+ _matrix_stride = ldmatrix;
+ _matrix_row_stride = ldrow;
+ _matrix_batch_stride = _tiles_M * _tiles_N * ldrow;
+}
+
+MEMBERFN(void)::set_bias(const void * const bias)
+{
+ _biases = static_cast<const TOut *>(bias);
+}
+
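+// Each overload below fills in the strides it omits by assuming a densely
+// packed NHWC tensor: the column stride defaults to the channel count, the
+// row stride to columns * column stride, and the batch stride to rows * row
+// stride.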
+MEMBERFN(void)::set_output_tensor(void * const outptr)
+{
+ set_output_tensor(outptr, _n_channels);
+}
+
+MEMBERFN(void)::set_output_tensor(void * const outptr, const int ldcol)
+{
+ set_output_tensor(outptr, _n_cols * ldcol, ldcol);
+}
+
+MEMBERFN(void)::set_output_tensor(void * const outptr, const int ldrow, const int ldcol)
+{
+ set_output_tensor(outptr, _n_rows * ldrow, ldrow, ldcol);
+}
+
+MEMBERFN(void)::set_output_tensor(void * const outptr, const int ldbatch, const int ldrow, const int ldcol)
+{
+ _outptr = static_cast<TOut *>(outptr);
+ _out_batch_stride = ldbatch;
+ _out_row_stride = ldrow;
+ _out_col_stride = ldcol;
+}
+
+Nx1MEMBERFN()::OutputTransform(
+ const int n_batches,
+ const int n_rows,
+ const int n_cols,
+ const int n_channels
+) : OutputTransform<1, KernelRows, 1, InnerTileRows, TIn, TOut, Roots>::OutputTransform(
+ n_batches, n_cols, n_rows, n_channels /* Transpose rows and columns */
+ )
+{
+}
+
+Nx1MEMBERFN(void)::set_output_tensor(void * const outptr)
+{
+ set_output_tensor(outptr, this->_n_channels);
+}
+
+Nx1MEMBERFN(void)::set_output_tensor(void * const outptr, const int ldcol)
+{
+ set_output_tensor(outptr, this->_n_cols * ldcol, ldcol);
+}
+
+Nx1MEMBERFN(void)::set_output_tensor(void * const outptr, const int ldrow, const int ldcol)
+{
+ set_output_tensor(outptr, this->_n_rows * ldrow, ldrow, ldcol);
+}
+
+Nx1MEMBERFN(void)::set_output_tensor(void * const outptr, const int ldbatch, const int ldrow, const int ldcol)
+{
+ // Transpose rows and columns
+ Base::set_output_tensor(outptr, ldbatch, ldcol, ldrow);
+}
+
+MEMBERFN(size_t)::get_working_space_size(const unsigned int nthreads) const
+{
+ return sizeof(TOut) * output_tile_rows * _working_space_row_stride * nthreads;
+}
+
+MEMBERFN(void)::set_working_space(void * const buffer)
+{
+ _working_space = static_cast<TOut *>(buffer);
+}
+
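+// Work is divided over channels: the window is the number of channel blocks
+// of size WINDOW_BLOCK, and run() maps [start, stop) back onto a channel
+// range.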
+MEMBERFN(unsigned int)::get_window(void) const
+{
+ return iceildiv(_n_channels, WINDOW_BLOCK);
+}
+
+MEMBERFN(void)::run(
+ const unsigned int start,
+ const unsigned int stop,
+ const unsigned int threadid
+)
+{
+ // Determine the channels on which to work
+ if (start >= get_window())
+ {
+ return; // No work to do beyond the end of the window
+ }
+ const unsigned int start_channel = start * WINDOW_BLOCK;
+ const unsigned int stop_channel = std::min<unsigned int>(_n_channels, stop * WINDOW_BLOCK);
+ const unsigned int n_channels = stop_channel - start_channel;
+
+ const auto matrix_tile_col_stride = _matrix_row_stride;
+ const auto matrix_tile_row_stride = _tiles_N * matrix_tile_col_stride;
+
+ const TOut* const bptr = (_biases == nullptr) ? nullptr : _biases + start_channel;
+
+ // Loop over batches
+ for (int batch = 0; batch < _n_batches; batch++)
+ {
+ const TIn* const matrix_batch = _matrix_base + start_channel + batch * _matrix_batch_stride;
+ TOut* const outptr_batch = _outptr + start_channel + batch * _out_batch_stride;
+
+ for (int tile_i = 0; tile_i < _tiles_M; tile_i++)
+ {
+ // Compute properties of the row of output tiles
+ const int row_pad_bottom = std::max(0, (tile_i + 1)*output_tile_rows - _n_rows);
+ const TIn* const matrix_tile_row = matrix_batch + tile_i * matrix_tile_row_stride;
+ TOut* const outptr_row = outptr_batch + tile_i * output_tile_rows * _out_row_stride;
+
+ for (int tile_j = 0; tile_j < _tiles_N; tile_j++)
+ {
+        // Compute the properties of this specific tile
+ const int tile_pad_right = std::max(0, (tile_j + 1)*output_tile_cols - _n_cols);
+ const TIn* const matrix_tile = matrix_tile_row + tile_j * matrix_tile_col_stride;
+ TOut* const outptr_tile = outptr_row + tile_j * output_tile_cols * _out_col_stride;
+
+ // Perform the transformation
+ if (row_pad_bottom || tile_pad_right)
+ {
+ transform_cropped_tile(
+ threadid, n_channels, outptr_tile, matrix_tile, bptr,
+ row_pad_bottom, tile_pad_right
+ );
+ }
+ else
+ {
+ transform_uncropped_tile(
+ threadid, n_channels, outptr_tile, matrix_tile, bptr
+ );
+ }
+ }
+ }
+ }
+}
+
+MEMBERFN(void)::transform_uncropped_tile(
+ const unsigned int /* threadid unused */,
+ const int n_channels,
+ TOut * const outptr,
+ const TIn * const inptr,
+ const TOut * const biases
+)
+{
+ transform_tile(
+ n_channels, inptr, _matrix_stride, biases,
+ outptr, _out_row_stride, _out_col_stride
+ );
+}
+
+MEMBERFN(void)::transform_cropped_tile(
+ const unsigned int threadid,
+ const int n_channels,
+ TOut * const outptr,
+ const TIn * const inptr,
+ const TOut * const biases,
+ const int pad_bottom,
+ const int pad_right
+)
+{
+ // Transform into working space and then copy the relevant section out.
+ TOut *wsptr = static_cast<TOut *>(get_working_space(threadid));
+ transform_tile(
+ n_channels, inptr, _matrix_stride, biases,
+ wsptr, _working_space_row_stride, _working_space_col_stride
+ );
+
+ padding::crop_and_copy_tile(
+ output_tile_rows, output_tile_cols, n_channels,
+ wsptr, _working_space_row_stride, _working_space_col_stride,
+ outptr, _out_row_stride, _out_col_stride,
+ 0u, 0u, pad_bottom, pad_right
+ );
+}
+
+MEMBERFN(void *)::get_working_space(const unsigned int threadid) const
+{
+ return _working_space + output_tile_rows * _working_space_row_stride * threadid;
+}
+
+} // namespace winograd
diff --git a/src/core/NEON/kernels/convolution/winograd/transforms/output_2_7_fp32.cpp b/src/core/NEON/kernels/convolution/winograd/winograd_transforms/output_2_7_fp32_fp32_integers.cpp
index ea842a45ee..c32d7f2f58 100644
--- a/src/core/NEON/kernels/convolution/winograd/transforms/output_2_7_fp32.cpp
+++ b/src/core/NEON/kernels/convolution/winograd/winograd_transforms/output_2_7_fp32_fp32_integers.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -22,43 +22,29 @@
* SOFTWARE.
*/
-#include "arm_compute/core/NEON/kernels/convolution/winograd/transforms/output.hpp"
-#include "arm_compute/core/NEON/kernels/convolution/winograd/winograd_output_transform.hpp"
-#include "arm_compute/core/NEON/kernels/convolution/common/arm.hpp"
+#include "arm.hpp"
+#include "output.hpp"
-namespace
+namespace winograd
{
-template <bool Specialized, int PadRight=0>
-void winograd_output_transform_2_7_fp32_process_tile(
+template <>
+void OutputTransform<1, 7, 1, 8, float, float, WinogradRoots::Integers>::transform_tile(
const int n_channels,
- const float* const matrix_base,
+ const float* inptr,
const int matrix_stride,
- const float* const biases,
+ const float* bptr,
float* const output,
- const int output_row_stride,
- const int output_col_stride,
- const int _pad_bottom,
- const int _pad_right
+ const int, // No need to stride across rows
+ const int output_col_stride
)
{
- (void) output_row_stride;
- (void) _pad_bottom;
- constexpr int output_tile_cols = 2;
- constexpr int inner_tile_cols = 8;
-
- const int pad_right = Specialized ? PadRight : _pad_right;
- const int cells_j = output_tile_cols - pad_right;
-
-
// Construct a map to the output cells
- float *outptrs[cells_j];
- for (int j = 0; j < cells_j; j++)
+ float *outptrs[output_tile_cols];
+ for (int j = 0; j < output_tile_cols; j++)
{
outptrs[j] = output + j*output_col_stride;
}
- const float *inptr = matrix_base;
- const float *bptr = biases;
// For each channel of the output
int channels_remaining = n_channels;
@@ -84,7 +70,7 @@ void winograd_output_transform_2_7_fp32_process_tile(
b = vld1q_f32(bptr);
bptr += 4;
}
- for (int j = 0; j < cells_j; j++)
+ for (int j = 0; j < output_tile_cols; j++)
{
vst1q_f32(outptrs[j], f[j] + b);
outptrs[j] += 4;
@@ -111,7 +97,7 @@ void winograd_output_transform_2_7_fp32_process_tile(
b = vld1_f32(bptr);
bptr += 2;
}
- for (int j = 0; j < cells_j; j++)
+ for (int j = 0; j < output_tile_cols; j++)
{
vst1_f32(outptrs[j], f[j] + b);
outptrs[j] += 2;
@@ -138,26 +124,14 @@ void winograd_output_transform_2_7_fp32_process_tile(
{
b = *(bptr++);
}
- for (int j = 0; j < cells_j; j++)
+ for (int j = 0; j < output_tile_cols; j++)
{
*(outptrs[j]++) = f[j] + b;
}
}
}
-} // namespace (anonymous)
-namespace winograd
-{
-using Tiles = OutputTransformImplTiles<1, 7, 1, 8, float>;
-
-template <>
-const Tiles::TileFn Tiles::tilefn_unpadded = winograd_output_transform_2_7_fp32_process_tile<true>;
-
-template <>
-const Tiles::TileFn Tiles::tilefn_right_padded[n_pad_right] = {
- winograd_output_transform_2_7_fp32_process_tile<true, 1>
-};
+template class OutputTransform<1, 7, 1, 8, float, float, WinogradRoots::Integers>;
+template class OutputTransform<7, 1, 8, 1, float, float, WinogradRoots::Integers>;
-template class OutputTransform<1, 7, 1, 8, float>;
-template class OutputTransform<7, 1, 8, 1, float>;
} // namespace winograd
diff --git a/src/core/NEON/kernels/convolution/winograd/winograd_transforms/output_2x2_3x3_fp32_fp32_integers.cpp b/src/core/NEON/kernels/convolution/winograd/winograd_transforms/output_2x2_3x3_fp32_fp32_integers.cpp
new file mode 100644
index 0000000000..d6ebf44f41
--- /dev/null
+++ b/src/core/NEON/kernels/convolution/winograd/winograd_transforms/output_2x2_3x3_fp32_fp32_integers.cpp
@@ -0,0 +1,222 @@
+/*
+ * Copyright (c) 2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "arm.hpp"
+#include "output.hpp"
+
+namespace winograd
+{
+
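+// This transform computes the output tile f = A^T F A for F(2x2, 3x3).
+// Reading the coefficients off the arithmetic below:
+//
+//         [1  1  1  0]
+//   A^T = [0  1 -1 -1]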
+template <>
+void OutputTransform<3, 3, 4, 4, float, float, WinogradRoots::Integers>::transform_tile(
+ const int n_channels,
+ const float* inptr,
+ const int matrix_stride,
+ const float* bptr,
+ float* const output,
+ const int output_row_stride,
+ const int output_col_stride
+)
+{
+ // Construct a map to the output cells
+ float *outptrs[output_tile_rows][output_tile_cols];
+ for (int i = 0; i < output_tile_rows; i++)
+ {
+ for (int j = 0; j < output_tile_cols; j++)
+ {
+ outptrs[i][j] = output + i*output_row_stride + j*output_col_stride;
+ }
+ }
+
+ // For each channel of the output
+ int channels_remaining = n_channels;
+#ifdef __aarch64__
+ for (; channels_remaining >= 4; channels_remaining -= 4)
+ {
+ // Matrices used and computed during this transform
+ float32x4_t F[4][4], FZ[4][2], f[2][2], b;
+
+ // Read a 4x4 tile in the Winograd domain
+ for (int i = 0, m = 0; i < 4; i++)
+ {
+ for (int j = 0; j < 4; j++, m++)
+ {
+ F[i][j] = vld1q_f32(inptr + m*matrix_stride);
+ }
+ }
+ inptr += 4;
+
+ // Compute the matrix F Z
+ for (int i = 0; i < 4; i++)
+ {
+ // FZ[i][0] = F[i][0] + F[i][1] + F[i][2];
+ FZ[i][0] = vaddq_f32(vaddq_f32(F[i][0], F[i][1]), F[i][2]);
+
+ // FZ[i][1] = F[i][1] - F[i][2] - F[i][3];
+ FZ[i][1] = vsubq_f32(vsubq_f32(F[i][1], F[i][2]), F[i][3]);
+ }
+
+ // Compute the output tile f = ZT F Z
+ for (int j = 0; j < 2; j++)
+ {
+ // f[0][j] = FZ[0][j] + FZ[1][j] + FZ[2][j];
+ f[0][j] = vaddq_f32(vaddq_f32(FZ[0][j], FZ[1][j]), FZ[2][j]);
+
+ // f[1][j] = FZ[1][j] - FZ[2][j] - FZ[3][j];
+ f[1][j] = vsubq_f32(vsubq_f32(FZ[1][j], FZ[2][j]), FZ[3][j]);
+ }
+
+ // Load the bias vector
+ if (bptr != nullptr)
+ {
+ b = vld1q_f32(bptr);
+ bptr += 4;
+ }
+ else
+ {
+ b = vdupq_n_f32(0.0f);
+ }
+
+ // Write out the output tile
+ for (int i = 0; i < output_tile_rows; i++)
+ {
+ for (int j = 0; j < output_tile_cols; j++)
+ {
+ vst1q_f32(outptrs[i][j], vaddq_f32(f[i][j], b));
+ outptrs[i][j] += 4;
+ }
+ }
+ }
+#endif // __aarch64__
+#ifdef __arm_any__
+ for (; channels_remaining >= 2; channels_remaining -= 2)
+ {
+ // Matrices used and computed during this transform
+ float32x2_t F[4][4], FZ[4][2], f[2][2], b;
+
+ // Read a 4x4 tile in the Winograd domain
+ for (int i = 0, m = 0; i < 4; i++)
+ {
+ for (int j = 0; j < 4; j++, m++)
+ {
+ F[i][j] = vld1_f32(inptr + m*matrix_stride);
+ }
+ }
+ inptr += 2;
+
+ // Compute the matrix F Z
+ for (int i = 0; i < 4; i++)
+ {
+ // FZ[i][0] = F[i][0] + F[i][1] + F[i][2];
+ FZ[i][0] = vadd_f32(vadd_f32(F[i][0], F[i][1]), F[i][2]);
+
+ // FZ[i][1] = F[i][1] - F[i][2] - F[i][3];
+ FZ[i][1] = vsub_f32(vsub_f32(F[i][1], F[i][2]), F[i][3]);
+ }
+
+ // Compute the output tile f = ZT F Z
+ for (int j = 0; j < 2; j++)
+ {
+ // f[0][j] = FZ[0][j] + FZ[1][j] + FZ[2][j];
+ f[0][j] = vadd_f32(vadd_f32(FZ[0][j], FZ[1][j]), FZ[2][j]);
+
+ // f[1][j] = FZ[1][j] - FZ[2][j] - FZ[3][j];
+ f[1][j] = vsub_f32(vsub_f32(FZ[1][j], FZ[2][j]), FZ[3][j]);
+ }
+
+ // Load the bias vector
+ if (bptr != nullptr)
+ {
+ b = vld1_f32(bptr);
+ bptr += 2;
+ }
+ else
+ {
+ b = vdup_n_f32(0.0f);
+ }
+
+ // Write out the output tile
+ for (int i = 0; i < output_tile_rows; i++)
+ {
+ for (int j = 0; j < output_tile_cols; j++)
+ {
+ vst1_f32(outptrs[i][j], vadd_f32(f[i][j], b));
+ outptrs[i][j] += 2;
+ }
+ }
+ }
+#endif // __arm_any__
+ for (; channels_remaining; channels_remaining--)
+ {
+ // Matrices used and computed during this transform
+ float F[4][4], FZ[4][2], f[2][2], b;
+
+ // Read a 4x4 tile in the Winograd domain
+ for (int i = 0, m = 0; i < 4; i++)
+ {
+ for (int j = 0; j < 4; j++, m++)
+ {
+ F[i][j] = *(inptr + m*matrix_stride);
+ }
+ }
+ inptr++;
+
+ // Compute the matrix F Z
+ for (int i = 0; i < 4; i++)
+ {
+ FZ[i][0] = F[i][0] + F[i][1] + F[i][2];
+ FZ[i][1] = F[i][1] - F[i][2] - F[i][3];
+ }
+
+ // Compute the output tile f = ZT F Z
+ for (int j = 0; j < 2; j++)
+ {
+ f[0][j] = FZ[0][j] + FZ[1][j] + FZ[2][j];
+ f[1][j] = FZ[1][j] - FZ[2][j] - FZ[3][j];
+ }
+
+ // Load the bias
+ if (bptr != nullptr)
+ {
+ b = *(bptr++);
+ }
+ else
+ {
+ b = 0.0f;
+ }
+
+ // Write out the output tile
+ for (int i = 0; i < output_tile_rows; i++)
+ {
+ for (int j = 0; j < output_tile_cols; j++)
+ {
+ *(outptrs[i][j]++) = f[i][j] + b;
+ }
+ }
+ }
+}
+
+template class OutputTransform<3, 3, 4, 4, float, float, WinogradRoots::Integers>;
+
+} // namespace winograd
diff --git a/src/core/NEON/kernels/convolution/winograd/winograd_transforms/output_2x2_5x5_fp32_fp32_integers.cpp b/src/core/NEON/kernels/convolution/winograd/winograd_transforms/output_2x2_5x5_fp32_fp32_integers.cpp
new file mode 100644
index 0000000000..d93d9e234a
--- /dev/null
+++ b/src/core/NEON/kernels/convolution/winograd/winograd_transforms/output_2x2_5x5_fp32_fp32_integers.cpp
@@ -0,0 +1,216 @@
+/*
+ * Copyright (c) 2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "output.hpp"
+#include "arm.hpp"
+
+namespace winograd
+{
+
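+// This transform computes the output tile f = A^T F A for F(2x2, 5x5).
+// Reading the coefficients off the arithmetic below:
+//
+//         [1  1  1  1  1  0]
+//   A^T = [0  1 -1  2 -2  1]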
+template <>
+void OutputTransform<5, 5, 6, 6, float, float, WinogradRoots::Integers>::transform_tile(
+ const int n_channels,
+ const float* inptr,
+ const int matrix_stride,
+ const float* bptr,
+ float* const output,
+ const int output_row_stride,
+ const int output_col_stride
+)
+{
+ // Construct a map to the output cells
+ float *outptrs[output_tile_rows][output_tile_cols];
+ for (int i = 0; i < output_tile_rows; i++)
+ {
+ for (int j = 0; j < output_tile_cols; j++)
+ {
+ outptrs[i][j] = output + i*output_row_stride + j*output_col_stride;
+ }
+ }
+
+ // For each channel of the output
+ int channels_remaining = n_channels;
+#ifdef __aarch64__
+ for (; channels_remaining >= 4; channels_remaining -= 4)
+ {
+ // Matrices used and computed during this transform
+ float32x4_t F[6][6], FZ[6][2], f[2][2], b;
+
+ // Read a 6x6 tile in the Winograd domain
+ for (int i = 0, m = 0; i < 6; i++)
+ {
+ for (int j = 0; j < 6; j++, m++)
+ {
+ F[i][j] = vld1q_f32(inptr + m*matrix_stride);
+ }
+ }
+ inptr += 4;
+
+ // Compute the matrix F Z
+ for (int i = 0; i < 6; i++)
+ {
+ // FZ[i][0] = 1*F[i][0] + 1*F[i][1] + 1*F[i][2] + 1*F[i][3] + 1*F[i][4];
+ FZ[i][0] = vaddq_f32(vaddq_f32(vaddq_f32(F[i][0], F[i][1]), vaddq_f32(F[i][2], F[i][3])), F[i][4]);
+
+ // FZ[i][1] = 1*F[i][1] + -1*F[i][2] + 2*F[i][3] + -2*F[i][4] + 1*F[i][5];
+ FZ[i][1] = vaddq_f32(vmlaq_n_f32(vsubq_f32(F[i][1], F[i][2]), vsubq_f32(F[i][3], F[i][4]), 2.0f), F[i][5]);
+ }
+
+ // Compute the output tile f = ZT F Z
+ for (int j = 0; j < 2; j++)
+ {
+ // f[0][j] = 1*FZ[0][j] + 1*FZ[1][j] + 1*FZ[2][j] + 1*FZ[3][j] + 1*FZ[4][j];
+ f[0][j] = vaddq_f32(vaddq_f32(vaddq_f32(FZ[0][j], FZ[1][j]), vaddq_f32(FZ[2][j], FZ[3][j])), FZ[4][j]);
+
+ // f[1][j] = 1*FZ[1][j] + -1*FZ[2][j] + 2*FZ[3][j] + -2*FZ[4][j] + 1*FZ[5][j];
+ f[1][j] = vaddq_f32(vmlaq_n_f32(vsubq_f32(FZ[1][j], FZ[2][j]), vsubq_f32(FZ[3][j], FZ[4][j]), 2.0f), FZ[5][j]);
+ }
+
+ // Write out the output tile
+ if (bptr != nullptr)
+ {
+ b = vld1q_f32(bptr);
+ bptr += 4;
+ }
+ else
+ {
+ b = vdupq_n_f32(0.0f);
+ }
+ for (int i = 0; i < output_tile_rows; i++)
+ {
+ for (int j = 0; j < output_tile_cols; j++)
+ {
+ vst1q_f32(outptrs[i][j], vaddq_f32(f[i][j], b));
+ outptrs[i][j] += 4;
+ }
+ }
+ }
+#endif // __aarch64__
+#ifdef __arm_any__
+ for (; channels_remaining >= 2; channels_remaining -= 2)
+ {
+ // Matrices used and computed during this transform
+ float32x2_t F[6][6], FZ[6][2], f[2][2], b;
+
+ // Read a 6x6 tile in the Winograd domain
+ for (int i = 0, m = 0; i < 6; i++)
+ {
+ for (int j = 0; j < 6; j++, m++)
+ {
+ F[i][j] = vld1_f32(inptr + m*matrix_stride);
+ }
+ }
+ inptr += 2;
+
+ // Compute the matrix F Z
+ for (int i = 0; i < 6; i++)
+ {
+ // FZ[i][0] = 1*F[i][0] + 1*F[i][1] + 1*F[i][2] + 1*F[i][3] + 1*F[i][4];
+ FZ[i][0] = vadd_f32(vadd_f32(vadd_f32(F[i][0], F[i][1]), vadd_f32(F[i][2], F[i][3])), F[i][4]);
+
+ // FZ[i][1] = 1*F[i][1] + -1*F[i][2] + 2*F[i][3] + -2*F[i][4] + 1*F[i][5];
+ FZ[i][1] = vadd_f32(vmla_n_f32(vsub_f32(F[i][1], F[i][2]), vsub_f32(F[i][3], F[i][4]), 2.0f), F[i][5]);
+ }
+
+ // Compute the output tile f = ZT F Z
+ for (int j = 0; j < 2; j++)
+ {
+ // f[0][j] = 1*FZ[0][j] + 1*FZ[1][j] + 1*FZ[2][j] + 1*FZ[3][j] + 1*FZ[4][j];
+ f[0][j] = vadd_f32(vadd_f32(vadd_f32(FZ[0][j], FZ[1][j]), vadd_f32(FZ[2][j], FZ[3][j])), FZ[4][j]);
+
+ // f[1][j] = 1*FZ[1][j] + -1*FZ[2][j] + 2*FZ[3][j] + -2*FZ[4][j] + 1*FZ[5][j];
+ f[1][j] = vadd_f32(vmla_n_f32(vsub_f32(FZ[1][j], FZ[2][j]), vsub_f32(FZ[3][j], FZ[4][j]), 2.0f), FZ[5][j]);
+ }
+
+ // Write out the output tile
+ if (bptr != nullptr)
+ {
+ b = vld1_f32(bptr);
+ bptr += 2;
+ }
+ else
+ {
+ b = vdup_n_f32(0.0f);
+ }
+ for (int i = 0; i < output_tile_rows; i++)
+ {
+ for (int j = 0; j < output_tile_cols; j++)
+ {
+ vst1_f32(outptrs[i][j], vadd_f32(f[i][j], b));
+ outptrs[i][j] += 2;
+ }
+ }
+ }
+#endif // __arm_any__
+ for (; channels_remaining; channels_remaining--)
+ {
+ // Matrices used and computed during this transform
+ float F[6][6], FZ[6][2], f[2][2], b;
+
+ // Read a 6x6 tile in the Winograd domain
+ for (int i = 0, m = 0; i < 6; i++)
+ {
+ for (int j = 0; j < 6; j++, m++)
+ {
+ F[i][j] = *(inptr + m*matrix_stride);
+ }
+ }
+ inptr++;
+
+ // Compute the matrix F Z
+ for (int i = 0; i < 6; i++)
+ {
+ FZ[i][0] = 1*F[i][0] + 1*F[i][1] + 1*F[i][2] + 1*F[i][3] + 1*F[i][4];
+ FZ[i][1] = 1*F[i][1] + -1*F[i][2] + 2*F[i][3] + -2*F[i][4] + 1*F[i][5];
+ }
+
+ // Compute the output tile f = ZT F Z
+ for (int j = 0; j < 2; j++)
+ {
+ f[0][j] = 1*FZ[0][j] + 1*FZ[1][j] + 1*FZ[2][j] + 1*FZ[3][j] + 1*FZ[4][j];
+ f[1][j] = 1*FZ[1][j] + -1*FZ[2][j] + 2*FZ[3][j] + -2*FZ[4][j] + 1*FZ[5][j];
+ }
+
+ // Write out the output tile
+ if (bptr != nullptr)
+ {
+ b = *(bptr++);
+ }
+ else
+ {
+ b = 0.0f;
+ }
+ for (int i = 0; i < output_tile_rows; i++)
+ {
+ for (int j = 0; j < output_tile_cols; j++)
+ {
+ *(outptrs[i][j]++) = f[i][j] + b;
+ }
+ }
+ }
+}
+
+template class OutputTransform<5, 5, 6, 6, float, float, WinogradRoots::Integers>;
+
+} // namespace winograd
diff --git a/src/core/NEON/kernels/convolution/winograd/transforms/output_4_5_fp32.cpp b/src/core/NEON/kernels/convolution/winograd/winograd_transforms/output_4_5_fp32_fp32_integers.cpp
index 911759b128..7187ef2d20 100644
--- a/src/core/NEON/kernels/convolution/winograd/transforms/output_4_5_fp32.cpp
+++ b/src/core/NEON/kernels/convolution/winograd/winograd_transforms/output_4_5_fp32_fp32_integers.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -22,42 +22,29 @@
* SOFTWARE.
*/
-#include "arm_compute/core/NEON/kernels/convolution/winograd/transforms/output.hpp"
-#include "arm_compute/core/NEON/kernels/convolution/winograd/winograd_output_transform.hpp"
-#include "arm_compute/core/NEON/kernels/convolution/common/arm.hpp"
+#include "output.hpp"
+#include "arm.hpp"
-namespace
+namespace winograd
{
-template <bool Specialized, int PadRight=0>
-void winograd_output_transform_4_5_fp32_process_tile(
+template <>
+void OutputTransform<1, 5, 1, 8, float, float, WinogradRoots::Integers>::transform_tile(
const int n_channels,
- const float* const matrix_base,
+ const float* inptr,
const int matrix_stride,
- const float* const biases,
+ const float* bptr,
float* const output,
- const int output_row_stride,
- const int output_col_stride,
- const int _pad_bottom,
- const int _pad_right
+ const int, // No need to stride across rows
+ const int output_col_stride
)
{
- (void) output_row_stride;
- (void) _pad_bottom;
- constexpr int output_tile_cols = 4;
- constexpr int inner_tile_cols = 8;
-
- const int pad_right = Specialized ? PadRight : _pad_right;
- const int cells_j = output_tile_cols - pad_right;
-
// Construct a map to the output cells
- float *outptrs[cells_j];
- for (int j = 0; j < cells_j; j++)
+ float *outptrs[output_tile_cols];
+ for (int j = 0; j < output_tile_cols; j++)
{
outptrs[j] = output + j*output_col_stride;
}
- const float *inptr = matrix_base;
- const float *bptr = biases;
// For each channel of the output
int channels_remaining = n_channels;
@@ -85,7 +72,7 @@ void winograd_output_transform_4_5_fp32_process_tile(
b = vld1q_f32(bptr);
bptr += 4;
}
- for (int j = 0; j < cells_j; j++)
+ for (int j = 0; j < output_tile_cols; j++)
{
vst1q_f32(outptrs[j], f[j] + b);
outptrs[j] += 4;
@@ -114,7 +101,7 @@ void winograd_output_transform_4_5_fp32_process_tile(
b = vld1_f32(bptr);
bptr += 2;
}
- for (int j = 0; j < cells_j; j++)
+ for (int j = 0; j < output_tile_cols; j++)
{
vst1_f32(outptrs[j], f[j] + b);
outptrs[j] += 2;
@@ -143,29 +130,14 @@ void winograd_output_transform_4_5_fp32_process_tile(
{
b = *(bptr++);
}
- for (int j = 0; j < cells_j; j++)
+ for (int j = 0; j < output_tile_cols; j++)
{
*(outptrs[j]++) = f[j] + b;
}
}
}
-} // namespace (anonymous)
-
-namespace winograd
-{
-using Tiles = OutputTransformImplTiles<1, 5, 1, 8, float>;
-
-template <>
-const Tiles::TileFn Tiles::tilefn_unpadded = winograd_output_transform_4_5_fp32_process_tile<true>;
-
-template <>
-const Tiles::TileFn Tiles::tilefn_right_padded[n_pad_right] = {
- winograd_output_transform_4_5_fp32_process_tile<true, 1>,
- winograd_output_transform_4_5_fp32_process_tile<true, 2>,
- winograd_output_transform_4_5_fp32_process_tile<true, 3>
-};
+template class OutputTransform<1, 5, 1, 8, float, float, WinogradRoots::Integers>;
+template class OutputTransform<5, 1, 8, 1, float, float, WinogradRoots::Integers>;
-template class OutputTransform<1, 5, 1, 8, float>;
-template class OutputTransform<5, 1, 8, 1, float>;
} // namespace winograd
diff --git a/src/core/NEON/kernels/convolution/winograd/winograd_transforms/output_4x4_3x3_fp32_fp32_integers.cpp b/src/core/NEON/kernels/convolution/winograd/winograd_transforms/output_4x4_3x3_fp32_fp32_integers.cpp
new file mode 100644
index 0000000000..fd16a4df1c
--- /dev/null
+++ b/src/core/NEON/kernels/convolution/winograd/winograd_transforms/output_4x4_3x3_fp32_fp32_integers.cpp
@@ -0,0 +1,1855 @@
+/*
+ * Copyright (c) 2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "arm.hpp"
+#include "output.hpp"
+
+namespace winograd
+{
+
+#ifdef __aarch64__
+
+template <>
+void OutputTransform<3, 3, 6, 6, float, float, winograd::WinogradRoots::Integers>::transform_tile(
+ int n_channels,
+ const float* inptr,
+ const int matrix_stride,
+ const float* bptr,
+ float* output,
+ const int output_row_stride,
+ const int output_col_stride
+)
+{
+ const float coeffs[2] = {2.0f, 4.0f};
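+ // These constants are loaded into d0 at the top of each assembly block
+ // below: v0.s[0] (2.0f) feeds the fmul instructions and v0.s[1] (4.0f)
+ // the fmla instructions. Reading the arithmetic back, the transform
+ // applied is presumably the usual one for 4x4 outputs from 6x6 tiles,
+ //   Z^T = [ 1  1  1  1  1  0 ]
+ //         [ 0  1 -1  2 -2  0 ]
+ //         [ 0  1  1  4  4  0 ]
+ //         [ 0  1 -1  8 -8  1 ]
+ // with the factor 8 formed as 2 * 4 by an fmul followed by an fmla.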
+ if (bptr != nullptr)
+ {
+ __asm__ __volatile__ (
+ "ldr d0, [%[pcoeffs]]\n"
+ "add x21, %[in_col_stride1], %[in_col_stride1]\n"
+ "add x22, x21, %[in_col_stride1]\n"
+ "add x25, %[inptr0], %[in_row_stride]\n"
+ "add x15, %[output_col_stride1], %[output_col_stride1]\n"
+ "add x23, x22, %[in_col_stride1]\n"
+ "add x13, x25, %[in_row_stride]\n"
+ "add x16, x15, %[output_col_stride1]\n"
+ "add x24, x23, %[in_col_stride1]\n"
+ "add x26, x13, %[in_row_stride]\n"
+ "add x17, %[outptr0], %[output_row_stride]\n"
+ "add x14, x26, %[in_row_stride]\n"
+ "add x28, x17, %[output_row_stride]\n"
+ "lsr x19, %[n_channels], #2\n"
+ "add x27, x14, %[in_row_stride]\n"
+ "add x18, x28, %[output_row_stride]\n"
+ "and x20, %[n_channels], #3\n"
+ "cbz x19, 4f\n"
+ "1:\n"
+ "ldr q19, [%[inptr0]]\n"
+ "subs x19, x19, #1\n"
+ "ldr q20, [%[inptr0], %[in_col_stride1]]\n"
+ "ldr q4, [%[inptr0], x21]\n"
+ "fadd v1.4s, v20.4s, v4.4s\n"
+ "ldr q17, [%[inptr0], x22]\n"
+ "fsub v7.4s, v20.4s, v4.4s\n"
+ "ldr q22, [%[inptr0], x23]\n"
+ "fadd v5.4s, v17.4s, v22.4s\n"
+ "ldr q18, [%[inptr0], x24]\n"
+ "fsub v10.4s, v17.4s, v22.4s\n"
+ "ldr q25, [x25]\n"
+ "fadd v8.4s, v19.4s, v1.4s\n"
+ "ldr q12, [x25, %[in_col_stride1]]\n"
+ "mov v4.16b, v1.16b\n"
+ "ldr q23, [x25, x21]\n"
+ "mov v1.16b, v7.16b\n"
+ "ldr q9, [x25, x22]\n"
+ "fmul v10.4s, v10.4s, v0.s[0]\n"
+ "ldr q11, [x25, x23]\n"
+ "fadd v8.4s, v8.4s, v5.4s\n"
+ "ldr q6, [x25, x24]\n"
+ "fmla v4.4s, v5.4s, v0.s[1]\n"
+ "fadd v7.4s, v7.4s, v10.4s\n"
+ "fmla v1.4s, v10.4s, v0.s[1]\n"
+ "fadd v1.4s, v1.4s, v18.4s\n"
+ "beq 3f\n"
+ "2:\n"
+ "fadd v3.4s, v12.4s, v23.4s\n"
+ "ldr q2, [x13]\n"
+ "fadd v27.4s, v9.4s, v11.4s\n"
+ "ldr q21, [x13, %[in_col_stride1]]\n"
+ "fsub v16.4s, v12.4s, v23.4s\n"
+ "ldr q26, [x13, x21]\n"
+ "fsub v9.4s, v9.4s, v11.4s\n"
+ "ldr q17, [x13, x22]\n"
+ "fadd v14.4s, v25.4s, v3.4s\n"
+ "ldr q19, [x13, x23]\n"
+ "mov v11.16b, v3.16b\n"
+ "ldr q10, [x13, x24]\n"
+ "mov v3.16b, v16.16b\n"
+ "ldr q15, [x26]\n"
+ "fmul v9.4s, v9.4s, v0.s[0]\n"
+ "ldr q12, [x26, %[in_col_stride1]]\n"
+ "fadd v14.4s, v14.4s, v27.4s\n"
+ "ldr q20, [x26, x21]\n"
+ "fmla v11.4s, v27.4s, v0.s[1]\n"
+ "ldr q24, [x26, x22]\n"
+ "fadd v23.4s, v21.4s, v26.4s\n"
+ "ldr q29, [x26, x23]\n"
+ "fadd v13.4s, v16.4s, v9.4s\n"
+ "ldr q5, [x26, x24]\n"
+ "fmla v3.4s, v9.4s, v0.s[1]\n"
+ "ldr q18, [x14]\n"
+ "fadd v30.4s, v17.4s, v19.4s\n"
+ "add %[inptr0], %[inptr0], #16\n"
+ "fadd v16.4s, v2.4s, v23.4s\n"
+ "add x25, x25, #16\n"
+ "fsub v21.4s, v21.4s, v26.4s\n"
+ "ldr q22, [x14, %[in_col_stride1]]\n"
+ "fadd v3.4s, v3.4s, v6.4s\n"
+ "ldr q28, [x14, x21]\n"
+ "fsub v19.4s, v17.4s, v19.4s\n"
+ "add x13, x13, #16\n"
+ "fadd v16.4s, v16.4s, v30.4s\n"
+ "add x26, x26, #16\n"
+ "mov v17.16b, v23.16b\n"
+ "subs x19, x19, #1\n"
+ "fadd v26.4s, v12.4s, v20.4s\n"
+ "fsub v9.4s, v12.4s, v20.4s\n"
+ "fmul v19.4s, v19.4s, v0.s[0]\n"
+ "ldr q20, [x14, x22]\n"
+ "fmla v17.4s, v30.4s, v0.s[1]\n"
+ "fadd v25.4s, v24.4s, v29.4s\n"
+ "fsub v12.4s, v24.4s, v29.4s\n"
+ "fadd v24.4s, v22.4s, v28.4s\n"
+ "fadd v23.4s, v15.4s, v26.4s\n"
+ "mov v15.16b, v26.16b\n"
+ "fsub v22.4s, v22.4s, v28.4s\n"
+ "fadd v29.4s, v14.4s, v16.4s\n"
+ "fsub v16.4s, v14.4s, v16.4s\n"
+ "ldr q28, [x14, x23]\n"
+ "fmul v12.4s, v12.4s, v0.s[0]\n"
+ "fmla v15.4s, v25.4s, v0.s[1]\n"
+ "fadd v23.4s, v23.4s, v25.4s\n"
+ "mov v6.16b, v21.16b\n"
+ "fadd v30.4s, v21.4s, v19.4s\n"
+ "fadd v26.4s, v18.4s, v24.4s\n"
+ "mov v25.16b, v24.16b\n"
+ "fadd v18.4s, v8.4s, v29.4s\n"
+ "fmla v6.4s, v19.4s, v0.s[1]\n"
+ "fadd v27.4s, v20.4s, v28.4s\n"
+ "fsub v21.4s, v20.4s, v28.4s\n"
+ "mov v19.16b, v29.16b\n"
+ "fadd v29.4s, v13.4s, v30.4s\n"
+ "fsub v8.4s, v13.4s, v30.4s\n"
+ "fadd v14.4s, v9.4s, v12.4s\n"
+ "fadd v6.4s, v6.4s, v10.4s\n"
+ "ldr q20, [x14, x24]\n"
+ "fadd v26.4s, v26.4s, v27.4s\n"
+ "add x14, x14, #16\n"
+ "fmla v9.4s, v12.4s, v0.s[1]\n"
+ "ldr q24, [x27]\n"
+ "fmul v21.4s, v21.4s, v0.s[0]\n"
+ "fmla v25.4s, v27.4s, v0.s[1]\n"
+ "fadd v10.4s, v7.4s, v29.4s\n"
+ "ldr q2, [%[bptr]]\n"
+ "mov v7.16b, v29.16b\n"
+ "add %[bptr], %[bptr], #16\n"
+ "fadd v9.4s, v9.4s, v5.4s\n"
+ "fadd v13.4s, v23.4s, v26.4s\n"
+ "fsub v23.4s, v23.4s, v26.4s\n"
+ "fadd v27.4s, v11.4s, v17.4s\n"
+ "fsub v11.4s, v11.4s, v17.4s\n"
+ "fadd v30.4s, v15.4s, v25.4s\n"
+ "fsub v15.4s, v15.4s, v25.4s\n"
+ "ldr q28, [x27, %[in_col_stride1]]\n"
+ "fadd v18.4s, v18.4s, v13.4s\n"
+ "fmla v19.4s, v13.4s, v0.s[1]\n"
+ "fadd v26.4s, v22.4s, v21.4s\n"
+ "mov v12.16b, v22.16b\n"
+ "fmul v23.4s, v23.4s, v0.s[0]\n"
+ "fadd v17.4s, v4.4s, v27.4s\n"
+ "fmul v15.4s, v15.4s, v0.s[0]\n"
+ "mov v4.16b, v27.16b\n"
+ "fmla v12.4s, v21.4s, v0.s[1]\n"
+ "ldr q22, [x27, x21]\n"
+ "fadd v18.4s, v18.4s, v2.4s\n"
+ "fadd v19.4s, v19.4s, v2.4s\n"
+ "fadd v17.4s, v17.4s, v30.4s\n"
+ "fmla v4.4s, v30.4s, v0.s[1]\n"
+ "fadd v25.4s, v28.4s, v22.4s\n"
+ "fsub v27.4s, v28.4s, v22.4s\n"
+ "fadd v12.4s, v12.4s, v20.4s\n"
+ "ldr q29, [x27, x22]\n"
+ "str q18, [%[outptr0]]\n"
+ "fadd v22.4s, v16.4s, v23.4s\n"
+ "str q19, [x28]\n"
+ "fadd v28.4s, v24.4s, v25.4s\n"
+ "ldr q30, [x27, x23]\n"
+ "fadd v20.4s, v29.4s, v30.4s\n"
+ "fsub v18.4s, v29.4s, v30.4s\n"
+ "mov v21.16b, v25.16b\n"
+ "ldr q25, [x27, x24]\n"
+ "fmla v16.4s, v23.4s, v0.s[1]\n"
+ "ldr q19, [%[inptr0]]\n"
+ "fadd v17.4s, v17.4s, v2.4s\n"
+ "add x27, x27, #16\n"
+ "fadd v28.4s, v28.4s, v20.4s\n"
+ "fmul v18.4s, v18.4s, v0.s[0]\n"
+ "fmla v21.4s, v20.4s, v0.s[1]\n"
+ "ldr q20, [%[inptr0], %[in_col_stride1]]\n"
+ "fadd v22.4s, v22.4s, v2.4s\n"
+ "fadd v4.4s, v4.4s, v2.4s\n"
+ "str q17, [%[outptr0], x15]\n"
+ "mov v24.16b, v27.16b\n"
+ "fadd v23.4s, v27.4s, v18.4s\n"
+ "fadd v16.4s, v16.4s, v28.4s\n"
+ "fadd v13.4s, v14.4s, v26.4s\n"
+ "fsub v30.4s, v14.4s, v26.4s\n"
+ "str q22, [x17]\n"
+ "fmla v24.4s, v18.4s, v0.s[1]\n"
+ "str q4, [x28, x15]\n"
+ "mov v14.16b, v8.16b\n"
+ "fadd v29.4s, v11.4s, v15.4s\n"
+ "ldr q4, [%[inptr0], x21]\n"
+ "fadd v10.4s, v10.4s, v13.4s\n"
+ "ldr q17, [%[inptr0], x22]\n"
+ "fadd v24.4s, v24.4s, v25.4s\n"
+ "ldr q22, [%[inptr0], x23]\n"
+ "fmul v30.4s, v30.4s, v0.s[0]\n"
+ "fmla v7.4s, v13.4s, v0.s[1]\n"
+ "mov v26.16b, v11.16b\n"
+ "fadd v13.4s, v3.4s, v6.4s\n"
+ "fsub v3.4s, v3.4s, v6.4s\n"
+ "ldr q18, [%[inptr0], x24]\n"
+ "fadd v10.4s, v10.4s, v2.4s\n"
+ "fadd v29.4s, v29.4s, v2.4s\n"
+ "fadd v8.4s, v8.4s, v30.4s\n"
+ "fmla v14.4s, v30.4s, v0.s[1]\n"
+ "fmla v26.4s, v15.4s, v0.s[1]\n"
+ "ldr q25, [x25]\n"
+ "fadd v27.4s, v9.4s, v12.4s\n"
+ "fadd v1.4s, v1.4s, v13.4s\n"
+ "str q10, [%[outptr0], %[output_col_stride1]]\n"
+ "fsub v6.4s, v9.4s, v12.4s\n"
+ "str q29, [x17, x15]\n"
+ "fadd v14.4s, v14.4s, v23.4s\n"
+ "fadd v26.4s, v26.4s, v21.4s\n"
+ "ldr q12, [x25, %[in_col_stride1]]\n"
+ "fadd v1.4s, v1.4s, v27.4s\n"
+ "ldr q23, [x25, x21]\n"
+ "fmul v6.4s, v6.4s, v0.s[0]\n"
+ "ldr q9, [x25, x22]\n"
+ "mov v5.16b, v13.16b\n"
+ "ldr q11, [x25, x23]\n"
+ "mov v13.16b, v3.16b\n"
+ "fadd v8.4s, v8.4s, v2.4s\n"
+ "fadd v1.4s, v1.4s, v2.4s\n"
+ "fadd v7.4s, v7.4s, v2.4s\n"
+ "fadd v10.4s, v3.4s, v6.4s\n"
+ "fmla v5.4s, v27.4s, v0.s[1]\n"
+ "fmla v13.4s, v6.4s, v0.s[1]\n"
+ "ldr q6, [x25, x24]\n"
+ "str q8, [x17, %[output_col_stride1]]\n"
+ "fadd v16.4s, v16.4s, v2.4s\n"
+ "str q1, [%[outptr0], x16]\n"
+ "fadd v14.4s, v14.4s, v2.4s\n"
+ "str q7, [x28, %[output_col_stride1]]\n"
+ "fadd v10.4s, v10.4s, v2.4s\n"
+ "fadd v13.4s, v13.4s, v24.4s\n"
+ "add %[outptr0], %[outptr0], #16\n"
+ "str q16, [x18]\n"
+ "fadd v5.4s, v5.4s, v2.4s\n"
+ "str q14, [x18, %[output_col_stride1]]\n"
+ "fadd v26.4s, v26.4s, v2.4s\n"
+ "str q10, [x17, x16]\n"
+ "fadd v1.4s, v20.4s, v4.4s\n"
+ "fadd v13.4s, v13.4s, v2.4s\n"
+ "add x17, x17, #16\n"
+ "str q5, [x28, x16]\n"
+ "fadd v5.4s, v17.4s, v22.4s\n"
+ "str q26, [x18, x15]\n"
+ "fsub v7.4s, v20.4s, v4.4s\n"
+ "fadd v8.4s, v19.4s, v1.4s\n"
+ "add x28, x28, #16\n"
+ "str q13, [x18, x16]\n"
+ "mov v4.16b, v1.16b\n"
+ "fsub v10.4s, v17.4s, v22.4s\n"
+ "add x18, x18, #16\n"
+ "mov v1.16b, v7.16b\n"
+ "fadd v8.4s, v8.4s, v5.4s\n"
+ "fmla v4.4s, v5.4s, v0.s[1]\n"
+ "fmul v10.4s, v10.4s, v0.s[0]\n"
+ "fadd v7.4s, v7.4s, v10.4s\n"
+ "fmla v1.4s, v10.4s, v0.s[1]\n"
+ "fadd v1.4s, v1.4s, v18.4s\n"
+ "bne 2b\n"
+ "3:\n"
+ "fadd v3.4s, v12.4s, v23.4s\n"
+ "ldr q2, [x13]\n"
+ "fadd v27.4s, v9.4s, v11.4s\n"
+ "ldr q21, [x13, %[in_col_stride1]]\n"
+ "fsub v16.4s, v12.4s, v23.4s\n"
+ "ldr q26, [x13, x21]\n"
+ "fsub v9.4s, v9.4s, v11.4s\n"
+ "ldr q17, [x13, x22]\n"
+ "fadd v14.4s, v25.4s, v3.4s\n"
+ "ldr q19, [x13, x23]\n"
+ "mov v11.16b, v3.16b\n"
+ "ldr q10, [x13, x24]\n"
+ "mov v3.16b, v16.16b\n"
+ "ldr q15, [x26]\n"
+ "fmul v9.4s, v9.4s, v0.s[0]\n"
+ "ldr q12, [x26, %[in_col_stride1]]\n"
+ "fadd v14.4s, v14.4s, v27.4s\n"
+ "ldr q20, [x26, x21]\n"
+ "fmla v11.4s, v27.4s, v0.s[1]\n"
+ "ldr q24, [x26, x22]\n"
+ "fadd v23.4s, v21.4s, v26.4s\n"
+ "ldr q29, [x26, x23]\n"
+ "fadd v13.4s, v16.4s, v9.4s\n"
+ "ldr q5, [x26, x24]\n"
+ "fmla v3.4s, v9.4s, v0.s[1]\n"
+ "ldr q18, [x14]\n"
+ "fadd v30.4s, v17.4s, v19.4s\n"
+ "add %[inptr0], %[inptr0], #16\n"
+ "fadd v16.4s, v2.4s, v23.4s\n"
+ "add x25, x25, #16\n"
+ "fsub v21.4s, v21.4s, v26.4s\n"
+ "ldr q22, [x14, %[in_col_stride1]]\n"
+ "fadd v3.4s, v3.4s, v6.4s\n"
+ "ldr q28, [x14, x21]\n"
+ "fsub v19.4s, v17.4s, v19.4s\n"
+ "add x13, x13, #16\n"
+ "fadd v16.4s, v16.4s, v30.4s\n"
+ "add x26, x26, #16\n"
+ "mov v17.16b, v23.16b\n"
+ "fadd v26.4s, v12.4s, v20.4s\n"
+ "fsub v9.4s, v12.4s, v20.4s\n"
+ "ldr q2, [%[bptr]]\n"
+ "fmul v19.4s, v19.4s, v0.s[0]\n"
+ "add %[bptr], %[bptr], #16\n"
+ "fmla v17.4s, v30.4s, v0.s[1]\n"
+ "fadd v25.4s, v24.4s, v29.4s\n"
+ "fadd v23.4s, v15.4s, v26.4s\n"
+ "fsub v12.4s, v24.4s, v29.4s\n"
+ "mov v15.16b, v26.16b\n"
+ "fadd v24.4s, v22.4s, v28.4s\n"
+ "fsub v22.4s, v22.4s, v28.4s\n"
+ "fadd v29.4s, v14.4s, v16.4s\n"
+ "fsub v16.4s, v14.4s, v16.4s\n"
+ "ldr q20, [x14, x22]\n"
+ "fadd v23.4s, v23.4s, v25.4s\n"
+ "fmul v12.4s, v12.4s, v0.s[0]\n"
+ "fmla v15.4s, v25.4s, v0.s[1]\n"
+ "mov v6.16b, v21.16b\n"
+ "fadd v30.4s, v21.4s, v19.4s\n"
+ "fadd v26.4s, v18.4s, v24.4s\n"
+ "mov v25.16b, v24.16b\n"
+ "fadd v18.4s, v8.4s, v29.4s\n"
+ "fmla v6.4s, v19.4s, v0.s[1]\n"
+ "mov v19.16b, v29.16b\n"
+ "fadd v27.4s, v11.4s, v17.4s\n"
+ "fsub v11.4s, v11.4s, v17.4s\n"
+ "fadd v29.4s, v13.4s, v30.4s\n"
+ "fsub v8.4s, v13.4s, v30.4s\n"
+ "fadd v14.4s, v9.4s, v12.4s\n"
+ "fadd v6.4s, v6.4s, v10.4s\n"
+ "ldr q28, [x14, x23]\n"
+ "fadd v17.4s, v4.4s, v27.4s\n"
+ "mov v4.16b, v27.16b\n"
+ "fmla v9.4s, v12.4s, v0.s[1]\n"
+ "fadd v27.4s, v20.4s, v28.4s\n"
+ "fsub v21.4s, v20.4s, v28.4s\n"
+ "fadd v10.4s, v7.4s, v29.4s\n"
+ "mov v7.16b, v29.16b\n"
+ "fadd v13.4s, v3.4s, v6.4s\n"
+ "fsub v3.4s, v3.4s, v6.4s\n"
+ "ldr q20, [x14, x24]\n"
+ "fadd v9.4s, v9.4s, v5.4s\n"
+ "fadd v26.4s, v26.4s, v27.4s\n"
+ "fmul v21.4s, v21.4s, v0.s[0]\n"
+ "add x14, x14, #16\n"
+ "fmla v25.4s, v27.4s, v0.s[1]\n"
+ "mov v12.16b, v22.16b\n"
+ "fadd v1.4s, v1.4s, v13.4s\n"
+ "mov v5.16b, v13.16b\n"
+ "fadd v13.4s, v23.4s, v26.4s\n"
+ "fsub v23.4s, v23.4s, v26.4s\n"
+ "fadd v26.4s, v22.4s, v21.4s\n"
+ "ldr q24, [x27]\n"
+ "fmla v12.4s, v21.4s, v0.s[1]\n"
+ "fadd v30.4s, v15.4s, v25.4s\n"
+ "fsub v15.4s, v15.4s, v25.4s\n"
+ "ldr q28, [x27, %[in_col_stride1]]\n"
+ "fadd v18.4s, v18.4s, v13.4s\n"
+ "fmul v23.4s, v23.4s, v0.s[0]\n"
+ "fmla v19.4s, v13.4s, v0.s[1]\n"
+ "ldr q22, [x27, x21]\n"
+ "fadd v12.4s, v12.4s, v20.4s\n"
+ "ldr q29, [x27, x22]\n"
+ "fadd v17.4s, v17.4s, v30.4s\n"
+ "fmul v15.4s, v15.4s, v0.s[0]\n"
+ "fmla v4.4s, v30.4s, v0.s[1]\n"
+ "fadd v25.4s, v28.4s, v22.4s\n"
+ "fsub v27.4s, v28.4s, v22.4s\n"
+ "fadd v22.4s, v16.4s, v23.4s\n"
+ "fadd v18.4s, v18.4s, v2.4s\n"
+ "fadd v17.4s, v17.4s, v2.4s\n"
+ "fadd v19.4s, v19.4s, v2.4s\n"
+ "fadd v28.4s, v24.4s, v25.4s\n"
+ "mov v21.16b, v25.16b\n"
+ "fmla v16.4s, v23.4s, v0.s[1]\n"
+ "ldr q30, [x27, x23]\n"
+ "str q18, [%[outptr0]]\n"
+ "fadd v20.4s, v29.4s, v30.4s\n"
+ "str q17, [%[outptr0], x15]\n"
+ "fsub v18.4s, v29.4s, v30.4s\n"
+ "str q19, [x28]\n"
+ "mov v24.16b, v27.16b\n"
+ "fadd v13.4s, v14.4s, v26.4s\n"
+ "ldr q25, [x27, x24]\n"
+ "fadd v28.4s, v28.4s, v20.4s\n"
+ "add x27, x27, #16\n"
+ "fmul v18.4s, v18.4s, v0.s[0]\n"
+ "fmla v21.4s, v20.4s, v0.s[1]\n"
+ "fsub v30.4s, v14.4s, v26.4s\n"
+ "mov v14.16b, v8.16b\n"
+ "fadd v10.4s, v10.4s, v13.4s\n"
+ "fmla v7.4s, v13.4s, v0.s[1]\n"
+ "fadd v16.4s, v16.4s, v28.4s\n"
+ "fadd v29.4s, v11.4s, v15.4s\n"
+ "fadd v23.4s, v27.4s, v18.4s\n"
+ "fmla v24.4s, v18.4s, v0.s[1]\n"
+ "fmul v30.4s, v30.4s, v0.s[0]\n"
+ "mov v26.16b, v11.16b\n"
+ "fadd v27.4s, v9.4s, v12.4s\n"
+ "fsub v6.4s, v9.4s, v12.4s\n"
+ "mov v13.16b, v3.16b\n"
+ "fadd v10.4s, v10.4s, v2.4s\n"
+ "fadd v24.4s, v24.4s, v25.4s\n"
+ "fmla v26.4s, v15.4s, v0.s[1]\n"
+ "fadd v8.4s, v8.4s, v30.4s\n"
+ "fmla v14.4s, v30.4s, v0.s[1]\n"
+ "fadd v1.4s, v1.4s, v27.4s\n"
+ "fmul v6.4s, v6.4s, v0.s[0]\n"
+ "str q10, [%[outptr0], %[output_col_stride1]]\n"
+ "fmla v5.4s, v27.4s, v0.s[1]\n"
+ "fadd v26.4s, v26.4s, v21.4s\n"
+ "fadd v22.4s, v22.4s, v2.4s\n"
+ "fadd v14.4s, v14.4s, v23.4s\n"
+ "fadd v8.4s, v8.4s, v2.4s\n"
+ "fadd v10.4s, v3.4s, v6.4s\n"
+ "fmla v13.4s, v6.4s, v0.s[1]\n"
+ "fadd v1.4s, v1.4s, v2.4s\n"
+ "fadd v29.4s, v29.4s, v2.4s\n"
+ "str q22, [x17]\n"
+ "fadd v7.4s, v7.4s, v2.4s\n"
+ "str q8, [x17, %[output_col_stride1]]\n"
+ "fadd v4.4s, v4.4s, v2.4s\n"
+ "fadd v13.4s, v13.4s, v24.4s\n"
+ "fadd v10.4s, v10.4s, v2.4s\n"
+ "str q1, [%[outptr0], x16]\n"
+ "fadd v5.4s, v5.4s, v2.4s\n"
+ "str q29, [x17, x15]\n"
+ "fadd v16.4s, v16.4s, v2.4s\n"
+ "str q7, [x28, %[output_col_stride1]]\n"
+ "fadd v14.4s, v14.4s, v2.4s\n"
+ "str q10, [x17, x16]\n"
+ "fadd v26.4s, v26.4s, v2.4s\n"
+ "str q4, [x28, x15]\n"
+ "fadd v13.4s, v13.4s, v2.4s\n"
+ "str q5, [x28, x16]\n"
+ "add %[outptr0], %[outptr0], #16\n"
+ "str q16, [x18]\n"
+ "add x17, x17, #16\n"
+ "str q14, [x18, %[output_col_stride1]]\n"
+ "add x28, x28, #16\n"
+ "str q26, [x18, x15]\n"
+ "str q13, [x18, x16]\n"
+ "add x18, x18, #16\n"
+ "4:\n"
+ "cmp x20, #2\n"
+ "blt 5f\n"
+ "ldr d19, [%[inptr0]]\n"
+ "ldr d20, [%[inptr0], %[in_col_stride1]]\n"
+ "sub x20, x20, #2\n"
+ "ldr d4, [%[inptr0], x21]\n"
+ "ldr d17, [%[inptr0], x22]\n"
+ "fadd v1.4s, v20.4s, v4.4s\n"
+ "ldr d22, [%[inptr0], x23]\n"
+ "fadd v5.4s, v17.4s, v22.4s\n"
+ "ldr d18, [%[inptr0], x24]\n"
+ "fsub v7.4s, v20.4s, v4.4s\n"
+ "ldr d25, [x25]\n"
+ "fsub v10.4s, v17.4s, v22.4s\n"
+ "ldr d12, [x25, %[in_col_stride1]]\n"
+ "fadd v8.4s, v19.4s, v1.4s\n"
+ "ldr d23, [x25, x21]\n"
+ "mov v4.16b, v1.16b\n"
+ "ldr d9, [x25, x22]\n"
+ "mov v1.16b, v7.16b\n"
+ "ldr d11, [x25, x23]\n"
+ "fmul v10.4s, v10.4s, v0.s[0]\n"
+ "ldr d6, [x25, x24]\n"
+ "fadd v8.4s, v8.4s, v5.4s\n"
+ "ldr d2, [x13]\n"
+ "fmla v4.4s, v5.4s, v0.s[1]\n"
+ "ldr d21, [x13, %[in_col_stride1]]\n"
+ "fadd v3.4s, v12.4s, v23.4s\n"
+ "ldr d26, [x13, x21]\n"
+ "fadd v7.4s, v7.4s, v10.4s\n"
+ "ldr d17, [x13, x22]\n"
+ "fmla v1.4s, v10.4s, v0.s[1]\n"
+ "ldr d19, [x13, x23]\n"
+ "fadd v27.4s, v9.4s, v11.4s\n"
+ "ldr d10, [x13, x24]\n"
+ "fadd v14.4s, v25.4s, v3.4s\n"
+ "ldr d15, [x26]\n"
+ "fsub v16.4s, v12.4s, v23.4s\n"
+ "ldr d12, [x26, %[in_col_stride1]]\n"
+ "fadd v1.4s, v1.4s, v18.4s\n"
+ "ldr d20, [x26, x21]\n"
+ "fsub v9.4s, v9.4s, v11.4s\n"
+ "ldr d24, [x26, x22]\n"
+ "fadd v14.4s, v14.4s, v27.4s\n"
+ "ldr d29, [x26, x23]\n"
+ "mov v11.16b, v3.16b\n"
+ "ldr d5, [x26, x24]\n"
+ "mov v3.16b, v16.16b\n"
+ "ldr d18, [x14]\n"
+ "fmul v9.4s, v9.4s, v0.s[0]\n"
+ "add %[inptr0], %[inptr0], #8\n"
+ "fmla v11.4s, v27.4s, v0.s[1]\n"
+ "add x25, x25, #8\n"
+ "fadd v23.4s, v21.4s, v26.4s\n"
+ "add x13, x13, #8\n"
+ "fsub v21.4s, v21.4s, v26.4s\n"
+ "ldr d22, [x14, %[in_col_stride1]]\n"
+ "fadd v13.4s, v16.4s, v9.4s\n"
+ "add x26, x26, #8\n"
+ "fmla v3.4s, v9.4s, v0.s[1]\n"
+ "fadd v30.4s, v17.4s, v19.4s\n"
+ "fadd v16.4s, v2.4s, v23.4s\n"
+ "fsub v19.4s, v17.4s, v19.4s\n"
+ "mov v17.16b, v23.16b\n"
+ "fadd v26.4s, v12.4s, v20.4s\n"
+ "fsub v9.4s, v12.4s, v20.4s\n"
+ "ldr d28, [x14, x21]\n"
+ "fadd v3.4s, v3.4s, v6.4s\n"
+ "ldr d20, [x14, x22]\n"
+ "fadd v16.4s, v16.4s, v30.4s\n"
+ "fmul v19.4s, v19.4s, v0.s[0]\n"
+ "fmla v17.4s, v30.4s, v0.s[1]\n"
+ "fadd v25.4s, v24.4s, v29.4s\n"
+ "fadd v23.4s, v15.4s, v26.4s\n"
+ "fsub v12.4s, v24.4s, v29.4s\n"
+ "mov v15.16b, v26.16b\n"
+ "fadd v24.4s, v22.4s, v28.4s\n"
+ "fsub v22.4s, v22.4s, v28.4s\n"
+ "fadd v29.4s, v14.4s, v16.4s\n"
+ "fsub v16.4s, v14.4s, v16.4s\n"
+ "ldr d28, [x14, x23]\n"
+ "fadd v23.4s, v23.4s, v25.4s\n"
+ "fmul v12.4s, v12.4s, v0.s[0]\n"
+ "fmla v15.4s, v25.4s, v0.s[1]\n"
+ "mov v6.16b, v21.16b\n"
+ "fadd v30.4s, v21.4s, v19.4s\n"
+ "fadd v26.4s, v18.4s, v24.4s\n"
+ "mov v25.16b, v24.16b\n"
+ "fadd v18.4s, v8.4s, v29.4s\n"
+ "fmla v6.4s, v19.4s, v0.s[1]\n"
+ "fadd v27.4s, v20.4s, v28.4s\n"
+ "fsub v21.4s, v20.4s, v28.4s\n"
+ "mov v19.16b, v29.16b\n"
+ "fadd v29.4s, v13.4s, v30.4s\n"
+ "fsub v8.4s, v13.4s, v30.4s\n"
+ "fadd v14.4s, v9.4s, v12.4s\n"
+ "fadd v6.4s, v6.4s, v10.4s\n"
+ "ldr d20, [x14, x24]\n"
+ "fadd v26.4s, v26.4s, v27.4s\n"
+ "add x14, x14, #8\n"
+ "fmla v9.4s, v12.4s, v0.s[1]\n"
+ "ldr d24, [x27]\n"
+ "fmul v21.4s, v21.4s, v0.s[0]\n"
+ "fmla v25.4s, v27.4s, v0.s[1]\n"
+ "fadd v10.4s, v7.4s, v29.4s\n"
+ "ldr d2, [%[bptr]]\n"
+ "mov v7.16b, v29.16b\n"
+ "add %[bptr], %[bptr], #8\n"
+ "fadd v9.4s, v9.4s, v5.4s\n"
+ "fadd v13.4s, v23.4s, v26.4s\n"
+ "fsub v23.4s, v23.4s, v26.4s\n"
+ "fadd v27.4s, v11.4s, v17.4s\n"
+ "fsub v11.4s, v11.4s, v17.4s\n"
+ "fadd v30.4s, v15.4s, v25.4s\n"
+ "fsub v15.4s, v15.4s, v25.4s\n"
+ "ldr d28, [x27, %[in_col_stride1]]\n"
+ "fadd v18.4s, v18.4s, v13.4s\n"
+ "fmla v19.4s, v13.4s, v0.s[1]\n"
+ "fadd v26.4s, v22.4s, v21.4s\n"
+ "mov v12.16b, v22.16b\n"
+ "fmul v23.4s, v23.4s, v0.s[0]\n"
+ "fadd v17.4s, v4.4s, v27.4s\n"
+ "fmul v15.4s, v15.4s, v0.s[0]\n"
+ "mov v4.16b, v27.16b\n"
+ "fmla v12.4s, v21.4s, v0.s[1]\n"
+ "ldr d22, [x27, x21]\n"
+ "fadd v18.4s, v18.4s, v2.4s\n"
+ "fadd v19.4s, v19.4s, v2.4s\n"
+ "fadd v17.4s, v17.4s, v30.4s\n"
+ "fmla v4.4s, v30.4s, v0.s[1]\n"
+ "fadd v25.4s, v28.4s, v22.4s\n"
+ "fsub v27.4s, v28.4s, v22.4s\n"
+ "fadd v12.4s, v12.4s, v20.4s\n"
+ "ldr d29, [x27, x22]\n"
+ "str d18, [%[outptr0]]\n"
+ "fadd v22.4s, v16.4s, v23.4s\n"
+ "str d19, [x28]\n"
+ "fadd v28.4s, v24.4s, v25.4s\n"
+ "ldr d30, [x27, x23]\n"
+ "fadd v20.4s, v29.4s, v30.4s\n"
+ "fsub v18.4s, v29.4s, v30.4s\n"
+ "mov v21.16b, v25.16b\n"
+ "ldr d25, [x27, x24]\n"
+ "fmla v16.4s, v23.4s, v0.s[1]\n"
+ "add x27, x27, #8\n"
+ "mov v24.16b, v27.16b\n"
+ "fadd v17.4s, v17.4s, v2.4s\n"
+ "fadd v28.4s, v28.4s, v20.4s\n"
+ "fmul v18.4s, v18.4s, v0.s[0]\n"
+ "fmla v21.4s, v20.4s, v0.s[1]\n"
+ "fadd v13.4s, v14.4s, v26.4s\n"
+ "fsub v30.4s, v14.4s, v26.4s\n"
+ "mov v14.16b, v8.16b\n"
+ "str d17, [%[outptr0], x15]\n"
+ "fadd v29.4s, v11.4s, v15.4s\n"
+ "fadd v23.4s, v27.4s, v18.4s\n"
+ "fmla v24.4s, v18.4s, v0.s[1]\n"
+ "fadd v16.4s, v16.4s, v28.4s\n"
+ "fadd v10.4s, v10.4s, v13.4s\n"
+ "fmul v30.4s, v30.4s, v0.s[0]\n"
+ "fmla v7.4s, v13.4s, v0.s[1]\n"
+ "mov v26.16b, v11.16b\n"
+ "fadd v13.4s, v3.4s, v6.4s\n"
+ "fadd v24.4s, v24.4s, v25.4s\n"
+ "fadd v27.4s, v9.4s, v12.4s\n"
+ "fsub v3.4s, v3.4s, v6.4s\n"
+ "fsub v6.4s, v9.4s, v12.4s\n"
+ "fadd v8.4s, v8.4s, v30.4s\n"
+ "fmla v14.4s, v30.4s, v0.s[1]\n"
+ "fmla v26.4s, v15.4s, v0.s[1]\n"
+ "fadd v1.4s, v1.4s, v13.4s\n"
+ "mov v5.16b, v13.16b\n"
+ "fadd v10.4s, v10.4s, v2.4s\n"
+ "fmul v6.4s, v6.4s, v0.s[0]\n"
+ "mov v13.16b, v3.16b\n"
+ "fadd v14.4s, v14.4s, v23.4s\n"
+ "fadd v22.4s, v22.4s, v2.4s\n"
+ "fadd v26.4s, v26.4s, v21.4s\n"
+ "fadd v1.4s, v1.4s, v27.4s\n"
+ "str d10, [%[outptr0], %[output_col_stride1]]\n"
+ "fmla v5.4s, v27.4s, v0.s[1]\n"
+ "fadd v10.4s, v3.4s, v6.4s\n"
+ "fmla v13.4s, v6.4s, v0.s[1]\n"
+ "str d22, [x17]\n"
+ "fadd v8.4s, v8.4s, v2.4s\n"
+ "fadd v1.4s, v1.4s, v2.4s\n"
+ "fadd v29.4s, v29.4s, v2.4s\n"
+ "fadd v7.4s, v7.4s, v2.4s\n"
+ "fadd v4.4s, v4.4s, v2.4s\n"
+ "fadd v13.4s, v13.4s, v24.4s\n"
+ "fadd v10.4s, v10.4s, v2.4s\n"
+ "str d8, [x17, %[output_col_stride1]]\n"
+ "fadd v5.4s, v5.4s, v2.4s\n"
+ "str d1, [%[outptr0], x16]\n"
+ "fadd v16.4s, v16.4s, v2.4s\n"
+ "str d29, [x17, x15]\n"
+ "fadd v14.4s, v14.4s, v2.4s\n"
+ "str d10, [x17, x16]\n"
+ "fadd v26.4s, v26.4s, v2.4s\n"
+ "str d7, [x28, %[output_col_stride1]]\n"
+ "fadd v13.4s, v13.4s, v2.4s\n"
+ "str d4, [x28, x15]\n"
+ "add %[outptr0], %[outptr0], #8\n"
+ "str d5, [x28, x16]\n"
+ "add x17, x17, #8\n"
+ "str d16, [x18]\n"
+ "add x28, x28, #8\n"
+ "str d14, [x18, %[output_col_stride1]]\n"
+ "str d26, [x18, x15]\n"
+ "str d13, [x18, x16]\n"
+ "add x18, x18, #8\n"
+ "5:\n"
+ "cbz x20, 6f\n"
+ "ldr s19, [%[inptr0]]\n"
+ "ldr s20, [%[inptr0], %[in_col_stride1]]\n"
+ "ldr s4, [%[inptr0], x21]\n"
+ "fadd v1.4s, v20.4s, v4.4s\n"
+ "ldr s17, [%[inptr0], x22]\n"
+ "fsub v7.4s, v20.4s, v4.4s\n"
+ "ldr s22, [%[inptr0], x23]\n"
+ "fadd v5.4s, v17.4s, v22.4s\n"
+ "ldr s18, [%[inptr0], x24]\n"
+ "fsub v10.4s, v17.4s, v22.4s\n"
+ "ldr s25, [x25]\n"
+ "fadd v8.4s, v19.4s, v1.4s\n"
+ "ldr s12, [x25, %[in_col_stride1]]\n"
+ "mov v4.16b, v1.16b\n"
+ "ldr s23, [x25, x21]\n"
+ "mov v1.16b, v7.16b\n"
+ "ldr s9, [x25, x22]\n"
+ "fmul v10.4s, v10.4s, v0.s[0]\n"
+ "ldr s11, [x25, x23]\n"
+ "fadd v8.4s, v8.4s, v5.4s\n"
+ "ldr s6, [x25, x24]\n"
+ "fmla v4.4s, v5.4s, v0.s[1]\n"
+ "ldr s2, [x13]\n"
+ "fadd v3.4s, v12.4s, v23.4s\n"
+ "ldr s21, [x13, %[in_col_stride1]]\n"
+ "fadd v7.4s, v7.4s, v10.4s\n"
+ "ldr s26, [x13, x21]\n"
+ "fmla v1.4s, v10.4s, v0.s[1]\n"
+ "ldr s17, [x13, x22]\n"
+ "fadd v27.4s, v9.4s, v11.4s\n"
+ "ldr s19, [x13, x23]\n"
+ "fadd v14.4s, v25.4s, v3.4s\n"
+ "ldr s10, [x13, x24]\n"
+ "fsub v16.4s, v12.4s, v23.4s\n"
+ "ldr s15, [x26]\n"
+ "fadd v1.4s, v1.4s, v18.4s\n"
+ "ldr s12, [x26, %[in_col_stride1]]\n"
+ "fsub v9.4s, v9.4s, v11.4s\n"
+ "ldr s20, [x26, x21]\n"
+ "fadd v14.4s, v14.4s, v27.4s\n"
+ "ldr s24, [x26, x22]\n"
+ "mov v11.16b, v3.16b\n"
+ "ldr s29, [x26, x23]\n"
+ "mov v3.16b, v16.16b\n"
+ "ldr s5, [x26, x24]\n"
+ "fmul v9.4s, v9.4s, v0.s[0]\n"
+ "ldr s18, [x14]\n"
+ "fmla v11.4s, v27.4s, v0.s[1]\n"
+ "fadd v23.4s, v21.4s, v26.4s\n"
+ "fsub v21.4s, v21.4s, v26.4s\n"
+ "fadd v30.4s, v17.4s, v19.4s\n"
+ "fsub v19.4s, v17.4s, v19.4s\n"
+ "ldr s22, [x14, %[in_col_stride1]]\n"
+ "fadd v13.4s, v16.4s, v9.4s\n"
+ "fmla v3.4s, v9.4s, v0.s[1]\n"
+ "fadd v16.4s, v2.4s, v23.4s\n"
+ "mov v17.16b, v23.16b\n"
+ "fadd v26.4s, v12.4s, v20.4s\n"
+ "fsub v9.4s, v12.4s, v20.4s\n"
+ "fmul v19.4s, v19.4s, v0.s[0]\n"
+ "ldr s28, [x14, x21]\n"
+ "fadd v3.4s, v3.4s, v6.4s\n"
+ "ldr s20, [x14, x22]\n"
+ "fadd v16.4s, v16.4s, v30.4s\n"
+ "fmla v17.4s, v30.4s, v0.s[1]\n"
+ "fadd v25.4s, v24.4s, v29.4s\n"
+ "fadd v23.4s, v15.4s, v26.4s\n"
+ "fsub v12.4s, v24.4s, v29.4s\n"
+ "mov v15.16b, v26.16b\n"
+ "fadd v24.4s, v22.4s, v28.4s\n"
+ "fsub v22.4s, v22.4s, v28.4s\n"
+ "fadd v30.4s, v21.4s, v19.4s\n"
+ "mov v6.16b, v21.16b\n"
+ "fadd v23.4s, v23.4s, v25.4s\n"
+ "fmla v15.4s, v25.4s, v0.s[1]\n"
+ "fmul v12.4s, v12.4s, v0.s[0]\n"
+ "ldr s28, [x14, x23]\n"
+ "fmla v6.4s, v19.4s, v0.s[1]\n"
+ "fadd v27.4s, v20.4s, v28.4s\n"
+ "fadd v26.4s, v18.4s, v24.4s\n"
+ "fsub v21.4s, v20.4s, v28.4s\n"
+ "mov v25.16b, v24.16b\n"
+ "fadd v29.4s, v14.4s, v16.4s\n"
+ "fsub v16.4s, v14.4s, v16.4s\n"
+ "ldr s20, [x14, x24]\n"
+ "fadd v6.4s, v6.4s, v10.4s\n"
+ "ldr s24, [x27]\n"
+ "fadd v26.4s, v26.4s, v27.4s\n"
+ "fmul v21.4s, v21.4s, v0.s[0]\n"
+ "fmla v25.4s, v27.4s, v0.s[1]\n"
+ "fadd v18.4s, v8.4s, v29.4s\n"
+ "mov v19.16b, v29.16b\n"
+ "fadd v29.4s, v13.4s, v30.4s\n"
+ "fsub v8.4s, v13.4s, v30.4s\n"
+ "fadd v27.4s, v11.4s, v17.4s\n"
+ "fsub v11.4s, v11.4s, v17.4s\n"
+ "fadd v13.4s, v23.4s, v26.4s\n"
+ "fsub v23.4s, v23.4s, v26.4s\n"
+ "ldr s28, [x27, %[in_col_stride1]]\n"
+ "fadd v10.4s, v7.4s, v29.4s\n"
+ "mov v7.16b, v29.16b\n"
+ "fadd v17.4s, v4.4s, v27.4s\n"
+ "mov v4.16b, v27.16b\n"
+ "fadd v18.4s, v18.4s, v13.4s\n"
+ "fmla v19.4s, v13.4s, v0.s[1]\n"
+ "fmul v23.4s, v23.4s, v0.s[0]\n"
+ "fadd v30.4s, v15.4s, v25.4s\n"
+ "fsub v15.4s, v15.4s, v25.4s\n"
+ "fadd v13.4s, v3.4s, v6.4s\n"
+ "fsub v3.4s, v3.4s, v6.4s\n"
+ "ldr s2, [%[bptr]]\n"
+ "fadd v18.4s, v18.4s, v2.4s\n"
+ "fadd v19.4s, v19.4s, v2.4s\n"
+ "fadd v17.4s, v17.4s, v30.4s\n"
+ "fmla v4.4s, v30.4s, v0.s[1]\n"
+ "fadd v14.4s, v9.4s, v12.4s\n"
+ "fmul v15.4s, v15.4s, v0.s[0]\n"
+ "fadd v1.4s, v1.4s, v13.4s\n"
+ "str s18, [%[outptr0]]\n"
+ "fadd v26.4s, v22.4s, v21.4s\n"
+ "str s19, [x28]\n"
+ "fmla v9.4s, v12.4s, v0.s[1]\n"
+ "mov v12.16b, v22.16b\n"
+ "ldr s22, [x27, x21]\n"
+ "fadd v25.4s, v28.4s, v22.4s\n"
+ "fsub v27.4s, v28.4s, v22.4s\n"
+ "fadd v22.4s, v16.4s, v23.4s\n"
+ "fadd v9.4s, v9.4s, v5.4s\n"
+ "ldr s29, [x27, x22]\n"
+ "fmla v12.4s, v21.4s, v0.s[1]\n"
+ "ldr s30, [x27, x23]\n"
+ "fadd v28.4s, v24.4s, v25.4s\n"
+ "mov v21.16b, v25.16b\n"
+ "fmla v16.4s, v23.4s, v0.s[1]\n"
+ "ldr s25, [x27, x24]\n"
+ "mov v5.16b, v13.16b\n"
+ "fadd v17.4s, v17.4s, v2.4s\n"
+ "fadd v12.4s, v12.4s, v20.4s\n"
+ "fadd v20.4s, v29.4s, v30.4s\n"
+ "fsub v18.4s, v29.4s, v30.4s\n"
+ "mov v24.16b, v27.16b\n"
+ "fadd v22.4s, v22.4s, v2.4s\n"
+ "fadd v4.4s, v4.4s, v2.4s\n"
+ "str s17, [%[outptr0], x15]\n"
+ "fadd v13.4s, v14.4s, v26.4s\n"
+ "fadd v28.4s, v28.4s, v20.4s\n"
+ "fmla v21.4s, v20.4s, v0.s[1]\n"
+ "fmul v18.4s, v18.4s, v0.s[0]\n"
+ "fsub v30.4s, v14.4s, v26.4s\n"
+ "str s22, [x17]\n"
+ "mov v14.16b, v8.16b\n"
+ "str s4, [x28, x15]\n"
+ "fadd v10.4s, v10.4s, v13.4s\n"
+ "fadd v16.4s, v16.4s, v28.4s\n"
+ "fmla v7.4s, v13.4s, v0.s[1]\n"
+ "fadd v23.4s, v27.4s, v18.4s\n"
+ "fmla v24.4s, v18.4s, v0.s[1]\n"
+ "fmul v30.4s, v30.4s, v0.s[0]\n"
+ "fadd v29.4s, v11.4s, v15.4s\n"
+ "mov v26.16b, v11.16b\n"
+ "fadd v27.4s, v9.4s, v12.4s\n"
+ "fsub v6.4s, v9.4s, v12.4s\n"
+ "mov v13.16b, v3.16b\n"
+ "fadd v24.4s, v24.4s, v25.4s\n"
+ "fadd v10.4s, v10.4s, v2.4s\n"
+ "fadd v8.4s, v8.4s, v30.4s\n"
+ "fmla v14.4s, v30.4s, v0.s[1]\n"
+ "fmla v26.4s, v15.4s, v0.s[1]\n"
+ "fadd v1.4s, v1.4s, v27.4s\n"
+ "fmul v6.4s, v6.4s, v0.s[0]\n"
+ "fmla v5.4s, v27.4s, v0.s[1]\n"
+ "str s10, [%[outptr0], %[output_col_stride1]]\n"
+ "fadd v29.4s, v29.4s, v2.4s\n"
+ "fadd v14.4s, v14.4s, v23.4s\n"
+ "fadd v8.4s, v8.4s, v2.4s\n"
+ "fadd v26.4s, v26.4s, v21.4s\n"
+ "fadd v1.4s, v1.4s, v2.4s\n"
+ "fadd v10.4s, v3.4s, v6.4s\n"
+ "fmla v13.4s, v6.4s, v0.s[1]\n"
+ "str s29, [x17, x15]\n"
+ "fadd v7.4s, v7.4s, v2.4s\n"
+ "str s8, [x17, %[output_col_stride1]]\n"
+ "fadd v5.4s, v5.4s, v2.4s\n"
+ "str s1, [%[outptr0], x16]\n"
+ "fadd v16.4s, v16.4s, v2.4s\n"
+ "fadd v13.4s, v13.4s, v24.4s\n"
+ "fadd v10.4s, v10.4s, v2.4s\n"
+ "str s7, [x28, %[output_col_stride1]]\n"
+ "fadd v14.4s, v14.4s, v2.4s\n"
+ "str s5, [x28, x16]\n"
+ "fadd v26.4s, v26.4s, v2.4s\n"
+ "str s16, [x18]\n"
+ "fadd v13.4s, v13.4s, v2.4s\n"
+ "str s10, [x17, x16]\n"
+ "str s14, [x18, %[output_col_stride1]]\n"
+ "str s26, [x18, x15]\n"
+ "str s13, [x18, x16]\n"
+ "6:\n"
+ : [bptr] "+r" (bptr), [outptr0] "+r" (output), [inptr0] "+r" (inptr)
+ : [output_row_stride] "r" (output_row_stride * sizeof(float)), [output_col_stride1] "r" (output_col_stride * sizeof(float)), [pcoeffs] "r" (coeffs), [n_channels] "r" ((long) n_channels), [in_row_stride] "r" (6 * matrix_stride * sizeof(float)), [in_col_stride1] "r" (matrix_stride * sizeof(float))
+ : "cc", "v0", "v1", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v2", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v3", "v30", "v4", "v5", "v6", "v7", "v8", "v9", "x13", "x14", "x15", "x16", "x17", "x18", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "memory"
+ );
+ }
+ else
+ {
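+    // Same transform as the biased branch above, except that the bias
+    // loads and the corresponding bias additions are omitted.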
+ __asm__ __volatile__ (
+ "ldr d0, [%[pcoeffs]]\n"
+ "add x21, %[in_col_stride1], %[in_col_stride1]\n" // Compute input column stride 2
+ "add x22, x21, %[in_col_stride1]\n" // Compute input column stride 3
+ "add x25, %[inptr0], %[in_row_stride]\n" // Compute input row pointers
+ "add x15, %[output_col_stride1], %[output_col_stride1]\n" // Compute output column stride 2
+ "add x23, x22, %[in_col_stride1]\n" // Compute input column stride 4
+ "add x13, x25, %[in_row_stride]\n" // Compute input row pointers
+ "add x16, x15, %[output_col_stride1]\n" // Compute output column stride 3
+ "add x24, x23, %[in_col_stride1]\n" // Compute input column stride 5
+ "add x26, x13, %[in_row_stride]\n" // Compute input row pointers
+ "add x17, %[outptr0], %[output_row_stride]\n" // Compute output row pointer 1
+ "add x14, x26, %[in_row_stride]\n" // Compute input row pointers
+ "add x28, x17, %[output_row_stride]\n" // Compute output row pointer 2
+ "lsr x19, %[n_channels], #2\n"
+ "add x27, x14, %[in_row_stride]\n" // Compute input row pointers
+ "add x18, x28, %[output_row_stride]\n" // Compute output row pointer 3
+ "and x20, %[n_channels], #3\n"
+ "cbz x19, 4f\n"
+ "1:\n" // Quad head
+ "ldr q17, [%[inptr0]]\n"
+ "subs x19, x19, #1\n"
+ "ldr q23, [%[inptr0], %[in_col_stride1]]\n"
+ "ldr q27, [%[inptr0], x21]\n"
+ "fadd v4.4s, v23.4s, v27.4s\n"
+ "ldr q24, [%[inptr0], x22]\n"
+ "fsub v13.4s, v23.4s, v27.4s\n"
+ "ldr q11, [%[inptr0], x23]\n"
+ "fadd v10.4s, v24.4s, v11.4s\n"
+ "ldr q12, [%[inptr0], x24]\n"
+ "fsub v11.4s, v24.4s, v11.4s\n"
+ "ldr q20, [x25]\n"
+ "fadd v7.4s, v17.4s, v4.4s\n"
+ "ldr q19, [x25, %[in_col_stride1]]\n"
+ "mov v4.16b, v4.16b\n"
+ "ldr q22, [x25, x21]\n"
+ "mov v1.16b, v13.16b\n"
+ "ldr q14, [x25, x22]\n"
+ "fmul v11.4s, v11.4s, v0.s[0]\n"
+ "ldr q18, [x25, x23]\n"
+ "fadd v7.4s, v7.4s, v10.4s\n"
+ "ldr q3, [x25, x24]\n"
+ "fmla v4.4s, v10.4s, v0.s[1]\n"
+ "fadd v8.4s, v13.4s, v11.4s\n"
+ "fmla v1.4s, v11.4s, v0.s[1]\n"
+ "fadd v1.4s, v1.4s, v12.4s\n"
+ "beq 3f\n"
+ "2:\n" // Quad loop
+ "fadd v2.4s, v19.4s, v22.4s\n"
+ "ldr q16, [x13]\n"
+ "fadd v23.4s, v14.4s, v18.4s\n"
+ "ldr q21, [x13, %[in_col_stride1]]\n"
+ "fsub v15.4s, v19.4s, v22.4s\n"
+ "ldr q24, [x13, x21]\n"
+ "fsub v31.4s, v14.4s, v18.4s\n"
+ "ldr q25, [x13, x22]\n"
+ "fadd v11.4s, v20.4s, v2.4s\n"
+ "ldr q17, [x13, x23]\n"
+ "mov v13.16b, v2.16b\n"
+ "ldr q9, [x13, x24]\n"
+ "mov v2.16b, v15.16b\n"
+ "ldr q6, [x26]\n"
+ "fmul v31.4s, v31.4s, v0.s[0]\n"
+ "ldr q19, [x26, %[in_col_stride1]]\n"
+ "fadd v11.4s, v11.4s, v23.4s\n"
+ "ldr q22, [x26, x21]\n"
+ "fmla v13.4s, v23.4s, v0.s[1]\n"
+ "ldr q12, [x26, x22]\n"
+ "fadd v29.4s, v21.4s, v24.4s\n"
+ "ldr q26, [x26, x23]\n"
+ "fadd v15.4s, v15.4s, v31.4s\n"
+ "ldr q5, [x26, x24]\n"
+ "fmla v2.4s, v31.4s, v0.s[1]\n"
+ "ldr q10, [x14]\n"
+ "fadd v18.4s, v25.4s, v17.4s\n"
+ "add %[inptr0], %[inptr0], #16\n"
+ "fadd v27.4s, v16.4s, v29.4s\n"
+ "add x25, x25, #16\n"
+ "fsub v14.4s, v21.4s, v24.4s\n"
+ "ldr q30, [x14, %[in_col_stride1]]\n"
+ "fadd v2.4s, v2.4s, v3.4s\n"
+ "ldr q31, [x14, x21]\n"
+ "fsub v28.4s, v25.4s, v17.4s\n"
+ "add x13, x13, #16\n"
+ "fadd v27.4s, v27.4s, v18.4s\n"
+ "add x26, x26, #16\n"
+ "mov v21.16b, v29.16b\n"
+ "subs x19, x19, #1\n"
+ "fadd v20.4s, v19.4s, v22.4s\n"
+ "fsub v17.4s, v19.4s, v22.4s\n"
+ "fmul v28.4s, v28.4s, v0.s[0]\n"
+ "ldr q23, [x14, x22]\n"
+ "fmla v21.4s, v18.4s, v0.s[1]\n"
+ "fadd v29.4s, v12.4s, v26.4s\n"
+ "fsub v16.4s, v12.4s, v26.4s\n"
+ "fadd v25.4s, v30.4s, v31.4s\n"
+ "fadd v24.4s, v6.4s, v20.4s\n"
+ "mov v6.16b, v20.16b\n"
+ "fsub v22.4s, v30.4s, v31.4s\n"
+ "fadd v31.4s, v11.4s, v27.4s\n"
+ "fsub v12.4s, v11.4s, v27.4s\n"
+ "ldr q26, [x14, x23]\n"
+ "fmul v16.4s, v16.4s, v0.s[0]\n"
+ "fmla v6.4s, v29.4s, v0.s[1]\n"
+ "fadd v24.4s, v24.4s, v29.4s\n"
+ "mov v3.16b, v14.16b\n"
+ "fadd v20.4s, v14.4s, v28.4s\n"
+ "fadd v29.4s, v10.4s, v25.4s\n"
+ "mov v10.16b, v25.16b\n"
+ "fadd v25.4s, v7.4s, v31.4s\n"
+ "fmla v3.4s, v28.4s, v0.s[1]\n"
+ "fadd v14.4s, v23.4s, v26.4s\n"
+ "fsub v23.4s, v23.4s, v26.4s\n"
+ "mov v26.16b, v31.16b\n"
+ "fadd v31.4s, v15.4s, v20.4s\n"
+ "fsub v11.4s, v15.4s, v20.4s\n"
+ "fadd v20.4s, v17.4s, v16.4s\n"
+ "mov v7.16b, v17.16b\n"
+ "fadd v3.4s, v3.4s, v9.4s\n"
+ "ldr q18, [x14, x24]\n"
+ "fadd v29.4s, v29.4s, v14.4s\n"
+ "add x14, x14, #16\n"
+ "fmla v7.4s, v16.4s, v0.s[1]\n"
+ "ldr q19, [x27]\n"
+ "fmul v23.4s, v23.4s, v0.s[0]\n"
+ "fmla v10.4s, v14.4s, v0.s[1]\n"
+ "fadd v15.4s, v8.4s, v31.4s\n"
+ "mov v14.16b, v31.16b\n"
+ "fadd v28.4s, v24.4s, v29.4s\n"
+ "fsub v24.4s, v24.4s, v29.4s\n"
+ "fadd v7.4s, v7.4s, v5.4s\n"
+ "ldr q27, [x27, %[in_col_stride1]]\n"
+ "fadd v30.4s, v13.4s, v21.4s\n"
+ "fsub v9.4s, v13.4s, v21.4s\n"
+ "fadd v17.4s, v22.4s, v23.4s\n"
+ "mov v8.16b, v22.16b\n"
+ "fadd v25.4s, v25.4s, v28.4s\n"
+ "fmul v24.4s, v24.4s, v0.s[0]\n"
+ "fmla v26.4s, v28.4s, v0.s[1]\n"
+ "ldr q29, [x27, x21]\n"
+ "fmla v8.4s, v23.4s, v0.s[1]\n"
+ "ldr q28, [x27, x22]\n"
+ "fadd v13.4s, v4.4s, v30.4s\n"
+ "mov v4.16b, v30.16b\n"
+ "str q25, [%[outptr0]]\n" // Store output (0, 0)
+ "fadd v16.4s, v27.4s, v29.4s\n"
+ "str q26, [x28]\n" // Store output (2, 0)
+ "fsub v29.4s, v27.4s, v29.4s\n"
+ "fadd v8.4s, v8.4s, v18.4s\n"
+ "ldr q23, [x27, x23]\n"
+ "fadd v30.4s, v28.4s, v23.4s\n"
+ "ldr q25, [x27, x24]\n"
+ "fadd v19.4s, v19.4s, v16.4s\n"
+ "add x27, x27, #16\n"
+ "fsub v27.4s, v28.4s, v23.4s\n"
+ "mov v16.16b, v16.16b\n"
+ "fadd v22.4s, v20.4s, v17.4s\n"
+ "fsub v20.4s, v20.4s, v17.4s\n"
+ "fadd v21.4s, v12.4s, v24.4s\n"
+ "mov v26.16b, v12.16b\n"
+ "fadd v19.4s, v19.4s, v30.4s\n"
+ "fmla v16.4s, v30.4s, v0.s[1]\n"
+ "fmul v27.4s, v27.4s, v0.s[0]\n"
+ "ldr q17, [%[inptr0]]\n"
+ "fmla v26.4s, v24.4s, v0.s[1]\n"
+ "ldr q23, [%[inptr0], %[in_col_stride1]]\n"
+ "str q21, [x17]\n" // Store output (1, 0)
+ "mov v5.16b, v29.16b\n"
+ "fadd v15.4s, v15.4s, v22.4s\n"
+ "fmul v20.4s, v20.4s, v0.s[0]\n"
+ "fadd v18.4s, v29.4s, v27.4s\n"
+ "fmla v14.4s, v22.4s, v0.s[1]\n"
+ "fmla v5.4s, v27.4s, v0.s[1]\n"
+ "ldr q27, [%[inptr0], x21]\n"
+ "fadd v26.4s, v26.4s, v19.4s\n"
+ "ldr q24, [%[inptr0], x22]\n"
+ "str q15, [%[outptr0], %[output_col_stride1]]\n" // Store output (0, 1)
+ "fadd v12.4s, v11.4s, v20.4s\n"
+ "str q14, [x28, %[output_col_stride1]]\n" // Store output (2, 1)
+ "mov v28.16b, v11.16b\n"
+ "fadd v5.4s, v5.4s, v25.4s\n"
+ "ldr q11, [%[inptr0], x23]\n"
+ "str q26, [x18]\n" // Store output (3, 0)
+ "fadd v21.4s, v6.4s, v10.4s\n"
+ "str q12, [x17, %[output_col_stride1]]\n" // Store output (1, 1)
+ "fmla v28.4s, v20.4s, v0.s[1]\n"
+ "fsub v10.4s, v6.4s, v10.4s\n"
+ "ldr q12, [%[inptr0], x24]\n"
+ "mov v15.16b, v9.16b\n"
+ "ldr q20, [x25]\n"
+ "fadd v13.4s, v13.4s, v21.4s\n"
+ "ldr q19, [x25, %[in_col_stride1]]\n"
+ "fadd v28.4s, v28.4s, v18.4s\n"
+ "ldr q22, [x25, x21]\n"
+ "fmul v10.4s, v10.4s, v0.s[0]\n"
+ "ldr q14, [x25, x22]\n"
+ "fmla v4.4s, v21.4s, v0.s[1]\n"
+ "ldr q18, [x25, x23]\n"
+ "str q13, [%[outptr0], x15]\n" // Store output (0, 2)
+ "fadd v6.4s, v2.4s, v3.4s\n"
+ "str q28, [x18, %[output_col_stride1]]\n" // Store output (3, 1)
+ "fadd v30.4s, v7.4s, v8.4s\n"
+ "fadd v13.4s, v9.4s, v10.4s\n"
+ "fmla v15.4s, v10.4s, v0.s[1]\n"
+ "str q4, [x28, x15]\n" // Store output (2, 2)
+ "fsub v2.4s, v2.4s, v3.4s\n"
+ "fadd v1.4s, v1.4s, v6.4s\n"
+ "ldr q3, [x25, x24]\n"
+ "fsub v8.4s, v7.4s, v8.4s\n"
+ "mov v6.16b, v6.16b\n"
+ "str q13, [x17, x15]\n" // Store output (1, 2)
+ "fadd v15.4s, v15.4s, v16.4s\n"
+ "mov v9.16b, v2.16b\n"
+ "fadd v4.4s, v23.4s, v27.4s\n"
+ "fadd v1.4s, v1.4s, v30.4s\n"
+ "fmla v6.4s, v30.4s, v0.s[1]\n"
+ "fmul v8.4s, v8.4s, v0.s[0]\n"
+ "fadd v10.4s, v24.4s, v11.4s\n"
+ "str q15, [x18, x15]\n" // Store output (3, 2)
+ "fsub v13.4s, v23.4s, v27.4s\n"
+ "fadd v7.4s, v17.4s, v4.4s\n"
+ "fsub v11.4s, v24.4s, v11.4s\n"
+ "str q1, [%[outptr0], x16]\n" // Store output (0, 3)
+ "mov v4.16b, v4.16b\n"
+ "str q6, [x28, x16]\n" // Store output (2, 3)
+ "fadd v2.4s, v2.4s, v8.4s\n"
+ "fmla v9.4s, v8.4s, v0.s[1]\n"
+ "add %[outptr0], %[outptr0], #16\n"
+ "fadd v7.4s, v7.4s, v10.4s\n"
+ "add x28, x28, #16\n"
+ "fmul v11.4s, v11.4s, v0.s[0]\n"
+ "fmla v4.4s, v10.4s, v0.s[1]\n"
+ "str q2, [x17, x16]\n" // Store output (1, 3)
+ "mov v1.16b, v13.16b\n"
+ "fadd v9.4s, v9.4s, v5.4s\n"
+ "add x17, x17, #16\n"
+ "fadd v8.4s, v13.4s, v11.4s\n"
+ "fmla v1.4s, v11.4s, v0.s[1]\n"
+ "str q9, [x18, x16]\n" // Store output (3, 3)
+ "add x18, x18, #16\n"
+ "fadd v1.4s, v1.4s, v12.4s\n"
+ "bne 2b\n"
+ "3:\n" // Quad tail
+ "fadd v2.4s, v19.4s, v22.4s\n"
+ "ldr q16, [x13]\n"
+ "fadd v23.4s, v14.4s, v18.4s\n"
+ "ldr q21, [x13, %[in_col_stride1]]\n"
+ "fsub v15.4s, v19.4s, v22.4s\n"
+ "ldr q24, [x13, x21]\n"
+ "fsub v31.4s, v14.4s, v18.4s\n"
+ "ldr q25, [x13, x22]\n"
+ "fadd v11.4s, v20.4s, v2.4s\n"
+ "ldr q17, [x13, x23]\n"
+ "mov v13.16b, v2.16b\n"
+ "ldr q9, [x13, x24]\n"
+ "mov v2.16b, v15.16b\n"
+ "ldr q6, [x26]\n"
+ "fmul v31.4s, v31.4s, v0.s[0]\n"
+ "ldr q19, [x26, %[in_col_stride1]]\n"
+ "fadd v11.4s, v11.4s, v23.4s\n"
+ "ldr q22, [x26, x21]\n"
+ "fmla v13.4s, v23.4s, v0.s[1]\n"
+ "ldr q12, [x26, x22]\n"
+ "fadd v29.4s, v21.4s, v24.4s\n"
+ "ldr q26, [x26, x23]\n"
+ "fadd v15.4s, v15.4s, v31.4s\n"
+ "ldr q5, [x26, x24]\n"
+ "fmla v2.4s, v31.4s, v0.s[1]\n"
+ "ldr q10, [x14]\n"
+ "fadd v18.4s, v25.4s, v17.4s\n"
+ "add %[inptr0], %[inptr0], #16\n"
+ "fadd v27.4s, v16.4s, v29.4s\n"
+ "add x25, x25, #16\n"
+ "fsub v14.4s, v21.4s, v24.4s\n"
+ "ldr q30, [x14, %[in_col_stride1]]\n"
+ "fadd v2.4s, v2.4s, v3.4s\n"
+ "ldr q31, [x14, x21]\n"
+ "fsub v28.4s, v25.4s, v17.4s\n"
+ "add x13, x13, #16\n"
+ "fadd v27.4s, v27.4s, v18.4s\n"
+ "add x26, x26, #16\n"
+ "mov v21.16b, v29.16b\n"
+ "fadd v20.4s, v19.4s, v22.4s\n"
+ "fsub v17.4s, v19.4s, v22.4s\n"
+ "fadd v29.4s, v12.4s, v26.4s\n"
+ "fmul v28.4s, v28.4s, v0.s[0]\n"
+ "fsub v16.4s, v12.4s, v26.4s\n"
+ "fmla v21.4s, v18.4s, v0.s[1]\n"
+ "ldr q23, [x14, x22]\n"
+ "fadd v24.4s, v6.4s, v20.4s\n"
+ "mov v6.16b, v20.16b\n"
+ "fadd v25.4s, v30.4s, v31.4s\n"
+ "fsub v22.4s, v30.4s, v31.4s\n"
+ "fadd v20.4s, v14.4s, v28.4s\n"
+ "mov v3.16b, v14.16b\n"
+ "fmul v16.4s, v16.4s, v0.s[0]\n"
+ "fmla v6.4s, v29.4s, v0.s[1]\n"
+ "fadd v24.4s, v24.4s, v29.4s\n"
+ "ldr q26, [x14, x23]\n"
+ "fmla v3.4s, v28.4s, v0.s[1]\n"
+ "fadd v14.4s, v23.4s, v26.4s\n"
+ "fadd v29.4s, v10.4s, v25.4s\n"
+ "fsub v23.4s, v23.4s, v26.4s\n"
+ "mov v10.16b, v25.16b\n"
+ "fadd v31.4s, v11.4s, v27.4s\n"
+ "fsub v12.4s, v11.4s, v27.4s\n"
+ "ldr q18, [x14, x24]\n"
+ "fadd v3.4s, v3.4s, v9.4s\n"
+ "ldr q19, [x27]\n"
+ "fadd v29.4s, v29.4s, v14.4s\n"
+ "add x14, x14, #16\n"
+ "fmul v23.4s, v23.4s, v0.s[0]\n"
+ "fmla v10.4s, v14.4s, v0.s[1]\n"
+ "fadd v25.4s, v7.4s, v31.4s\n"
+ "mov v26.16b, v31.16b\n"
+ "fadd v31.4s, v15.4s, v20.4s\n"
+ "fsub v11.4s, v15.4s, v20.4s\n"
+ "fadd v28.4s, v24.4s, v29.4s\n"
+ "fsub v24.4s, v24.4s, v29.4s\n"
+ "fadd v30.4s, v13.4s, v21.4s\n"
+ "fsub v9.4s, v13.4s, v21.4s\n"
+ "fadd v20.4s, v17.4s, v16.4s\n"
+ "mov v7.16b, v17.16b\n"
+ "fadd v15.4s, v8.4s, v31.4s\n"
+ "mov v14.16b, v31.16b\n"
+ "fadd v25.4s, v25.4s, v28.4s\n"
+ "fmul v24.4s, v24.4s, v0.s[0]\n"
+ "fmla v7.4s, v16.4s, v0.s[1]\n"
+ "ldr q27, [x27, %[in_col_stride1]]\n"
+ "fmla v26.4s, v28.4s, v0.s[1]\n"
+ "ldr q29, [x27, x21]\n"
+ "fadd v13.4s, v4.4s, v30.4s\n"
+ "mov v4.16b, v30.16b\n"
+ "str q25, [%[outptr0]]\n" // Store output (0, 0)
+ "fadd v17.4s, v22.4s, v23.4s\n"
+ "fadd v7.4s, v7.4s, v5.4s\n"
+ "ldr q28, [x27, x22]\n"
+ "str q26, [x28]\n" // Store output (2, 0)
+ "mov v8.16b, v22.16b\n"
+ "fadd v16.4s, v27.4s, v29.4s\n"
+ "fsub v29.4s, v27.4s, v29.4s\n"
+ "fadd v21.4s, v12.4s, v24.4s\n"
+ "mov v26.16b, v12.16b\n"
+ "fmla v8.4s, v23.4s, v0.s[1]\n"
+ "fadd v22.4s, v20.4s, v17.4s\n"
+ "fsub v20.4s, v20.4s, v17.4s\n"
+ "ldr q23, [x27, x23]\n"
+ "fadd v19.4s, v19.4s, v16.4s\n"
+ "mov v16.16b, v16.16b\n"
+ "str q21, [x17]\n" // Store output (1, 0)
+ "fadd v30.4s, v28.4s, v23.4s\n"
+ "fadd v8.4s, v8.4s, v18.4s\n"
+ "ldr q25, [x27, x24]\n"
+ "fsub v27.4s, v28.4s, v23.4s\n"
+ "add x27, x27, #16\n"
+ "mov v5.16b, v29.16b\n"
+ "fmla v26.4s, v24.4s, v0.s[1]\n"
+ "fadd v19.4s, v19.4s, v30.4s\n"
+ "fmla v16.4s, v30.4s, v0.s[1]\n"
+ "fadd v15.4s, v15.4s, v22.4s\n"
+ "fmul v20.4s, v20.4s, v0.s[0]\n"
+ "fmul v27.4s, v27.4s, v0.s[0]\n"
+ "fmla v14.4s, v22.4s, v0.s[1]\n"
+ "mov v28.16b, v11.16b\n"
+ "fadd v21.4s, v6.4s, v10.4s\n"
+ "fadd v26.4s, v26.4s, v19.4s\n"
+ "fsub v10.4s, v6.4s, v10.4s\n"
+ "str q15, [%[outptr0], %[output_col_stride1]]\n" // Store output (0, 1)
+ "fadd v12.4s, v11.4s, v20.4s\n"
+ "str q14, [x28, %[output_col_stride1]]\n" // Store output (2, 1)
+ "fadd v18.4s, v29.4s, v27.4s\n"
+ "fmla v5.4s, v27.4s, v0.s[1]\n"
+ "fmla v28.4s, v20.4s, v0.s[1]\n"
+ "str q26, [x18]\n" // Store output (3, 0)
+ "fadd v13.4s, v13.4s, v21.4s\n"
+ "str q12, [x17, %[output_col_stride1]]\n" // Store output (1, 1)
+ "fmul v10.4s, v10.4s, v0.s[0]\n"
+ "fmla v4.4s, v21.4s, v0.s[1]\n"
+ "mov v15.16b, v9.16b\n"
+ "fadd v5.4s, v5.4s, v25.4s\n"
+ "fadd v28.4s, v28.4s, v18.4s\n"
+ "str q13, [%[outptr0], x15]\n" // Store output (0, 2)
+ "fadd v6.4s, v2.4s, v3.4s\n"
+ "fadd v13.4s, v9.4s, v10.4s\n"
+ "fmla v15.4s, v10.4s, v0.s[1]\n"
+ "str q4, [x28, x15]\n" // Store output (2, 2)
+ "fadd v30.4s, v7.4s, v8.4s\n"
+ "str q28, [x18, %[output_col_stride1]]\n" // Store output (3, 1)
+ "fsub v2.4s, v2.4s, v3.4s\n"
+ "fadd v1.4s, v1.4s, v6.4s\n"
+ "fsub v8.4s, v7.4s, v8.4s\n"
+ "str q13, [x17, x15]\n" // Store output (1, 2)
+ "fadd v15.4s, v15.4s, v16.4s\n"
+ "mov v6.16b, v6.16b\n"
+ "mov v9.16b, v2.16b\n"
+ "fadd v1.4s, v1.4s, v30.4s\n"
+ "fmul v8.4s, v8.4s, v0.s[0]\n"
+ "str q15, [x18, x15]\n" // Store output (3, 2)
+ "fmla v6.4s, v30.4s, v0.s[1]\n"
+ "str q1, [%[outptr0], x16]\n" // Store output (0, 3)
+ "fadd v2.4s, v2.4s, v8.4s\n"
+ "str q6, [x28, x16]\n" // Store output (2, 3)
+ "fmla v9.4s, v8.4s, v0.s[1]\n"
+ "add %[outptr0], %[outptr0], #16\n"
+ "add x28, x28, #16\n"
+ "str q2, [x17, x16]\n" // Store output (1, 3)
+ "fadd v9.4s, v9.4s, v5.4s\n"
+ "add x17, x17, #16\n"
+ "str q9, [x18, x16]\n" // Store output (3, 3)
+ "add x18, x18, #16\n"
+ "4:\n" // Double
+ "cmp x20, #2\n"
+ "blt 5f\n"
+ "ldr d17, [%[inptr0]]\n"
+ "ldr d23, [%[inptr0], %[in_col_stride1]]\n"
+ "sub x20, x20, #2\n"
+ "ldr d27, [%[inptr0], x21]\n"
+ "ldr d24, [%[inptr0], x22]\n"
+ "fadd v4.4s, v23.4s, v27.4s\n"
+ "ldr d11, [%[inptr0], x23]\n"
+ "fadd v10.4s, v24.4s, v11.4s\n"
+ "ldr d12, [%[inptr0], x24]\n"
+ "fsub v13.4s, v23.4s, v27.4s\n"
+ "ldr d20, [x25]\n"
+ "fsub v11.4s, v24.4s, v11.4s\n"
+ "ldr d19, [x25, %[in_col_stride1]]\n"
+ "fadd v7.4s, v17.4s, v4.4s\n"
+ "ldr d22, [x25, x21]\n"
+ "mov v4.16b, v4.16b\n"
+ "ldr d14, [x25, x22]\n"
+ "mov v1.16b, v13.16b\n"
+ "ldr d18, [x25, x23]\n"
+ "fmul v11.4s, v11.4s, v0.s[0]\n"
+ "ldr d3, [x25, x24]\n"
+ "fadd v7.4s, v7.4s, v10.4s\n"
+ "ldr d16, [x13]\n"
+ "fmla v4.4s, v10.4s, v0.s[1]\n"
+ "ldr d21, [x13, %[in_col_stride1]]\n"
+ "fadd v2.4s, v19.4s, v22.4s\n"
+ "ldr d24, [x13, x21]\n"
+ "fadd v8.4s, v13.4s, v11.4s\n"
+ "ldr d25, [x13, x22]\n"
+ "fmla v1.4s, v11.4s, v0.s[1]\n"
+ "ldr d17, [x13, x23]\n"
+ "fadd v23.4s, v14.4s, v18.4s\n"
+ "ldr d9, [x13, x24]\n"
+ "fadd v11.4s, v20.4s, v2.4s\n"
+ "ldr d6, [x26]\n"
+ "fsub v15.4s, v19.4s, v22.4s\n"
+ "ldr d19, [x26, %[in_col_stride1]]\n"
+ "fadd v1.4s, v1.4s, v12.4s\n"
+ "ldr d22, [x26, x21]\n"
+ "fsub v31.4s, v14.4s, v18.4s\n"
+ "ldr d12, [x26, x22]\n"
+ "fadd v11.4s, v11.4s, v23.4s\n"
+ "ldr d26, [x26, x23]\n"
+ "mov v13.16b, v2.16b\n"
+ "ldr d5, [x26, x24]\n"
+ "mov v2.16b, v15.16b\n"
+ "ldr d10, [x14]\n"
+ "fmul v31.4s, v31.4s, v0.s[0]\n"
+ "add %[inptr0], %[inptr0], #8\n"
+ "fmla v13.4s, v23.4s, v0.s[1]\n"
+ "add x25, x25, #8\n"
+ "fadd v29.4s, v21.4s, v24.4s\n"
+ "add x13, x13, #8\n"
+ "fsub v14.4s, v21.4s, v24.4s\n"
+ "ldr d30, [x14, %[in_col_stride1]]\n"
+ "fadd v15.4s, v15.4s, v31.4s\n"
+ "add x26, x26, #8\n"
+ "fmla v2.4s, v31.4s, v0.s[1]\n"
+ "fadd v18.4s, v25.4s, v17.4s\n"
+ "fadd v27.4s, v16.4s, v29.4s\n"
+ "fsub v28.4s, v25.4s, v17.4s\n"
+ "mov v21.16b, v29.16b\n"
+ "fadd v20.4s, v19.4s, v22.4s\n"
+ "fsub v17.4s, v19.4s, v22.4s\n"
+ "ldr d31, [x14, x21]\n"
+ "fadd v2.4s, v2.4s, v3.4s\n"
+ "ldr d23, [x14, x22]\n"
+ "fadd v27.4s, v27.4s, v18.4s\n"
+ "fmul v28.4s, v28.4s, v0.s[0]\n"
+ "fmla v21.4s, v18.4s, v0.s[1]\n"
+ "fadd v29.4s, v12.4s, v26.4s\n"
+ "fadd v24.4s, v6.4s, v20.4s\n"
+ "fsub v16.4s, v12.4s, v26.4s\n"
+ "mov v6.16b, v20.16b\n"
+ "fadd v25.4s, v30.4s, v31.4s\n"
+ "fsub v22.4s, v30.4s, v31.4s\n"
+ "fadd v31.4s, v11.4s, v27.4s\n"
+ "fsub v12.4s, v11.4s, v27.4s\n"
+ "ldr d26, [x14, x23]\n"
+ "fadd v24.4s, v24.4s, v29.4s\n"
+ "fmul v16.4s, v16.4s, v0.s[0]\n"
+ "fmla v6.4s, v29.4s, v0.s[1]\n"
+ "mov v3.16b, v14.16b\n"
+ "fadd v20.4s, v14.4s, v28.4s\n"
+ "fadd v29.4s, v10.4s, v25.4s\n"
+ "mov v10.16b, v25.16b\n"
+ "fadd v25.4s, v7.4s, v31.4s\n"
+ "fmla v3.4s, v28.4s, v0.s[1]\n"
+ "fadd v14.4s, v23.4s, v26.4s\n"
+ "fsub v23.4s, v23.4s, v26.4s\n"
+ "mov v26.16b, v31.16b\n"
+ "fadd v31.4s, v15.4s, v20.4s\n"
+ "fsub v11.4s, v15.4s, v20.4s\n"
+ "fadd v20.4s, v17.4s, v16.4s\n"
+ "mov v7.16b, v17.16b\n"
+ "fadd v3.4s, v3.4s, v9.4s\n"
+ "ldr d18, [x14, x24]\n"
+ "fadd v29.4s, v29.4s, v14.4s\n"
+ "add x14, x14, #8\n"
+ "fmla v7.4s, v16.4s, v0.s[1]\n"
+ "ldr d19, [x27]\n"
+ "fmul v23.4s, v23.4s, v0.s[0]\n"
+ "fmla v10.4s, v14.4s, v0.s[1]\n"
+ "fadd v15.4s, v8.4s, v31.4s\n"
+ "mov v14.16b, v31.16b\n"
+ "fadd v28.4s, v24.4s, v29.4s\n"
+ "fsub v24.4s, v24.4s, v29.4s\n"
+ "fadd v7.4s, v7.4s, v5.4s\n"
+ "ldr d27, [x27, %[in_col_stride1]]\n"
+ "fadd v30.4s, v13.4s, v21.4s\n"
+ "fsub v9.4s, v13.4s, v21.4s\n"
+ "fadd v17.4s, v22.4s, v23.4s\n"
+ "mov v8.16b, v22.16b\n"
+ "fadd v25.4s, v25.4s, v28.4s\n"
+ "fmul v24.4s, v24.4s, v0.s[0]\n"
+ "fmla v26.4s, v28.4s, v0.s[1]\n"
+ "ldr d29, [x27, x21]\n"
+ "fmla v8.4s, v23.4s, v0.s[1]\n"
+ "ldr d28, [x27, x22]\n"
+ "fadd v13.4s, v4.4s, v30.4s\n"
+ "mov v4.16b, v30.16b\n"
+ "str d25, [%[outptr0]]\n" // Store output (0, 0)
+ "fadd v16.4s, v27.4s, v29.4s\n"
+ "str d26, [x28]\n" // Store output (2, 0)
+ "fsub v29.4s, v27.4s, v29.4s\n"
+ "fadd v8.4s, v8.4s, v18.4s\n"
+ "ldr d23, [x27, x23]\n"
+ "fadd v30.4s, v28.4s, v23.4s\n"
+ "ldr d25, [x27, x24]\n"
+ "fadd v19.4s, v19.4s, v16.4s\n"
+ "add x27, x27, #8\n"
+ "fsub v27.4s, v28.4s, v23.4s\n"
+ "mov v16.16b, v16.16b\n"
+ "fadd v22.4s, v20.4s, v17.4s\n"
+ "fsub v20.4s, v20.4s, v17.4s\n"
+ "fadd v21.4s, v12.4s, v24.4s\n"
+ "mov v26.16b, v12.16b\n"
+ "fadd v19.4s, v19.4s, v30.4s\n"
+ "fmla v16.4s, v30.4s, v0.s[1]\n"
+ "fmul v27.4s, v27.4s, v0.s[0]\n"
+ "mov v5.16b, v29.16b\n"
+ "fmla v26.4s, v24.4s, v0.s[1]\n"
+ "fadd v15.4s, v15.4s, v22.4s\n"
+ "str d21, [x17]\n" // Store output (1, 0)
+ "fmul v20.4s, v20.4s, v0.s[0]\n"
+ "fmla v14.4s, v22.4s, v0.s[1]\n"
+ "mov v28.16b, v11.16b\n"
+ "fadd v18.4s, v29.4s, v27.4s\n"
+ "fmla v5.4s, v27.4s, v0.s[1]\n"
+ "str d15, [%[outptr0], %[output_col_stride1]]\n" // Store output (0, 1)
+ "fadd v26.4s, v26.4s, v19.4s\n"
+ "fadd v12.4s, v11.4s, v20.4s\n"
+ "fmla v28.4s, v20.4s, v0.s[1]\n"
+ "str d14, [x28, %[output_col_stride1]]\n" // Store output (2, 1)
+ "fadd v21.4s, v6.4s, v10.4s\n"
+ "fadd v5.4s, v5.4s, v25.4s\n"
+ "fsub v10.4s, v6.4s, v10.4s\n"
+ "str d26, [x18]\n" // Store output (3, 0)
+ "mov v15.16b, v9.16b\n"
+ "str d12, [x17, %[output_col_stride1]]\n" // Store output (1, 1)
+ "fadd v28.4s, v28.4s, v18.4s\n"
+ "fadd v13.4s, v13.4s, v21.4s\n"
+ "fmla v4.4s, v21.4s, v0.s[1]\n"
+ "fmul v10.4s, v10.4s, v0.s[0]\n"
+ "fadd v6.4s, v2.4s, v3.4s\n"
+ "fadd v30.4s, v7.4s, v8.4s\n"
+ "fsub v2.4s, v2.4s, v3.4s\n"
+ "str d28, [x18, %[output_col_stride1]]\n" // Store output (3, 1)
+ "fsub v8.4s, v7.4s, v8.4s\n"
+ "str d13, [%[outptr0], x15]\n" // Store output (0, 2)
+ "str d4, [x28, x15]\n" // Store output (2, 2)
+ "fadd v13.4s, v9.4s, v10.4s\n"
+ "fmla v15.4s, v10.4s, v0.s[1]\n"
+ "fadd v1.4s, v1.4s, v6.4s\n"
+ "mov v6.16b, v6.16b\n"
+ "fmul v8.4s, v8.4s, v0.s[0]\n"
+ "mov v9.16b, v2.16b\n"
+ "str d13, [x17, x15]\n" // Store output (1, 2)
+ "fadd v15.4s, v15.4s, v16.4s\n"
+ "fadd v1.4s, v1.4s, v30.4s\n"
+ "fmla v6.4s, v30.4s, v0.s[1]\n"
+ "fadd v2.4s, v2.4s, v8.4s\n"
+ "fmla v9.4s, v8.4s, v0.s[1]\n"
+ "str d15, [x18, x15]\n" // Store output (3, 2)
+ "str d1, [%[outptr0], x16]\n" // Store output (0, 3)
+ "str d2, [x17, x16]\n" // Store output (1, 3)
+ "fadd v9.4s, v9.4s, v5.4s\n"
+ "str d6, [x28, x16]\n" // Store output (2, 3)
+ "add %[outptr0], %[outptr0], #8\n"
+ "add x17, x17, #8\n"
+ "add x28, x28, #8\n"
+ "str d9, [x18, x16]\n" // Store output (3, 3)
+ "add x18, x18, #8\n"
+ "5:\n" // Scalar
+ "cbz x20, 6f\n"
+ "ldr s17, [%[inptr0]]\n"
+ "ldr s23, [%[inptr0], %[in_col_stride1]]\n"
+ "ldr s27, [%[inptr0], x21]\n"
+ "fadd v4.4s, v23.4s, v27.4s\n"
+ "ldr s24, [%[inptr0], x22]\n"
+ "fsub v13.4s, v23.4s, v27.4s\n"
+ "ldr s11, [%[inptr0], x23]\n"
+ "fadd v10.4s, v24.4s, v11.4s\n"
+ "ldr s12, [%[inptr0], x24]\n"
+ "fsub v11.4s, v24.4s, v11.4s\n"
+ "ldr s20, [x25]\n"
+ "fadd v7.4s, v17.4s, v4.4s\n"
+ "ldr s19, [x25, %[in_col_stride1]]\n"
+ "mov v4.16b, v4.16b\n"
+ "ldr s22, [x25, x21]\n"
+ "mov v1.16b, v13.16b\n"
+ "ldr s14, [x25, x22]\n"
+ "fmul v11.4s, v11.4s, v0.s[0]\n"
+ "ldr s18, [x25, x23]\n"
+ "fadd v7.4s, v7.4s, v10.4s\n"
+ "ldr s3, [x25, x24]\n"
+ "fmla v4.4s, v10.4s, v0.s[1]\n"
+ "ldr s16, [x13]\n"
+ "fadd v2.4s, v19.4s, v22.4s\n"
+ "ldr s21, [x13, %[in_col_stride1]]\n"
+ "fadd v8.4s, v13.4s, v11.4s\n"
+ "ldr s24, [x13, x21]\n"
+ "fmla v1.4s, v11.4s, v0.s[1]\n"
+ "ldr s25, [x13, x22]\n"
+ "fadd v23.4s, v14.4s, v18.4s\n"
+ "ldr s17, [x13, x23]\n"
+ "fadd v11.4s, v20.4s, v2.4s\n"
+ "ldr s9, [x13, x24]\n"
+ "fsub v15.4s, v19.4s, v22.4s\n"
+ "ldr s6, [x26]\n"
+ "fadd v1.4s, v1.4s, v12.4s\n"
+ "ldr s19, [x26, %[in_col_stride1]]\n"
+ "fsub v31.4s, v14.4s, v18.4s\n"
+ "ldr s22, [x26, x21]\n"
+ "fadd v11.4s, v11.4s, v23.4s\n"
+ "ldr s12, [x26, x22]\n"
+ "mov v13.16b, v2.16b\n"
+ "ldr s26, [x26, x23]\n"
+ "mov v2.16b, v15.16b\n"
+ "ldr s5, [x26, x24]\n"
+ "fmul v31.4s, v31.4s, v0.s[0]\n"
+ "ldr s10, [x14]\n"
+ "fmla v13.4s, v23.4s, v0.s[1]\n"
+ "fadd v29.4s, v21.4s, v24.4s\n"
+ "fsub v14.4s, v21.4s, v24.4s\n"
+ "fadd v18.4s, v25.4s, v17.4s\n"
+ "fsub v28.4s, v25.4s, v17.4s\n"
+ "ldr s30, [x14, %[in_col_stride1]]\n"
+ "fadd v15.4s, v15.4s, v31.4s\n"
+ "fmla v2.4s, v31.4s, v0.s[1]\n"
+ "fadd v27.4s, v16.4s, v29.4s\n"
+ "mov v21.16b, v29.16b\n"
+ "fadd v20.4s, v19.4s, v22.4s\n"
+ "fsub v17.4s, v19.4s, v22.4s\n"
+ "fmul v28.4s, v28.4s, v0.s[0]\n"
+ "ldr s31, [x14, x21]\n"
+ "fadd v2.4s, v2.4s, v3.4s\n"
+ "ldr s23, [x14, x22]\n"
+ "fadd v27.4s, v27.4s, v18.4s\n"
+ "fmla v21.4s, v18.4s, v0.s[1]\n"
+ "fadd v29.4s, v12.4s, v26.4s\n"
+ "fadd v24.4s, v6.4s, v20.4s\n"
+ "fsub v16.4s, v12.4s, v26.4s\n"
+ "mov v6.16b, v20.16b\n"
+ "fadd v25.4s, v30.4s, v31.4s\n"
+ "fsub v22.4s, v30.4s, v31.4s\n"
+ "fadd v20.4s, v14.4s, v28.4s\n"
+ "mov v3.16b, v14.16b\n"
+ "fadd v24.4s, v24.4s, v29.4s\n"
+ "fmla v6.4s, v29.4s, v0.s[1]\n"
+ "fmul v16.4s, v16.4s, v0.s[0]\n"
+ "ldr s26, [x14, x23]\n"
+ "fmla v3.4s, v28.4s, v0.s[1]\n"
+ "fadd v14.4s, v23.4s, v26.4s\n"
+ "fadd v29.4s, v10.4s, v25.4s\n"
+ "fsub v23.4s, v23.4s, v26.4s\n"
+ "mov v10.16b, v25.16b\n"
+ "fadd v31.4s, v11.4s, v27.4s\n"
+ "fsub v12.4s, v11.4s, v27.4s\n"
+ "ldr s18, [x14, x24]\n"
+ "fadd v3.4s, v3.4s, v9.4s\n"
+ "ldr s19, [x27]\n"
+ "fadd v29.4s, v29.4s, v14.4s\n"
+ "fmul v23.4s, v23.4s, v0.s[0]\n"
+ "fmla v10.4s, v14.4s, v0.s[1]\n"
+ "fadd v25.4s, v7.4s, v31.4s\n"
+ "mov v26.16b, v31.16b\n"
+ "fadd v31.4s, v15.4s, v20.4s\n"
+ "fsub v11.4s, v15.4s, v20.4s\n"
+ "fadd v30.4s, v13.4s, v21.4s\n"
+ "fsub v9.4s, v13.4s, v21.4s\n"
+ "fadd v28.4s, v24.4s, v29.4s\n"
+ "fsub v24.4s, v24.4s, v29.4s\n"
+ "ldr s27, [x27, %[in_col_stride1]]\n"
+ "fadd v15.4s, v8.4s, v31.4s\n"
+ "mov v14.16b, v31.16b\n"
+ "fadd v13.4s, v4.4s, v30.4s\n"
+ "mov v4.16b, v30.16b\n"
+ "fadd v25.4s, v25.4s, v28.4s\n"
+ "fmla v26.4s, v28.4s, v0.s[1]\n"
+ "fmul v24.4s, v24.4s, v0.s[0]\n"
+ "fadd v21.4s, v6.4s, v10.4s\n"
+ "fsub v10.4s, v6.4s, v10.4s\n"
+ "fadd v6.4s, v2.4s, v3.4s\n"
+ "fsub v2.4s, v2.4s, v3.4s\n"
+ "ldr s29, [x27, x21]\n"
+ "str s25, [%[outptr0]]\n" // Store output (0, 0)
+ "fadd v20.4s, v17.4s, v16.4s\n"
+ "str s26, [x28]\n" // Store output (2, 0)
+ "mov v7.16b, v17.16b\n"
+ "fadd v17.4s, v22.4s, v23.4s\n"
+ "mov v8.16b, v22.16b\n"
+ "fadd v13.4s, v13.4s, v21.4s\n"
+ "fmul v10.4s, v10.4s, v0.s[0]\n"
+ "fmla v7.4s, v16.4s, v0.s[1]\n"
+ "ldr s28, [x27, x22]\n"
+ "fmla v8.4s, v23.4s, v0.s[1]\n"
+ "ldr s23, [x27, x23]\n"
+ "fmla v4.4s, v21.4s, v0.s[1]\n"
+ "ldr s25, [x27, x24]\n"
+ "str s13, [%[outptr0], x15]\n" // Store output (0, 2)
+ "fadd v16.4s, v27.4s, v29.4s\n"
+ "fadd v7.4s, v7.4s, v5.4s\n"
+ "fadd v30.4s, v28.4s, v23.4s\n"
+ "fadd v8.4s, v8.4s, v18.4s\n"
+ "fsub v29.4s, v27.4s, v29.4s\n"
+ "str s4, [x28, x15]\n" // Store output (2, 2)
+ "fsub v27.4s, v28.4s, v23.4s\n"
+ "fadd v19.4s, v19.4s, v16.4s\n"
+ "mov v16.16b, v16.16b\n"
+ "fadd v21.4s, v12.4s, v24.4s\n"
+ "mov v26.16b, v12.16b\n"
+ "mov v5.16b, v29.16b\n"
+ "fadd v22.4s, v20.4s, v17.4s\n"
+ "fmul v27.4s, v27.4s, v0.s[0]\n"
+ "fmla v16.4s, v30.4s, v0.s[1]\n"
+ "fadd v19.4s, v19.4s, v30.4s\n"
+ "fmla v26.4s, v24.4s, v0.s[1]\n"
+ "str s21, [x17]\n" // Store output (1, 0)
+ "fsub v20.4s, v20.4s, v17.4s\n"
+ "fadd v15.4s, v15.4s, v22.4s\n"
+ "fmla v14.4s, v22.4s, v0.s[1]\n"
+ "fadd v18.4s, v29.4s, v27.4s\n"
+ "fmla v5.4s, v27.4s, v0.s[1]\n"
+ "fadd v26.4s, v26.4s, v19.4s\n"
+ "mov v28.16b, v11.16b\n"
+ "fmul v20.4s, v20.4s, v0.s[0]\n"
+ "fadd v13.4s, v9.4s, v10.4s\n"
+ "str s15, [%[outptr0], %[output_col_stride1]]\n" // Store output (0, 1)
+ "mov v15.16b, v9.16b\n"
+ "str s14, [x28, %[output_col_stride1]]\n" // Store output (2, 1)
+ "fadd v5.4s, v5.4s, v25.4s\n"
+ "str s26, [x18]\n" // Store output (3, 0)
+ "fadd v30.4s, v7.4s, v8.4s\n"
+ "str s13, [x17, x15]\n" // Store output (1, 2)
+ "fadd v12.4s, v11.4s, v20.4s\n"
+ "fmla v28.4s, v20.4s, v0.s[1]\n"
+ "fmla v15.4s, v10.4s, v0.s[1]\n"
+ "fadd v1.4s, v1.4s, v6.4s\n"
+ "fsub v8.4s, v7.4s, v8.4s\n"
+ "mov v6.16b, v6.16b\n"
+ "mov v9.16b, v2.16b\n"
+ "str s12, [x17, %[output_col_stride1]]\n" // Store output (1, 1)
+ "fadd v28.4s, v28.4s, v18.4s\n"
+ "fadd v15.4s, v15.4s, v16.4s\n"
+ "fadd v1.4s, v1.4s, v30.4s\n"
+ "fmul v8.4s, v8.4s, v0.s[0]\n"
+ "fmla v6.4s, v30.4s, v0.s[1]\n"
+ "str s28, [x18, %[output_col_stride1]]\n" // Store output (3, 1)
+ "str s1, [%[outptr0], x16]\n" // Store output (0, 3)
+ "str s6, [x28, x16]\n" // Store output (2, 3)
+ "fadd v2.4s, v2.4s, v8.4s\n"
+ "str s15, [x18, x15]\n" // Store output (3, 2)
+ "fmla v9.4s, v8.4s, v0.s[1]\n"
+ "str s2, [x17, x16]\n" // Store output (1, 3)
+ "fadd v9.4s, v9.4s, v5.4s\n"
+ "str s9, [x18, x16]\n" // Store output (3, 3)
+ "6:\n" // End
+ : [outptr0] "+r" (output), [inptr0] "+r" (inptr)
+ : [output_col_stride1] "r" (output_col_stride * sizeof(float)), [pcoeffs] "r" (coeffs), [n_channels] "r" ((long) n_channels), [in_row_stride] "r" (6 * matrix_stride * sizeof(float)), [in_col_stride1] "r" (matrix_stride * sizeof(float)), [output_row_stride] "r" (output_row_stride * sizeof(float))
+ : "cc", "v0", "v1", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v2", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v3", "v30", "v31", "v4", "v5", "v6", "v7", "v8", "v9", "x13", "x14", "x15", "x16", "x17", "x18", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "memory"
+ );
+ }
+}
+
+#else
+
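+// Generic fallback for the same output-tile transform, used when the
+// hand-written AArch64 assembly specialisation above is not compiled in
+// (an assumption based on the surrounding preprocessor guard).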
+template <>
+void winograd::OutputTransform<3, 3, 6, 6, float, float, winograd::WinogradRoots::Integers>::transform_tile(
+ const int n_channels,
+ const float* inptr,
+ const int matrix_stride,
+ const float* bptr,
+ float* const output,
+ const int output_row_stride,
+ const int output_col_stride
+)
+{
+ // Construct a map to the output cells
+ float *outptrs[output_tile_rows][output_tile_cols];
+ for (int i = 0; i < output_tile_rows; i++)
+ {
+ for (int j = 0; j < output_tile_cols; j++)
+ {
+ outptrs[i][j] = output + i*output_row_stride + j*output_col_stride;
+ }
+ }
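+  // output_tile_rows == output_tile_cols == 4 here: this is the F(4x4, 3x3)
+  // specialisation, so each 6x6 Winograd-domain tile yields a 4x4 spatial tile.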
+
+ // For each channel of the output
+ int channels_remaining = n_channels;
+#ifdef __arm__
+ for (; channels_remaining >= 2; channels_remaining -= 2)
+ {
+ // Matrices used and computed during this transform
+ float32x2_t F[6][6], FZ[6][4], f[4][4], b;
+
+ // Read a 6x6 tile in the Winograd domain
+ for (int i = 0, m = 0; i < 6; i++)
+ {
+ for (int j = 0; j < 6; j++, m++)
+ {
+ F[i][j] = vld1_f32(inptr + m*matrix_stride);
+ }
+ }
+ inptr += 2;
+
+ // Compute the matrix F Z
+ for (int i = 0; i < 6; i++)
+ {
+ // FZ[i][0] = 1*F[i][0] + 1*F[i][1] + 1*F[i][2] + 1*F[i][3] + 1*F[i][4];
+ FZ[i][0] = vadd_f32(vadd_f32(vadd_f32(F[i][0], F[i][1]), vadd_f32(F[i][2], F[i][3])), F[i][4]);
+
+ // FZ[i][1] = 1*F[i][1] + -1*F[i][2] + 2*F[i][3] + -2*F[i][4];
+ FZ[i][1] = vmla_n_f32(vsub_f32(F[i][1], F[i][2]), vsub_f32(F[i][3], F[i][4]), 2.0f);
+
+ // FZ[i][2] = 1*F[i][1] + 1*F[i][2] + 4*F[i][3] + 4*F[i][4];
+ FZ[i][2] = vmla_n_f32(vadd_f32(F[i][1], F[i][2]), vadd_f32(F[i][3], F[i][4]), 4.0f);
+
+ // FZ[i][3] = 1*F[i][1] + -1*F[i][2] + 8*F[i][3] + -8*F[i][4] + 1*F[i][5];
+ FZ[i][3] = vadd_f32(vmla_n_f32(vsub_f32(F[i][1], F[i][2]), vsub_f32(F[i][3], F[i][4]), 8.0f), F[i][5]);
+ }
+
+ // Compute the output tile f = ZT F Z
+ for (int j = 0; j < 4; j++)
+ {
+ // f[0][j] = 1*FZ[0][j] + 1*FZ[1][j] + 1*FZ[2][j] + 1*FZ[3][j] + 1*FZ[4][j];
+ f[0][j] = vadd_f32(vadd_f32(vadd_f32(FZ[0][j], FZ[1][j]), vadd_f32(FZ[2][j], FZ[3][j])), FZ[4][j]);
+
+ // f[1][j] = 1*FZ[1][j] + -1*FZ[2][j] + 2*FZ[3][j] + -2*FZ[4][j];
+ f[1][j] = vmla_n_f32(vsub_f32(FZ[1][j], FZ[2][j]), vsub_f32(FZ[3][j], FZ[4][j]), 2.0f);
+
+ // f[2][j] = 1*FZ[1][j] + 1*FZ[2][j] + 4*FZ[3][j] + 4*FZ[4][j];
+ f[2][j] = vmla_n_f32(vadd_f32(FZ[1][j], FZ[2][j]), vadd_f32(FZ[3][j], FZ[4][j]), 4.0f);
+
+ // f[3][j] = 1*FZ[1][j] + -1*FZ[2][j] + 8*FZ[3][j] + -8*FZ[4][j] + 1*FZ[5][j];
+ f[3][j] = vadd_f32(vmla_n_f32(vsub_f32(FZ[1][j], FZ[2][j]), vsub_f32(FZ[3][j], FZ[4][j]), 8.0f), FZ[5][j]);
+ }
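+
+    // In matrix form the two loops above compute f = Z^T F Z, where Z^T is the
+    // 4x6 output transform
+    //     [ 1  1  1  1  1  0 ]
+    //     [ 0  1 -1  2 -2  0 ]
+    //     [ 0  1  1  4  4  0 ]
+    //     [ 0  1 -1  8 -8  1 ]
+    // (a sketch inferred from the accumulations spelled out in the comments).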
+
+ // Write out the output tile
+ if (bptr != nullptr)
+ {
+ b = vld1_f32(bptr);
+ bptr += 2;
+ }
+ else
+ {
+ b = vdup_n_f32(0.0f);
+ }
+ for (int i = 0; i < output_tile_rows; i++)
+ {
+ for (int j = 0; j < output_tile_cols; j++)
+ {
+ vst1_f32(outptrs[i][j], vadd_f32(f[i][j], b));
+ outptrs[i][j] += 2;
+ }
+ }
+ }
+#endif // __arm__
+ for (; channels_remaining; channels_remaining--)
+ {
+ // Matrices used and computed during this transform
+ float F[6][6], FZ[6][4], f[4][4], b;
+
+ // Read a 6x6 tile in the Winograd domain
+ for (int i = 0, m = 0; i < 6; i++)
+ {
+ for (int j = 0; j < 6; j++, m++)
+ {
+ F[i][j] = *(inptr + m*matrix_stride);
+ }
+ }
+ inptr++;
+
+ // Compute the matrix F Z
+ for (int i = 0; i < 6; i++)
+ {
+ FZ[i][0] = 1*F[i][0] + 1*F[i][1] + 1*F[i][2] + 1*F[i][3] + 1*F[i][4];
+ FZ[i][1] = 1*F[i][1] + -1*F[i][2] + 2*F[i][3] + -2*F[i][4];
+ FZ[i][2] = 1*F[i][1] + 1*F[i][2] + 4*F[i][3] + 4*F[i][4];
+ FZ[i][3] = 1*F[i][1] + -1*F[i][2] + 8*F[i][3] + -8*F[i][4] + 1*F[i][5];
+ }
+
+ // Compute the output tile f = ZT F Z
+ for (int j = 0; j < 4; j++)
+ {
+ f[0][j] = 1*FZ[0][j] + 1*FZ[1][j] + 1*FZ[2][j] + 1*FZ[3][j] + 1*FZ[4][j];
+ f[1][j] = 1*FZ[1][j] + -1*FZ[2][j] + 2*FZ[3][j] + -2*FZ[4][j];
+ f[2][j] = 1*FZ[1][j] + 1*FZ[2][j] + 4*FZ[3][j] + 4*FZ[4][j];
+ f[3][j] = 1*FZ[1][j] + -1*FZ[2][j] + 8*FZ[3][j] + -8*FZ[4][j] + 1*FZ[5][j];
+ }
+
+ // Write out the output tile
+ if (bptr != nullptr)
+ {
+ b = *(bptr++);
+ }
+ else
+ {
+ b = 0.0f;
+ }
+ for (int i = 0; i < output_tile_rows; i++)
+ {
+ for (int j = 0; j < output_tile_cols; j++)
+ {
+ *(outptrs[i][j]++) = f[i][j] + b;
+ }
+ }
+ }
+}
+
+#endif
+
+template class OutputTransform<3, 3, 6, 6, float, float, winograd::WinogradRoots::Integers>;
+
+} // namespace winograd
diff --git a/src/core/NEON/kernels/convolution/winograd/transforms/output_6_3_fp32.cpp b/src/core/NEON/kernels/convolution/winograd/winograd_transforms/output_6_3_fp32_fp32_integers.cpp
index 58bed71a47..ce921cea01 100644
--- a/src/core/NEON/kernels/convolution/winograd/transforms/output_6_3_fp32.cpp
+++ b/src/core/NEON/kernels/convolution/winograd/winograd_transforms/output_6_3_fp32_fp32_integers.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -22,42 +22,29 @@
* SOFTWARE.
*/
-#include "arm_compute/core/NEON/kernels/convolution/winograd/transforms/output.hpp"
-#include "arm_compute/core/NEON/kernels/convolution/winograd/winograd_output_transform.hpp"
-#include "arm_compute/core/NEON/kernels/convolution/common/arm.hpp"
+#include "output.hpp"
+#include "arm.hpp"
-namespace
+namespace winograd
{
-template <bool Specialized, int PadRight=0>
-void winograd_output_transform_6_3_fp32_process_tile(
+template <>
+void OutputTransform<1, 3, 1, 8, float, float, WinogradRoots::Integers>::transform_tile(
const int n_channels,
- const float* const matrix_base,
+ const float* inptr,
const int matrix_stride,
- const float* const biases,
+ const float* bptr,
float* const output,
- const int output_row_stride,
- const int output_col_stride,
- const int _pad_bottom,
- const int _pad_right
+ const int, // No need to stride across rows
+ const int output_col_stride
)
{
- (void) output_row_stride;
- (void) _pad_bottom;
- constexpr int output_tile_cols = 6;
- constexpr int inner_tile_cols = 8;
-
- const int pad_right = Specialized ? PadRight : _pad_right;
- const int cells_j = output_tile_cols - pad_right;
-
// Construct a map to the output cells
- float *outptrs[cells_j];
- for (int j = 0; j < cells_j; j++)
+ float *outptrs[output_tile_cols];
+ for (int j = 0; j < output_tile_cols; j++)
{
outptrs[j] = output + j*output_col_stride;
}
- const float *inptr = matrix_base;
- const float *bptr = biases;
// For each channel of the output
int channels_remaining = n_channels;
@@ -87,7 +74,7 @@ void winograd_output_transform_6_3_fp32_process_tile(
b = vld1q_f32(bptr);
bptr += 4;
}
- for (int j = 0; j < cells_j; j++)
+ for (int j = 0; j < output_tile_cols; j++)
{
vst1q_f32(outptrs[j], f[j] + b);
outptrs[j] += 4;
@@ -118,7 +105,7 @@ void winograd_output_transform_6_3_fp32_process_tile(
b = vld1_f32(bptr);
bptr += 2;
}
- for (int j = 0; j < cells_j; j++)
+ for (int j = 0; j < output_tile_cols; j++)
{
vst1_f32(outptrs[j], f[j] + b);
outptrs[j] += 2;
@@ -149,31 +136,14 @@ void winograd_output_transform_6_3_fp32_process_tile(
{
b = *(bptr++);
}
- for (int j = 0; j < cells_j; j++)
+ for (int j = 0; j < output_tile_cols; j++)
{
*(outptrs[j]++) = f[j] + b;
}
}
}
-} // namespace (anonymous)
-
-namespace winograd
-{
-using Tiles = OutputTransformImplTiles<1, 3, 1, 8, float>;
-
-template <>
-const Tiles::TileFn Tiles::tilefn_unpadded = winograd_output_transform_6_3_fp32_process_tile<true>;
-
-template <>
-const Tiles::TileFn Tiles::tilefn_right_padded[n_pad_right] = {
- winograd_output_transform_6_3_fp32_process_tile<true, 1>,
- winograd_output_transform_6_3_fp32_process_tile<true, 2>,
- winograd_output_transform_6_3_fp32_process_tile<true, 3>,
- winograd_output_transform_6_3_fp32_process_tile<true, 4>,
- winograd_output_transform_6_3_fp32_process_tile<true, 5>,
-};
+template class OutputTransform<1, 3, 1, 8, float, float, WinogradRoots::Integers>;
+template class OutputTransform<3, 1, 8, 1, float, float, WinogradRoots::Integers>;
-template class OutputTransform<1, 3, 1, 8, float>;
-template class OutputTransform<3, 1, 8, 1, float>;
-} // namespace winograd
+} // namespace winograd
diff --git a/src/core/NEON/kernels/convolution/winograd/winograd_transforms/weights_2_7_fp32_fp32_integers.cpp b/src/core/NEON/kernels/convolution/winograd/winograd_transforms/weights_2_7_fp32_fp32_integers.cpp
new file mode 100644
index 0000000000..37ae43fdb0
--- /dev/null
+++ b/src/core/NEON/kernels/convolution/winograd/winograd_transforms/weights_2_7_fp32_fp32_integers.cpp
@@ -0,0 +1,90 @@
+/*
+ * Copyright (c) 2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "arm.hpp"
+#include "kernel.hpp"
+
+namespace winograd
+{
+
+template <>
+void WeightTransform<1, 7, 1, 8, float, float, WinogradRoots::Integers>::execute(
+ const int n_output_channels,
+ const int n_input_channels,
+ const float* const input, // NOTE: Data in HWIO order
+ float* const output,
+ const int matrix_stride,
+ const int matrix_row_stride
+)
+{
+ // Get pointers to each cell of the weight tensor
+ const auto weight_col_stride = n_input_channels * n_output_channels;
+ const float *inptrs[kernel_cols];
+ for (int j = 0; j < kernel_cols; j++)
+ {
+ inptrs[j] = input + j*weight_col_stride;
+ }
+
+ // For each input channel
+ for (int ic = 0; ic < n_input_channels; ic++)
+ {
+ float *outptr = output + ic * matrix_row_stride;
+
+ // For each output channel
+ int channels_remaining = n_output_channels;
+ for (; channels_remaining; channels_remaining--)
+ {
+ // Matrices used and computed in this kernel
+ float w[kernel_cols], V[inner_tile_cols];
+
+ // Read weights
+ for (int j = 0; j < kernel_cols; j++)
+ {
+ w[j] = *(inptrs[j]++);
+ }
+
+ // Compute V = w WT
+ V[0] = (w[0]*-1) / 36.0f;
+ V[1] = (w[1]*-1 + w[3]*-1 + w[5]*-1 + w[0]*1 + w[2]*1 + w[4]*1 + w[6]*1) / 48.0f;
+ V[2] = (w[0]*1 + w[1]*1 + w[2]*1 + w[3]*1 + w[4]*1 + w[5]*1 + w[6]*1) / 48.0f;
+ V[3] = (w[0]*-1 + w[6]*-64 + w[4]*-16 + w[2]*-4 + w[1]*2 + w[3]*8 + w[5]*32) / 120.0f;
+ V[4] = (w[0]*-1 + w[6]*-64 + w[5]*-32 + w[4]*-16 + w[3]*-8 + w[2]*-4 + w[1]*-2) / 120.0f;
+ V[5] = (w[5]*-243 + w[3]*-27 + w[1]*-3 + w[2]*9 + w[4]*81 + w[6]*729 + w[0]*1) / 720.0f;
+ V[6] = (w[1]*3 + w[2]*9 + w[3]*27 + w[4]*81 + w[5]*243 + w[6]*729 + w[0]*1) / 720.0f;
+ V[7] = (w[6]*1) / 1.0f;
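+
+      // Read as polynomial evaluation: with p(x) = w[0] + w[1]*x + ... + w[6]*x^6,
+      // the eight rows sample p at the integer points {0, +/-1, +/-2, +/-3} and at
+      // infinity (the leading coefficient), each with its own sign and scale
+      // normalisation -- hence WinogradRoots::Integers (inferred from the
+      // coefficients above).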
+
+ // Store the transformed weights
+ for (int j = 0; j < inner_tile_cols; j++)
+ {
+ *(outptr + j*matrix_stride) = V[j];
+ }
+ outptr++;
+ }
+ }
+}
+
+template class WeightTransform<1, 7, 1, 8, float, float, WinogradRoots::Integers>;
+template class WeightTransform<7, 1, 8, 1, float, float, WinogradRoots::Integers>;
+
+} // namespace winograd
diff --git a/src/core/NEON/kernels/convolution/winograd/winograd_transforms/weights_2x2_3x3_fp32_fp32_integers.cpp b/src/core/NEON/kernels/convolution/winograd/winograd_transforms/weights_2x2_3x3_fp32_fp32_integers.cpp
new file mode 100644
index 0000000000..8fab6db1ba
--- /dev/null
+++ b/src/core/NEON/kernels/convolution/winograd/winograd_transforms/weights_2x2_3x3_fp32_fp32_integers.cpp
@@ -0,0 +1,220 @@
+/*
+ * Copyright (c) 2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "arm.hpp"
+#include "kernel.hpp"
+
+namespace winograd
+{
+
+template <>
+void WeightTransform<3, 3, 4, 4, float, float, WinogradRoots::Integers>::execute(
+ const int n_output_channels,
+ const int n_input_channels,
+ const float* const input,
+ float* const output,
+ const int matrix_stride,
+ const int matrix_row_stride
+)
+{
+ constexpr int inner_tile_i = 4;
+ constexpr int inner_tile_j = 4;
+
+ // Get pointers to each cell of the weight tensor
+ const auto weight_col_stride = n_input_channels * n_output_channels;
+ const auto weight_row_stride = 3 * weight_col_stride;
+ const float *inptrs[3][3];
+ for (int i = 0; i < 3; i++)
+ {
+ for (int j = 0; j < 3; j++)
+ {
+ inptrs[i][j] = input + i*weight_row_stride + j*weight_col_stride;
+ }
+ }
+
+ // For each input channel
+ for (int ic = 0; ic < n_input_channels; ic++)
+ {
+ float *outptr = output + ic * matrix_row_stride;
+
+ // For each output channel
+ int channels_remaining = n_output_channels;
+#ifdef __aarch64__
+ for (; channels_remaining >= 4; channels_remaining -= 4)
+ {
+ // Matrices used and computed in this kernel
+ float32x4_t w[3][3], Ww[inner_tile_i][3], V[inner_tile_i][inner_tile_j];
+
+ // Read weights
+ for (int i = 0; i < 3; i++)
+ {
+ for (int j = 0; j < 3; j++)
+ {
+ w[i][j] = vld1q_f32(inptrs[i][j]);
+ inptrs[i][j] += 4;
+ }
+ }
+
+ // Compute the matrix W w
+ for (int j = 0; j < 3; j++)
+ {
+ Ww[0][j] = w[0][j];
+
+ // Ww[1][j] = 0.5*(w[0][j] + w[1][j] + w[2][j]);
+ Ww[1][j] = vmulq_n_f32(vaddq_f32(vaddq_f32(w[0][j], w[1][j]), w[2][j]), 0.5f);
+
+ // Ww[2][j] = 0.5*(w[0][j] - w[1][j] + w[2][j]);
+ Ww[2][j] = vmulq_n_f32(vaddq_f32(vsubq_f32(w[0][j], w[1][j]), w[2][j]), 0.5f);
+
+ Ww[3][j] = w[2][j];
+ }
+
+ // Compute V = W w WT
+ for (int i = 0; i < inner_tile_i; i++)
+ {
+ V[i][0] = Ww[i][0];
+
+ // V[i][1] = 0.5*(Ww[i][0] + Ww[i][1] + Ww[i][2]);
+ V[i][1] = vmulq_n_f32(vaddq_f32(vaddq_f32(Ww[i][0], Ww[i][1]), Ww[i][2]), 0.5f);
+
+ // V[i][2] = 0.5*(Ww[i][0] - Ww[i][1] + Ww[i][2]);
+ V[i][2] = vmulq_n_f32(vaddq_f32(vsubq_f32(Ww[i][0], Ww[i][1]), Ww[i][2]), 0.5f);
+
+ V[i][3] = Ww[i][2];
+ }
+
+ // Store the transformed weights
+ for (int i = 0, m = 0; i < inner_tile_i; i++)
+ {
+ for (int j = 0; j < inner_tile_j; j++, m++)
+ {
+ vst1q_f32(outptr + m*matrix_stride, V[i][j]);
+ }
+ }
+ outptr += 4;
+ }
+#endif // __aarch64__
+#ifdef __arm_any__
+ for (; channels_remaining >= 2; channels_remaining -= 2)
+ {
+ // Matrices used and computed in this kernel
+ float32x2_t w[3][3], Ww[inner_tile_i][3], V[inner_tile_i][inner_tile_j];
+
+ // Read weights
+ for (int i = 0; i < 3; i++)
+ {
+ for (int j = 0; j < 3; j++)
+ {
+ w[i][j] = vld1_f32(inptrs[i][j]);
+ inptrs[i][j] += 2;
+ }
+ }
+
+ // Compute the matrix W w
+ for (int j = 0; j < 3; j++)
+ {
+ Ww[0][j] = w[0][j];
+
+ // Ww[1][j] = 0.5*(w[0][j] + w[1][j] + w[2][j]);
+ Ww[1][j] = vmul_n_f32(vadd_f32(vadd_f32(w[0][j], w[1][j]), w[2][j]), 0.5f);
+
+ // Ww[2][j] = 0.5*(w[0][j] - w[1][j] + w[2][j]);
+ Ww[2][j] = vmul_n_f32(vadd_f32(vsub_f32(w[0][j], w[1][j]), w[2][j]), 0.5f);
+
+ Ww[3][j] = w[2][j];
+ }
+
+ // Compute V = W w WT
+ for (int i = 0; i < inner_tile_i; i++)
+ {
+ V[i][0] = Ww[i][0];
+
+ // V[i][1] = 0.5*(Ww[i][0] + Ww[i][1] + Ww[i][2]);
+ V[i][1] = vmul_n_f32(vadd_f32(vadd_f32(Ww[i][0], Ww[i][1]), Ww[i][2]), 0.5f);
+
+ // V[i][2] = 0.5*(Ww[i][0] - Ww[i][1] + Ww[i][2]);
+ V[i][2] = vmul_n_f32(vadd_f32(vsub_f32(Ww[i][0], Ww[i][1]), Ww[i][2]), 0.5f);
+
+ V[i][3] = Ww[i][2];
+ }
+
+ // Store the transformed weights
+ for (int i = 0, m = 0; i < inner_tile_i; i++)
+ {
+ for (int j = 0; j < inner_tile_j; j++, m++)
+ {
+ vst1_f32(outptr + m*matrix_stride, V[i][j]);
+ }
+ }
+ outptr += 2;
+ }
+#endif // __arm_any__
+ for (; channels_remaining; channels_remaining--)
+ {
+ // Matrices used and computed in this kernel
+ float w[3][3], Ww[inner_tile_i][3], V[inner_tile_i][inner_tile_j];
+
+ // Read weights
+ for (int i = 0; i < 3; i++)
+ {
+ for (int j = 0; j < 3; j++)
+ {
+ w[i][j] = *(inptrs[i][j]++);
+ }
+ }
+
+ // Compute the matrix W w
+ for (int j = 0; j < 3; j++)
+ {
+ Ww[0][j] = w[0][j];
+ Ww[1][j] = 0.5*(w[0][j] + w[1][j] + w[2][j]);
+ Ww[2][j] = 0.5*(w[0][j] - w[1][j] + w[2][j]);
+ Ww[3][j] = w[2][j];
+ }
+
+ // Compute V = W w WT
+ for (int i = 0; i < inner_tile_i; i++)
+ {
+ V[i][0] = Ww[i][0];
+ V[i][1] = 0.5*(Ww[i][0] + Ww[i][1] + Ww[i][2]);
+ V[i][2] = 0.5*(Ww[i][0] - Ww[i][1] + Ww[i][2]);
+ V[i][3] = Ww[i][2];
+ }
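+
+      // In matrix form: V = W w W^T with
+      //     W = [ 1.0  0.0  0.0 ]
+      //         [ 0.5  0.5  0.5 ]
+      //         [ 0.5 -0.5  0.5 ]
+      //         [ 0.0  0.0  1.0 ]
+      // i.e. the standard F(2x2, 3x3) weight transform (a sketch inferred from
+      // the accumulations above).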
+
+ // Store the transformed weights
+ for (int i = 0, m = 0; i < inner_tile_i; i++)
+ {
+ for (int j = 0; j < inner_tile_j; j++, m++)
+ {
+ *(outptr + m*matrix_stride) = V[i][j];
+ }
+ }
+ outptr++;
+ }
+ }
+}
+
+template class WeightTransform<3, 3, 4, 4, float, float, WinogradRoots::Integers>;
+
+} // namespace winograd
diff --git a/src/core/NEON/kernels/convolution/winograd/winograd_transforms/weights_2x2_5x5_fp32_fp32_integers.cpp b/src/core/NEON/kernels/convolution/winograd/winograd_transforms/weights_2x2_5x5_fp32_fp32_integers.cpp
new file mode 100644
index 0000000000..79f4fa30c4
--- /dev/null
+++ b/src/core/NEON/kernels/convolution/winograd/winograd_transforms/weights_2x2_5x5_fp32_fp32_integers.cpp
@@ -0,0 +1,401 @@
+/*
+ * Copyright (c) 2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "arm.hpp"
+#include "kernel.hpp"
+
+namespace winograd
+{
+
+template <>
+void WeightTransform<5, 5, 6, 6, float, float, WinogradRoots::Integers>::execute(
+ const int n_output_channels,
+ const int n_input_channels,
+ const float* const input,
+ float* const output,
+ const int matrix_stride,
+ const int matrix_row_stride
+)
+{
+ // Get pointers to each cell of the weight tensor
+ const auto weight_col_stride = n_input_channels * n_output_channels;
+ const auto weight_row_stride = 5 * weight_col_stride;
+ const float *inptrs[5][5];
+ for (int i = 0; i < 5; i++)
+ {
+ for (int j = 0; j < 5; j++)
+ {
+ inptrs[i][j] = input + i*weight_row_stride + j*weight_col_stride;
+ }
+ }
+
+ // For each input channel
+ for (int ic = 0; ic < n_input_channels; ic++)
+ {
+ float *outptr = output + ic * matrix_row_stride;
+
+ // For each output channel
+ int channels_remaining = n_output_channels;
+#ifdef __aarch64__
+ for (; channels_remaining >= 4; channels_remaining -= 4)
+ {
+ // Matrices used and computed in this kernel
+ float32x4_t w[5][5], Ww[6][5], V[6][6];
+
+ // Read weights
+ for (int i = 0; i < 5; i++)
+ {
+ for (int j = 0; j < 5; j++)
+ {
+ w[i][j] = vld1q_f32(inptrs[i][j]);
+ inptrs[i][j] += 4;
+ }
+ }
+
+ // Compute the matrix W w
+ for (int j = 0; j < 5; j++)
+ {
+ // Ww[0][j] = w[0][j]/4.0f;
+ Ww[0][j] = vmulq_n_f32(w[0][j], 1.0f/4.0f);
+
+ // Ww[1][j] = -( w[0][j] + w[1][j] + w[2][j] + w[3][j] + w[4][j])/6.0f;
+ Ww[1][j] = vmulq_n_f32(
+ vaddq_f32(
+ vaddq_f32(
+ vaddq_f32(w[1][j], w[0][j]),
+ vaddq_f32(w[3][j], w[2][j])
+ ),
+ w[4][j]
+ ),
+ -1.0f/6.0f
+ );
+
+ // Ww[2][j] = +(-w[0][j] + w[1][j] - w[2][j] + w[3][j] - w[4][j])/6.0f;
+ // Ww[2][j] = ((w[1][j] - w[0][j]) + (w[3][j] - w[2][j]) - w[4][j])/6.0f;
+ Ww[2][j] = vmulq_n_f32(
+ vsubq_f32(
+ vaddq_f32(
+ vsubq_f32(w[1][j], w[0][j]),
+ vsubq_f32(w[3][j], w[2][j])
+ ),
+ w[4][j]
+ ),
+ 1.0f/6.0f
+ );
+
+ // Ww[3][j] = (w[0][j]/8.0f + w[1][j]/4.0f + w[2][j]/2.0f + w[3][j] + 2*w[4][j])/3.0f;
+ Ww[3][j] = vmulq_n_f32(
+ vmlaq_n_f32(
+ vaddq_f32(
+ vaddq_f32(vmulq_n_f32(w[0][j], 1.0f/8.0f), vmulq_n_f32(w[1][j], 1.0f/4.0f)),
+ vaddq_f32(vmulq_n_f32(w[2][j], 1.0f/2.0f), w[3][j])
+ ),
+ w[4][j], 2.0f
+ ),
+ 1.0f/3.0f
+ );
+
+ // Ww[4][j] = (w[0][j]/8.0f - w[1][j]/4.0f + w[2][j]/2.0f - w[3][j] + 2*w[4][j])/3.0f;
+ Ww[4][j] = vmulq_n_f32(
+ vmlaq_n_f32(
+ vaddq_f32(
+ vsubq_f32(vmulq_n_f32(w[0][j], 1.0f/8.0f), vmulq_n_f32(w[1][j], 1.0f/4.0f)),
+ vsubq_f32(vmulq_n_f32(w[2][j], 1.0f/2.0f), w[3][j])
+ ),
+ w[4][j], 2.0f
+ ),
+ 1.0f/3.0f
+ );
+
+ // Ww[5][j] = w[4][j];
+ Ww[5][j] = w[4][j];
+ }
+
+ // Compute V = W w WT
+ for (int i = 0; i < 6; i++)
+ {
+ // V[i][0] = Ww[i][0]/4.0f;
+ V[i][0] = vmulq_n_f32(Ww[i][0], 1.0f/4.0f);
+
+ // V[i][1] = -( Ww[i][0] + Ww[i][1] + Ww[i][2] + Ww[i][3] + Ww[i][4])/6.0f;
+ V[i][1] = vmulq_n_f32(
+ vaddq_f32(
+ vaddq_f32(
+ vaddq_f32(Ww[i][1], Ww[i][0]),
+ vaddq_f32(Ww[i][3], Ww[i][2])
+ ),
+ Ww[i][4]
+ ),
+ -1.0f/6.0f
+ );
+
+ // V[i][2] = +(-Ww[i][0] + Ww[i][1] - Ww[i][2] + Ww[i][3] - Ww[i][4])/6.0f;
+ // V[i][2] = ((Ww[i][1] - Ww[i][0]) + (Ww[i][3] - Ww[i][2]) - Ww[i][4])/6.0f;
+ V[i][2] = vmulq_n_f32(
+ vsubq_f32(
+ vaddq_f32(
+ vsubq_f32(Ww[i][1], Ww[i][0]),
+ vsubq_f32(Ww[i][3], Ww[i][2])
+ ),
+ Ww[i][4]
+ ),
+ 1.0f/6.0f
+ );
+
+ // V[i][3] = (Ww[i][0]/8.0f + Ww[i][1]/4.0f + Ww[i][2]/2.0f + Ww[i][3] + 2*Ww[i][4])/3.0f;
+ V[i][3] = vmulq_n_f32(
+ vmlaq_n_f32(
+ vaddq_f32(
+ vaddq_f32(vmulq_n_f32(Ww[i][0], 1.0f/8.0f), vmulq_n_f32(Ww[i][1], 1.0f/4.0f)),
+ vaddq_f32(vmulq_n_f32(Ww[i][2], 1.0f/2.0f), Ww[i][3])
+ ),
+ Ww[i][4], 2.0f
+ ),
+ 1.0f/3.0f
+ );
+
+ // V[i][4] = (Ww[i][0]/8.0f - Ww[i][1]/4.0f + Ww[i][2]/2.0f - Ww[i][3] + 2*Ww[i][4])/3.0f;
+ V[i][4] = vmulq_n_f32(
+ vmlaq_n_f32(
+ vaddq_f32(
+ vsubq_f32(vmulq_n_f32(Ww[i][0], 1.0f/8.0f), vmulq_n_f32(Ww[i][1], 1.0f/4.0f)),
+ vsubq_f32(vmulq_n_f32(Ww[i][2], 1.0f/2.0f), Ww[i][3])
+ ),
+ Ww[i][4], 2.0f
+ ),
+ 1.0f/3.0f
+ );
+
+ // V[i][5] = Ww[i][4];
+ V[i][5] = Ww[i][4];
+ }
+
+ // Store the transformed weights
+ for (int i = 0, m = 0; i < 6; i++)
+ {
+ for (int j = 0; j < 6; j++, m++)
+ {
+ vst1q_f32(outptr + m*matrix_stride, V[i][j]);
+ }
+ }
+ outptr += 4;
+ }
+#endif // __aarch64__
+#ifdef __arm_any__
+ for (; channels_remaining >= 2; channels_remaining -= 2)
+ {
+ // Matrices used and computed in this kernel
+ float32x2_t w[5][5], Ww[6][5], V[6][6];
+
+ // Read weights
+ for (int i = 0; i < 5; i++)
+ {
+ for (int j = 0; j < 5; j++)
+ {
+ w[i][j] = vld1_f32(inptrs[i][j]);
+ inptrs[i][j] += 2;
+ }
+ }
+
+ // Compute the matrix W w
+ for (int j = 0; j < 5; j++)
+ {
+ // Ww[0][j] = w[0][j]/4.0f;
+ Ww[0][j] = vmul_n_f32(w[0][j], 1.0f/4.0f);
+
+ // Ww[1][j] = -( w[0][j] + w[1][j] + w[2][j] + w[3][j] + w[4][j])/6.0f;
+ Ww[1][j] = vmul_n_f32(
+ vadd_f32(
+ vadd_f32(
+ vadd_f32(w[1][j], w[0][j]),
+ vadd_f32(w[3][j], w[2][j])
+ ),
+ w[4][j]
+ ),
+ -1.0f/6.0f
+ );
+
+ // Ww[2][j] = +(-w[0][j] + w[1][j] - w[2][j] + w[3][j] - w[4][j])/6.0f;
+ // Ww[2][j] = ((w[1][j] - w[0][j]) + (w[3][j] - w[2][j]) - w[4][j])/6.0f;
+ Ww[2][j] = vmul_n_f32(
+ vsub_f32(
+ vadd_f32(
+ vsub_f32(w[1][j], w[0][j]),
+ vsub_f32(w[3][j], w[2][j])
+ ),
+ w[4][j]
+ ),
+ 1.0f/6.0f
+ );
+
+ // Ww[3][j] = (w[0][j]/8.0f + w[1][j]/4.0f + w[2][j]/2.0f + w[3][j] + 2*w[4][j])/3.0f;
+ Ww[3][j] = vmul_n_f32(
+ vmla_n_f32(
+ vadd_f32(
+ vadd_f32(vmul_n_f32(w[0][j], 1.0f/8.0f), vmul_n_f32(w[1][j], 1.0f/4.0f)),
+ vadd_f32(vmul_n_f32(w[2][j], 1.0f/2.0f), w[3][j])
+ ),
+ w[4][j], 2.0f
+ ),
+ 1.0f/3.0f
+ );
+
+ // Ww[4][j] = (w[0][j]/8.0f - w[1][j]/4.0f + w[2][j]/2.0f - w[3][j] + 2*w[4][j])/3.0f;
+ Ww[4][j] = vmul_n_f32(
+ vmla_n_f32(
+ vadd_f32(
+ vsub_f32(vmul_n_f32(w[0][j], 1.0f/8.0f), vmul_n_f32(w[1][j], 1.0f/4.0f)),
+ vsub_f32(vmul_n_f32(w[2][j], 1.0f/2.0f), w[3][j])
+ ),
+ w[4][j], 2.0f
+ ),
+ 1.0f/3.0f
+ );
+
+ // Ww[5][j] = w[4][j];
+ Ww[5][j] = w[4][j];
+ }
+
+ // Compute V = W w WT
+ for (int i = 0; i < 6; i++)
+ {
+ // V[i][0] = Ww[i][0]/4.0f;
+ V[i][0] = vmul_n_f32(Ww[i][0], 1.0f/4.0f);
+
+ // V[i][1] = -( Ww[i][0] + Ww[i][1] + Ww[i][2] + Ww[i][3] + Ww[i][4])/6.0f;
+ V[i][1] = vmul_n_f32(
+ vadd_f32(
+ vadd_f32(
+ vadd_f32(Ww[i][1], Ww[i][0]),
+ vadd_f32(Ww[i][3], Ww[i][2])
+ ),
+ Ww[i][4]
+ ),
+ -1.0f/6.0f
+ );
+
+ // V[i][2] = +(-Ww[i][0] + Ww[i][1] - Ww[i][2] + Ww[i][3] - Ww[i][4])/6.0f;
+ // V[i][2] = ((Ww[i][1] - Ww[i][0]) + (Ww[i][3] - Ww[i][2]) - Ww[i][4])/6.0f;
+ V[i][2] = vmul_n_f32(
+ vsub_f32(
+ vadd_f32(
+ vsub_f32(Ww[i][1], Ww[i][0]),
+ vsub_f32(Ww[i][3], Ww[i][2])
+ ),
+ Ww[i][4]
+ ),
+ 1.0f/6.0f
+ );
+
+ // V[i][3] = (Ww[i][0]/8.0f + Ww[i][1]/4.0f + Ww[i][2]/2.0f + Ww[i][3] + 2*Ww[i][4])/3.0f;
+ V[i][3] = vmul_n_f32(
+ vmla_n_f32(
+ vadd_f32(
+ vadd_f32(vmul_n_f32(Ww[i][0], 1.0f/8.0f), vmul_n_f32(Ww[i][1], 1.0f/4.0f)),
+ vadd_f32(vmul_n_f32(Ww[i][2], 1.0f/2.0f), Ww[i][3])
+ ),
+ Ww[i][4], 2.0f
+ ),
+ 1.0f/3.0f
+ );
+
+ // V[i][4] = (Ww[i][0]/8.0f - Ww[i][1]/4.0f + Ww[i][2]/2.0f - Ww[i][3] + 2*Ww[i][4])/3.0f;
+ V[i][4] = vmul_n_f32(
+ vmla_n_f32(
+ vadd_f32(
+ vsub_f32(vmul_n_f32(Ww[i][0], 1.0f/8.0f), vmul_n_f32(Ww[i][1], 1.0f/4.0f)),
+ vsub_f32(vmul_n_f32(Ww[i][2], 1.0f/2.0f), Ww[i][3])
+ ),
+ Ww[i][4], 2.0f
+ ),
+ 1.0f/3.0f
+ );
+
+ // V[i][5] = Ww[i][4];
+ V[i][5] = Ww[i][4];
+ }
+
+ // Store the transformed weights
+ for (int i = 0, m = 0; i < 6; i++)
+ {
+ for (int j = 0; j < 6; j++, m++)
+ {
+ vst1_f32(outptr + m*matrix_stride, V[i][j]);
+ }
+ }
+ outptr += 2;
+ }
+#endif // __arm_any__
+ for (; channels_remaining; channels_remaining--)
+ {
+ // Matrices used and computed in this kernel
+ float w[5][5], Ww[6][5], V[6][6];
+
+ // Read weights
+ for (int i = 0; i < 5; i++)
+ {
+ for (int j = 0; j < 5; j++)
+ {
+ w[i][j] = *(inptrs[i][j]++);
+ }
+ }
+
+ // Compute the matrix W w
+ for (int j = 0; j < 5; j++)
+ {
+ Ww[0][j] = w[0][j]/4.0f;
+ Ww[1][j] = -( w[0][j] + w[1][j] + w[2][j] + w[3][j] + w[4][j])/6.0f;
+ Ww[2][j] = +(-w[0][j] + w[1][j] - w[2][j] + w[3][j] - w[4][j])/6.0f;
+ Ww[3][j] = (w[0][j]/8.0f + w[1][j]/4.0f + w[2][j]/2.0f + w[3][j] + 2*w[4][j])/3.0f;
+ Ww[4][j] = (w[0][j]/8.0f - w[1][j]/4.0f + w[2][j]/2.0f - w[3][j] + 2*w[4][j])/3.0f;
+ Ww[5][j] = w[4][j];
+ }
+
+ // Compute V = W w WT
+ for (int i = 0; i < 6; i++)
+ {
+ V[i][0] = Ww[i][0]/4.0f;
+ V[i][1] = -( Ww[i][0] + Ww[i][1] + Ww[i][2] + Ww[i][3] + Ww[i][4])/6.0f;
+ V[i][2] = +(-Ww[i][0] + Ww[i][1] - Ww[i][2] + Ww[i][3] - Ww[i][4])/6.0f;
+ V[i][3] = (Ww[i][0]/8.0f + Ww[i][1]/4.0f + Ww[i][2]/2.0f + Ww[i][3] + 2*Ww[i][4])/3.0f;
+ V[i][4] = (Ww[i][0]/8.0f - Ww[i][1]/4.0f + Ww[i][2]/2.0f - Ww[i][3] + 2*Ww[i][4])/3.0f;
+ V[i][5] = Ww[i][4];
+ }
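+
+      // In matrix form: V = W w W^T with the 6x5 transform
+      //     W = [  1/4     0     0     0     0 ]
+      //         [ -1/6  -1/6  -1/6  -1/6  -1/6 ]
+      //         [ -1/6   1/6  -1/6   1/6  -1/6 ]
+      //         [ 1/24  1/12   1/6   1/3   2/3 ]
+      //         [ 1/24 -1/12   1/6  -1/3   2/3 ]
+      //         [    0     0     0     0     1 ]
+      // (a sketch inferred from the accumulations above).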
+
+ // Store the transformed weights
+ for (int i = 0, m = 0; i < 6; i++)
+ {
+ for (int j = 0; j < 6; j++, m++)
+ {
+ *(outptr + m*matrix_stride) = V[i][j];
+ }
+ }
+ outptr++;
+ }
+ }
+}
+
+template class WeightTransform<5, 5, 6, 6, float, float, WinogradRoots::Integers>;
+
+} // namespace winograd
diff --git a/src/core/NEON/kernels/convolution/winograd/winograd_transforms/weights_4_5_fp32_fp32_integers.cpp b/src/core/NEON/kernels/convolution/winograd/winograd_transforms/weights_4_5_fp32_fp32_integers.cpp
new file mode 100644
index 0000000000..fb3d712954
--- /dev/null
+++ b/src/core/NEON/kernels/convolution/winograd/winograd_transforms/weights_4_5_fp32_fp32_integers.cpp
@@ -0,0 +1,90 @@
+/*
+ * Copyright (c) 2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "arm.hpp"
+#include "kernel.hpp"
+
+namespace winograd
+{
+
+template <>
+void WeightTransform<1, 5, 1, 8, float, float, WinogradRoots::Integers>::execute(
+ const int n_output_channels,
+ const int n_input_channels,
+ const float* const input, // NOTE: Data in HWIO order
+ float* const output,
+ const int matrix_stride,
+ const int matrix_row_stride
+)
+{
+ // Get pointers to each cell of the weight tensor
+ const auto weight_col_stride = n_input_channels * n_output_channels;
+ const float *inptrs[kernel_cols];
+ for (int j = 0; j < kernel_cols; j++)
+ {
+ inptrs[j] = input + j*weight_col_stride;
+ }
+
+ // For each input channel
+ for (int ic = 0; ic < n_input_channels; ic++)
+ {
+ float *outptr = output + ic * matrix_row_stride;
+
+ // For each output channel
+ int channels_remaining = n_output_channels;
+ for (; channels_remaining; channels_remaining--)
+ {
+ // Matrices used and computed in this kernel
+ float w[kernel_cols], V[inner_tile_cols];
+
+ // Read weights
+ for (int j = 0; j < kernel_cols; j++)
+ {
+ w[j] = *(inptrs[j]++);
+ }
+
+ // Compute V = w WT
+      V[0] = (w[0]*-1) / 36.0f;
+      V[1] = (w[1]*-1 + w[3]*-1 + w[0]*1 + w[2]*1 + w[4]*1) / 48.0f;
+      V[2] = (w[0]*1 + w[1]*1 + w[2]*1 + w[3]*1 + w[4]*1) / 48.0f;
+      V[3] = (w[0]*-1 + w[4]*-16 + w[2]*-4 + w[1]*2 + w[3]*8) / 120.0f;
+      V[4] = (w[0]*-1 + w[4]*-16 + w[3]*-8 + w[2]*-4 + w[1]*-2) / 120.0f;
+      V[5] = (w[3]*-27 + w[1]*-3 + w[2]*9 + w[4]*81 + w[0]*1) / 720.0f;
+      V[6] = (w[1]*3 + w[2]*9 + w[3]*27 + w[4]*81 + w[0]*1) / 720.0f;
+      V[7] = (w[4]*1) / 1.0f;
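+
+      // Same polynomial reading as the other one-dimensional transforms: the
+      // rows sample the degree-4 weight polynomial at {0, +/-1, +/-2, +/-3} and
+      // at infinity, with per-row sign/scale normalisations (inferred from the
+      // coefficients above).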
+
+ // Store the transformed weights
+ for (int j = 0; j < inner_tile_cols; j++)
+ {
+ *(outptr + j*matrix_stride) = V[j];
+ }
+ outptr++;
+ }
+ }
+}
+
+template class WeightTransform<1, 5, 1, 8, float, float, WinogradRoots::Integers>;
+template class WeightTransform<5, 1, 8, 1, float, float, WinogradRoots::Integers>;
+
+} // namespace winograd
diff --git a/src/core/NEON/kernels/convolution/winograd/winograd_transforms/weights_4x4_3x3_fp32_fp32_integers.cpp b/src/core/NEON/kernels/convolution/winograd/winograd_transforms/weights_4x4_3x3_fp32_fp32_integers.cpp
new file mode 100644
index 0000000000..9e7040bca8
--- /dev/null
+++ b/src/core/NEON/kernels/convolution/winograd/winograd_transforms/weights_4x4_3x3_fp32_fp32_integers.cpp
@@ -0,0 +1,257 @@
+/*
+ * Copyright (c) 2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "arm.hpp"
+#include "kernel.hpp"
+
+namespace winograd
+{
+
+template <>
+void WeightTransform<3, 3, 6, 6, float, float, WinogradRoots::Integers>::execute(
+ const int n_output_channels,
+ const int n_input_channels,
+ const float* const input, // NOTE: Data in HWIO order
+ float* const output,
+ const int matrix_stride,
+ const int matrix_row_stride
+)
+{
+ // Get pointers to each cell of the weight tensor
+ const auto weight_col_stride = n_input_channels * n_output_channels;
+ const auto weight_row_stride = 3 * weight_col_stride;
+ const float *inptrs[3][3];
+ for (int i = 0; i < 3; i++)
+ {
+ for (int j = 0; j < 3; j++)
+ {
+ inptrs[i][j] = input + i*weight_row_stride + j*weight_col_stride;
+ }
+ }
+
+ // For each input channel
+ for (int ic = 0; ic < n_input_channels; ic++)
+ {
+ float *outptr = output + ic * matrix_row_stride;
+
+ // For each output channel
+ int channels_remaining = n_output_channels;
+#ifdef __aarch64__
+ for (; channels_remaining >= 4; channels_remaining -= 4)
+ {
+ // Matrices used and computed in this kernel
+ float32x4_t w[3][3], Ww[6][3], V[6][6];
+
+ // Read weights
+ for (int i = 0; i < 3; i++)
+ {
+ for (int j = 0; j < 3; j++)
+ {
+ w[i][j] = vld1q_f32(inptrs[i][j]);
+ inptrs[i][j] += 4;
+ }
+ }
+
+ // Compute the matrix W w
+ for (int j = 0; j < 3; j++)
+ {
+ // Ww[0][j] = 6*w[0][j];
+ Ww[0][j] = vmulq_n_f32(w[0][j], 6.0);
+
+ // Ww[1][j] = -4*w[0][j] + -4*w[1][j] + -4*w[2][j];
+ Ww[1][j] = vmulq_n_f32(vaddq_f32(vaddq_f32(w[0][j], w[1][j]), w[2][j]), -4.0);
+
+ // Ww[2][j] = -4*w[0][j] + 4*w[1][j] + -4*w[2][j];
+ Ww[2][j] = vmulq_n_f32(vsubq_f32(vsubq_f32(w[1][j], w[0][j]), w[2][j]), 4.0);
+
+ // Ww[3][j] = 1*w[0][j] + 2*w[1][j] + 4*w[2][j];
+ Ww[3][j] = vmlaq_n_f32(vmlaq_n_f32(w[0][j], w[1][j], 2.0f), w[2][j], 4.0f);
+
+ // Ww[4][j] = 1*w[0][j] + -2*w[1][j] + 4*w[2][j];
+ Ww[4][j] = vmlaq_n_f32(vmlsq_n_f32(w[0][j], w[1][j], 2.0f), w[2][j], 4.0f);
+
+ // Ww[5][j] = 24*w[2][j];
+ Ww[5][j] = vmulq_n_f32(w[2][j], 24.0f);
+ }
+
+ // Compute V = W w WT
+ for (int i = 0; i < 6; i++)
+ {
+ const float recip576 = 1.0f / 576.0f;
+
+ // V[i][0] = 6*Ww[i][0];
+ V[i][0] = vmulq_n_f32(vmulq_n_f32(Ww[i][0], 6.0), recip576);
+
+ // V[i][1] = -4*Ww[i][0] + -4*Ww[i][1] + -4*Ww[i][2];
+ V[i][1] = vmulq_n_f32(vmulq_n_f32(vaddq_f32(vaddq_f32(Ww[i][0], Ww[i][1]), Ww[i][2]), -4.0), recip576);
+
+ // V[i][2] = -4*Ww[i][0] + 4*Ww[i][1] + -4*Ww[i][2];
+ V[i][2] = vmulq_n_f32(vmulq_n_f32(vsubq_f32(vsubq_f32(Ww[i][1], Ww[i][0]), Ww[i][2]), 4.0), recip576);
+
+ // V[i][3] = 1*Ww[i][0] + 2*Ww[i][1] + 4*Ww[i][2];
+ V[i][3] = vmulq_n_f32(vmlaq_n_f32(vmlaq_n_f32(Ww[i][0], Ww[i][1], 2.0f), Ww[i][2], 4.0f), recip576);
+
+ // V[i][4] = 1*Ww[i][0] + -2*Ww[i][1] + 4*Ww[i][2];
+ V[i][4] = vmulq_n_f32(vmlaq_n_f32(vmlsq_n_f32(Ww[i][0], Ww[i][1], 2.0f), Ww[i][2], 4.0f), recip576);
+
+ // V[i][5] = 24*Ww[i][2];
+ V[i][5] = vmulq_n_f32(vmulq_n_f32(Ww[i][2], 24.0f), recip576);
+ }
+
+ // Store the transformed weights
+ for (int i = 0, m = 0; i < 6; i++)
+ {
+ for (int j = 0; j < 6; j++, m++)
+ {
+ vst1q_f32(outptr + m*matrix_stride, V[i][j]);
+ }
+ }
+ outptr += 4;
+ }
+#endif // __aarch64__
+#ifdef __arm_any__
+ for (; channels_remaining >= 2; channels_remaining -= 2)
+ {
+ // Matrices used and computed in this kernel
+ float32x2_t w[3][3], Ww[6][3], V[6][6];
+
+ // Read weights
+ for (int i = 0; i < 3; i++)
+ {
+ for (int j = 0; j < 3; j++)
+ {
+ w[i][j] = vld1_f32(inptrs[i][j]);
+ inptrs[i][j] += 2;
+ }
+ }
+
+ // Compute the matrix W w
+ for (int j = 0; j < 3; j++)
+ {
+ // Ww[0][j] = 6*w[0][j];
+ Ww[0][j] = vmul_n_f32(w[0][j], 6.0);
+
+ // Ww[1][j] = -4*w[0][j] + -4*w[1][j] + -4*w[2][j];
+ Ww[1][j] = vmul_n_f32(vadd_f32(vadd_f32(w[0][j], w[1][j]), w[2][j]), -4.0);
+
+ // Ww[2][j] = -4*w[0][j] + 4*w[1][j] + -4*w[2][j];
+ Ww[2][j] = vmul_n_f32(vsub_f32(vsub_f32(w[1][j], w[0][j]), w[2][j]), 4.0);
+
+ // Ww[3][j] = 1*w[0][j] + 2*w[1][j] + 4*w[2][j];
+ Ww[3][j] = vmla_n_f32(vmla_n_f32(w[0][j], w[1][j], 2.0f), w[2][j], 4.0f);
+
+ // Ww[4][j] = 1*w[0][j] + -2*w[1][j] + 4*w[2][j];
+ Ww[4][j] = vmla_n_f32(vmls_n_f32(w[0][j], w[1][j], 2.0f), w[2][j], 4.0f);
+
+ // Ww[5][j] = 24*w[2][j];
+ Ww[5][j] = vmul_n_f32(w[2][j], 24.0f);
+ }
+
+ // Compute V = W w WT
+ for (int i = 0; i < 6; i++)
+ {
+ const float recip576 = 1.0f / 576.0f;
+
+ // V[i][0] = 6*Ww[i][0];
+ V[i][0] = vmul_n_f32(vmul_n_f32(Ww[i][0], 6.0), recip576);
+
+ // V[i][1] = -4*Ww[i][0] + -4*Ww[i][1] + -4*Ww[i][2];
+ V[i][1] = vmul_n_f32(vmul_n_f32(vadd_f32(vadd_f32(Ww[i][0], Ww[i][1]), Ww[i][2]), -4.0), recip576);
+
+ // V[i][2] = -4*Ww[i][0] + 4*Ww[i][1] + -4*Ww[i][2];
+ V[i][2] = vmul_n_f32(vmul_n_f32(vsub_f32(vsub_f32(Ww[i][1], Ww[i][0]), Ww[i][2]), 4.0), recip576);
+
+ // V[i][3] = 1*Ww[i][0] + 2*Ww[i][1] + 4*Ww[i][2];
+ V[i][3] = vmul_n_f32(vmla_n_f32(vmla_n_f32(Ww[i][0], Ww[i][1], 2.0f), Ww[i][2], 4.0f), recip576);
+
+ // V[i][4] = 1*Ww[i][0] + -2*Ww[i][1] + 4*Ww[i][2];
+ V[i][4] = vmul_n_f32(vmla_n_f32(vmls_n_f32(Ww[i][0], Ww[i][1], 2.0f), Ww[i][2], 4.0f), recip576);
+
+ // V[i][5] = 24*Ww[i][2];
+ V[i][5] = vmul_n_f32(vmul_n_f32(Ww[i][2], 24.0f), recip576);
+ }
+
+ // Store the transformed weights
+ for (int i = 0, m = 0; i < 6; i++)
+ {
+ for (int j = 0; j < 6; j++, m++)
+ {
+ vst1_f32(outptr + m*matrix_stride, V[i][j]);
+ }
+ }
+ outptr += 2;
+ }
+#endif // __arm_any__
+ for (; channels_remaining; channels_remaining--)
+ {
+ // Matrices used and computed in this kernel
+ float w[3][3], Ww[6][3], V[6][6];
+
+ // Read weights
+ for (int i = 0; i < 3; i++)
+ {
+ for (int j = 0; j < 3; j++)
+ {
+ w[i][j] = *(inptrs[i][j]++);
+ }
+ }
+
+ // Compute the matrix W w
+ for (int j = 0; j < 3; j++)
+ {
+ Ww[0][j] = 6*w[0][j];
+ Ww[1][j] = -4*w[0][j] + -4*w[1][j] + -4*w[2][j];
+ Ww[2][j] = -4*w[0][j] + 4*w[1][j] + -4*w[2][j];
+ Ww[3][j] = 1*w[0][j] + 2*w[1][j] + 4*w[2][j];
+ Ww[4][j] = 1*w[0][j] + -2*w[1][j] + 4*w[2][j];
+ Ww[5][j] = 24*w[2][j];
+ }
+
+ // Compute V = W w WT
+ for (int i = 0; i < 6; i++)
+ {
+ V[i][0] = ( 6*Ww[i][0]) / 576.0;
+ V[i][1] = (-4*Ww[i][0] + -4*Ww[i][1] + -4*Ww[i][2]) / 576.0;
+ V[i][2] = (-4*Ww[i][0] + 4*Ww[i][1] + -4*Ww[i][2]) / 576.0;
+ V[i][3] = ( 1*Ww[i][0] + 2*Ww[i][1] + 4*Ww[i][2]) / 576.0;
+ V[i][4] = ( 1*Ww[i][0] + -2*Ww[i][1] + 4*Ww[i][2]) / 576.0;
+ V[i][5] = (24*Ww[i][2]) / 576.0;
+ }
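+
+      // In matrix form: V = (W w W^T) / 576 with
+      //     W = [  6   0   0 ]
+      //         [ -4  -4  -4 ]
+      //         [ -4   4  -4 ]
+      //         [  1   2   4 ]
+      //         [  1  -2   4 ]
+      //         [  0   0  24 ]
+      // i.e. the F(4x4, 3x3) transform kept in integer arithmetic until a single
+      // final scale of 1/576 = (1/24)^2 (a sketch inferred from the accumulations
+      // above).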
+
+ // Store the transformed weights
+ for (int i = 0, m = 0; i < 6; i++)
+ {
+ for (int j = 0; j < 6; j++, m++)
+ {
+ *(outptr + m*matrix_stride) = V[i][j];
+ }
+ }
+ outptr++;
+ }
+ }
+}
+
+template class WeightTransform<3, 3, 6, 6, float, float, WinogradRoots::Integers>;
+
+} // namespace winograd
diff --git a/src/core/NEON/kernels/convolution/winograd/winograd_transforms/weights_6_3_fp32_fp32_integers.cpp b/src/core/NEON/kernels/convolution/winograd/winograd_transforms/weights_6_3_fp32_fp32_integers.cpp
new file mode 100644
index 0000000000..45723482a2
--- /dev/null
+++ b/src/core/NEON/kernels/convolution/winograd/winograd_transforms/weights_6_3_fp32_fp32_integers.cpp
@@ -0,0 +1,90 @@
+/*
+ * Copyright (c) 2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "arm.hpp"
+#include "kernel.hpp"
+
+namespace winograd
+{
+
+template <>
+void WeightTransform<1, 3, 1, 8, float, float, WinogradRoots::Integers>::execute(
+ const int n_output_channels,
+ const int n_input_channels,
+ const float* const input, // NOTE: Data in HWIO order
+ float* const output,
+ const int matrix_stride,
+ const int matrix_row_stride
+)
+{
+ // Get pointers to each cell of the weight tensor
+ const auto weight_col_stride = n_input_channels * n_output_channels;
+ const float *inptrs[3];
+ for (int j = 0; j < 3; j++)
+ {
+ inptrs[j] = input + j*weight_col_stride;
+ }
+
+ // For each input channel
+ for (int ic = 0; ic < n_input_channels; ic++)
+ {
+ float *outptr = output + ic * matrix_row_stride;
+
+ // For each output channel
+ int channels_remaining = n_output_channels;
+ for (; channels_remaining; channels_remaining--)
+ {
+ // Matrices used and computed in this kernel
+ float w[3], V[inner_tile_cols];
+
+ // Read weights
+ for (int j = 0; j < 3; j++)
+ {
+ w[j] = *(inptrs[j]++);
+ }
+
+ // Compute V = w WT
+ V[0] = (w[0]*-1) / 36.0f;
+ V[1] = (w[1]*-1 + w[0]*1 + w[2]*1) / 48.0f;
+ V[2] = (w[0]*1 + w[1]*1 + w[2]*1) / 48.0f;
+ V[3] = (w[0]*-1 + w[2]*-4 + w[1]*2) / 120.0f;
+ V[4] = (w[0]*-1 + w[2]*-4 + w[1]*-2) / 120.0f;
+ V[5] = (w[1]*-3 + w[2]*9 + w[0]*1) / 720.0f;
+ V[6] = (w[1]*3 + w[2]*9 + w[0]*1) / 720.0f;
+      V[7] = (w[2]*1) / 1.0f;
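+
+      // Same reading again: the rows sample the quadratic weight polynomial at
+      // {0, +/-1, +/-2, +/-3} and at infinity, with per-row sign/scale
+      // normalisations (inferred from the coefficients above).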
+
+ // Store the transformed weights
+ for (int j = 0; j < inner_tile_cols; j++)
+ {
+ *(outptr + j*matrix_stride) = V[j];
+ }
+ outptr++;
+ }
+ }
+}
+
+template class WeightTransform<1, 3, 1, 8, float, float, WinogradRoots::Integers>;
+template class WeightTransform<3, 1, 8, 1, float, float, WinogradRoots::Integers>;
+
+} // namespace winograd
diff --git a/src/runtime/NEON/functions/NEWinogradConvolutionLayer.cpp b/src/runtime/NEON/functions/NEWinogradConvolutionLayer.cpp
index 1d92471162..b4247be6cb 100644
--- a/src/runtime/NEON/functions/NEWinogradConvolutionLayer.cpp
+++ b/src/runtime/NEON/functions/NEWinogradConvolutionLayer.cpp
@@ -33,7 +33,7 @@
#include "arm_compute/runtime/NEON/functions/NEGEMMAssemblyDispatch.h"
#include "support/ToolchainSupport.h"
-#include "arm_compute/core/NEON/kernels/convolution/winograd/winograd_gemm.hpp"
+#include "arm_compute/core/NEON/kernels/convolution/winograd/winograd.hpp"
namespace arm_compute
{
@@ -236,10 +236,10 @@ bool check_support_fast_math(const Size2D &output_tile, const Size2D &kernel_siz
NEWinogradConvolutionLayer::NEWinogradConvolutionLayer(std::shared_ptr<IMemoryManager> memory_manager)
: _memory_group(memory_manager), _gemm_function(memory_manager), _transform_input_kernel(nullptr), _transform_output_kernel(nullptr), _transform_weights_kernel(nullptr), _activationlayer_function(),
- _permute_input(), _permute_weights(), _permute_output(), _input_workspace(), _output_workspace(), _kernel_storage(), _input_nhwc(), _output_nhwc(), _weights_hwio(), _input(), _weights(), _output(),
- _is_prepared(false), _is_activationlayer_enabled(false)
+ _permute_input(), _permute_weights(), _permute_output(), _input_transformed(), _output_transformed(), _input_workspace(), _output_workspace(), _kernel_storage(), _input_nhwc(), _output_nhwc(),
+ _weights_hwio(), _input(), _weights(), _output(), _is_prepared(false), _is_activationlayer_enabled(false)
{
-} /* arm_compute */
+}
void NEWinogradConvolutionLayer::configure(const ITensor *input, const ITensor *weights, const ITensor *biases, ITensor *output, const PadStrideInfo &conv_info, const ActivationLayerInfo &act_info,
bool enable_fast_math)
@@ -436,9 +436,9 @@ void NEWinogradConvolutionLayer::configure(const ITensor *input, const ITensor *
b_info.init(b_shape, 1, data_type, b_strides, 0, kernel_storage_size);
d_info.init(d_shape, 1, data_type, d_strides, 0, output_storage_size);
- _input_workspace.allocator()->init(a_info, storage_alignment);
+ _input_transformed.allocator()->init(a_info, storage_alignment);
_kernel_storage.allocator()->init(b_info, storage_alignment);
- _output_workspace.allocator()->init(d_info, storage_alignment);
+ _output_transformed.allocator()->init(d_info, storage_alignment);
// configure and allocate dst tensor to be used to convert from winograd domain to spatial domain when calling to reshape_output()
TensorInfo info(TensorShape(_output->info()->dimension(2), _output->info()->dimension(0),
@@ -447,6 +447,8 @@ void NEWinogradConvolutionLayer::configure(const ITensor *input, const ITensor *
_output_nhwc.allocator()->init(info);
// Configure the InputTransform
+ _memory_group.manage(&_input_transformed);
+ _memory_group.manage(&_output_transformed);
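+    // Managing the transformed tensors through the memory group lets their
+    // backing memory be reused once they are no longer needed.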
_memory_group.manage(&_input_workspace);
_memory_group.manage(&_output_workspace);
@@ -456,7 +458,7 @@ void NEWinogradConvolutionLayer::configure(const ITensor *input, const ITensor *
_permute_input.configure(input, &_input_nhwc, PermutationVector(2U, 0U, 1U));
_input_nhwc.allocator()->allocate();
transform_input_kernel->configure(&_input_nhwc, in_shape.n_batches, in_shape.n_rows, in_shape.n_cols, in_shape.n_channels, use_padding_type,
- &_input_workspace, input_matrix_stride);
+ &_input_transformed, input_matrix_stride, &_input_workspace);
// Re-order a weight tensor from [Output feature map x Input feature map x Height x Width] to [Height x Width x Input feature map x Output feature map]
_permute_weights.configure(weights, &_weights_hwio, PermutationVector(3U, 2U, 0U, 1U));
@@ -465,26 +467,39 @@ void NEWinogradConvolutionLayer::configure(const ITensor *input, const ITensor *
//The biases tensor has not been allocated at this point in time, the output transform will add the biases to the final result in the run() method
_memory_group.manage(&_output_nhwc);
- transform_output_kernel->configure(biases, &_output_workspace,
+ transform_output_kernel->configure(biases, &_output_transformed,
output_matrix_stride, &_output_nhwc,
- in_shape.n_batches, output_shape.n_rows, output_shape.n_cols, out_channels);
+ in_shape.n_batches, output_shape.n_rows, output_shape.n_cols, out_channels, &_output_workspace);
}
else
{
transform_input_kernel->configure(_input, in_shape.n_batches, in_shape.n_rows, in_shape.n_cols, in_shape.n_channels, use_padding_type,
- &_input_workspace, input_matrix_stride);
+ &_input_transformed, input_matrix_stride, &_input_workspace);
// Re-order a weight tensor from [Output feature map x Input feature map x Height x Width] to [Height x Width x Input feature map x Output feature map]
_permute_weights.configure(weights, &_weights_hwio, PermutationVector(3U, 0U, 1U, 2U));
transform_weights_kernel->configure(&_weights_hwio, &_kernel_storage, kernel_matrix_stride, out_channels, in_channels);
- transform_output_kernel->configure(biases, &_output_workspace,
+ transform_output_kernel->configure(biases, &_output_transformed,
output_matrix_stride, _output,
- in_shape.n_batches, output_shape.n_rows, output_shape.n_cols, out_channels);
+ in_shape.n_batches, output_shape.n_rows, output_shape.n_cols, out_channels, &_output_workspace);
}
- _gemm_function.configure(&_input_workspace, &_kernel_storage, nullptr, &_output_workspace, 1.0f, 0.f);
+    // Configure the input/output workspaces; get_working_space_size() must be called after configure()
+ const unsigned int max_num_threads = NEScheduler::get().num_threads_hint();
+ const size_t input_workspace_size = transform_input_kernel->get_working_space_size(max_num_threads);
+ const size_t output_workspace_size = transform_output_kernel->get_working_space_size(max_num_threads);
+
+ TensorInfo input_workspace_info(TensorShape(input_workspace_size), 1, _input->info()->data_type());
+ _input_workspace.allocator()->init(input_workspace_info);
+
+ TensorInfo output_workspace_info(TensorShape(output_workspace_size), 1, _output->info()->data_type());
+ _output_workspace.allocator()->init(output_workspace_info);
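+    // These workspaces are per-thread scratch space for the input/output tile
+    // transforms; the GEMM below consumes the transformed tensors themselves
+    // (inferred from the configure() calls above).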
+
+ _gemm_function.configure(&_input_transformed, &_kernel_storage, nullptr, &_output_transformed, 1.0f, 0.f);
+ _input_transformed.allocator()->allocate();
+ _output_transformed.allocator()->allocate();
_input_workspace.allocator()->allocate();
_output_workspace.allocator()->allocate();