aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorPablo Tello <pablo.tello@arm.com>2018-08-22 11:40:33 +0100
committerAnthony Barbier <anthony.barbier@arm.com>2018-11-02 16:54:54 +0000
commitbda6e4b51bc4045c97100bb9d562164ba7c6c28f (patch)
tree8924bbae251b34dc35a4ffc9a9ece79d28c4415b
parent238c97cd8bfdb6dfce5c4eefed6aac4d9bb59457 (diff)
downloadComputeLibrary-bda6e4b51bc4045c97100bb9d562164ba7c6c28f.tar.gz
COMPMID-1247: Integrate kernel size 1x3 & 3x1 support in NEWinogradLayer.
Change-Id: I6fe198881230e49864c841a3b2366ccf2a9247f9 Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/145210 Tested-by: Jenkins <bsgcomp@arm.com> Reviewed-by: Georgios Pinitas <georgios.pinitas@arm.com>
-rw-r--r--arm_compute/core/NEON/kernels/NEWinogradConvolutionLayerKernel.h28
-rw-r--r--arm_compute/core/NEON/kernels/convolution/common/utils.hpp2
-rw-r--r--arm_compute/core/NEON/kernels/convolution/winograd/transforms/input.hpp22
-rw-r--r--arm_compute/core/NEON/kernels/convolution/winograd/transforms/output.hpp22
-rw-r--r--arm_compute/core/NEON/kernels/convolution/winograd/winograd_gemm.hpp24
-rw-r--r--src/core/NEON/kernels/NEWinogradConvolutionLayerKernel.cpp74
-rw-r--r--src/core/NEON/kernels/convolution/winograd/transforms/input_2x2_3x3_fp32.cpp2
-rw-r--r--src/core/NEON/kernels/convolution/winograd/transforms/input_2x2_5x5_fp32.cpp2
-rw-r--r--src/core/NEON/kernels/convolution/winograd/transforms/input_4x4_3x3_fp32.cpp2
-rw-r--r--src/core/NEON/kernels/convolution/winograd/transforms/input_6_3_fp32.cpp226
-rw-r--r--src/core/NEON/kernels/convolution/winograd/transforms/output_6_3_fp32.cpp186
-rw-r--r--src/core/NEON/kernels/convolution/winograd/transforms/weights_6_3_fp32.cpp125
-rw-r--r--src/core/NEON/kernels/convolution/winograd/winograd_gemm.cpp3
-rw-r--r--src/runtime/NEON/functions/NEWinogradConvolutionLayer.cpp251
-rw-r--r--tests/validation/NEON/ConvolutionLayer.cpp83
15 files changed, 920 insertions, 132 deletions
diff --git a/arm_compute/core/NEON/kernels/NEWinogradConvolutionLayerKernel.h b/arm_compute/core/NEON/kernels/NEWinogradConvolutionLayerKernel.h
index 9cdd69a70a..c71c105d92 100644
--- a/arm_compute/core/NEON/kernels/NEWinogradConvolutionLayerKernel.h
+++ b/arm_compute/core/NEON/kernels/NEWinogradConvolutionLayerKernel.h
@@ -159,7 +159,7 @@ public:
void run(const Window &window, const ThreadInfo &info) override;
/** Winograd base kernel */
- using WinogradBase = winograd::WinogradGEMM<OutputTileRows, OutputTileCols, KernelCols, KernelCols>;
+ using WinogradBase = winograd::WinogradGEMM<OutputTileRows, OutputTileCols, KernelRows, KernelCols>;
/** Winograd convolution kernel */
using WinogradConv = typename WinogradBase::template Convolution<T, T>;
@@ -360,6 +360,21 @@ template <typename T>
class INEWinogradLayerTransformWeightsKernel : public INEKernel
{
public:
+    /** Allow instances of this class to be copied (copy constructor is defaulted; NOTE(review): previous comment said "Prevent ... copied" but the member is `= default`, not `= delete`) */
+ INEWinogradLayerTransformWeightsKernel(const INEWinogradLayerTransformWeightsKernel &) = default;
+    /** Allow instances of this class to be copied (copy assignment is defaulted; NOTE(review): previous comment said "Prevent ... copied" but the member is `= default`, not `= delete`) */
+ INEWinogradLayerTransformWeightsKernel &operator=(const INEWinogradLayerTransformWeightsKernel &) = default;
+ /** Allow instances of this class to be moved */
+ INEWinogradLayerTransformWeightsKernel(INEWinogradLayerTransformWeightsKernel &&) = default;
+ /** Allow instances of this class to be moved */
+ INEWinogradLayerTransformWeightsKernel &operator=(INEWinogradLayerTransformWeightsKernel &&) = default;
+
+ INEWinogradLayerTransformWeightsKernel()
+ {
+ }
+ virtual ~INEWinogradLayerTransformWeightsKernel()
+ {
+ }
/** Determine how much memory (in units of T) to allocate for the
* transformed weights.
*
@@ -388,9 +403,14 @@ public:
virtual void configure(const ITensor *weights_hwio, ITensor *output, const int matrix_stride, const int num_output_channels, const int num_input_channels) = 0;
- virtual ~INEWinogradLayerTransformWeightsKernel()
- {
- }
+ /** Static function to check if given info will lead to a valid configuration of @ref NEWinogradLayerTransformWeightsKernel
+ *
+ * @param[in] input First tensor input info. Data types supported: F32.
+ * @param[in] weights Weights tensor info. Data types supported: same as @p input.
+ *
+ * @return a status
+ */
+ static Status validate(const ITensorInfo *input, const ITensorInfo *weights);
};
/** NEON kernel to perform Winograd weights transform. */
diff --git a/arm_compute/core/NEON/kernels/convolution/common/utils.hpp b/arm_compute/core/NEON/kernels/convolution/common/utils.hpp
index 5f42719119..25bfa332fb 100644
--- a/arm_compute/core/NEON/kernels/convolution/common/utils.hpp
+++ b/arm_compute/core/NEON/kernels/convolution/common/utils.hpp
@@ -26,7 +26,7 @@
void PrintMatrix(const float *const m, const int M, const int N, const int row_stride);
-inline int iceildiv(const int a, const int b)
+constexpr inline int iceildiv(const int a, const int b)
{
return (a + b - 1) / b;
}
diff --git a/arm_compute/core/NEON/kernels/convolution/winograd/transforms/input.hpp b/arm_compute/core/NEON/kernels/convolution/winograd/transforms/input.hpp
index 13218030d2..369c2ff48f 100644
--- a/arm_compute/core/NEON/kernels/convolution/winograd/transforms/input.hpp
+++ b/arm_compute/core/NEON/kernels/convolution/winograd/transforms/input.hpp
@@ -50,6 +50,22 @@ namespace winograd
const int matrix_row_stride /** Stride within matrices. */
)
{
+ // If an Nx1 kernel then transpose and redirect to the 1xN implementation
+ if (kernel_cols == 1)
+ {
+ WinogradGEMM<output_tile_cols, output_tile_rows, kernel_cols, kernel_rows>::
+ template InputTransform<T>::execute(
+ input,
+ n_batches, in_batch_stride,
+ n_cols, in_col_stride,
+ n_rows, in_row_stride,
+ n_channels, padding,
+ tile_N, tile_M,
+ output, matrix_stride, matrix_batch_stride, matrix_row_stride
+ );
+ return;
+ }
+
// Compute the padding required on each edge of the image
const int pad_top = (padding == PADDING_SAME) ? (kernel_rows - 1) / 2 : 0;
const int pad_left = (padding == PADDING_SAME) ? (kernel_cols - 1) / 2 : 0;
@@ -111,6 +127,12 @@ namespace winograd
const int n_cols
)
{
+ if (kernel_cols == 1)
+ {
+ // If an Nx1 implementation then this should never be reached.
+ return;
+ }
+
constexpr int tile_overlap = kernel_cols - 1;
// Loop over columns of tiles
diff --git a/arm_compute/core/NEON/kernels/convolution/winograd/transforms/output.hpp b/arm_compute/core/NEON/kernels/convolution/winograd/transforms/output.hpp
index 700ca76c68..6ed146bf85 100644
--- a/arm_compute/core/NEON/kernels/convolution/winograd/transforms/output.hpp
+++ b/arm_compute/core/NEON/kernels/convolution/winograd/transforms/output.hpp
@@ -45,6 +45,22 @@ namespace winograd
T* const output
)
{
+ // If an Nx1 kernel then transpose and redirect to the 1xN implementation.
+ if (kernel_cols == 1)
+ {
+ WinogradGEMM<output_tile_cols, output_tile_rows, kernel_cols, kernel_rows>::
+ template OutputTransform<T>::execute(
+ n_batches,
+ output_batch_stride,
+ n_cols, output_col_stride,
+ n_rows, output_row_stride,
+ n_channels,
+ matrix_base, matrix_stride, matrix_row_stride,
+ biases, output
+ );
+ return;
+ }
+
// Compute the number of tiles and hence the padding required on the bottom
// and right of the image.
const int tile_M = iceildiv(n_rows, output_tile_rows);
@@ -98,6 +114,12 @@ namespace winograd
const int row_pad_right
)
{
+ if (kernel_cols == 1)
+ {
+ // If an Nx1 implementation then this should never be reached.
+ return;
+ }
+
// Loop over columns of tiles
for (int tile_j = 0; tile_j < tile_N; tile_j++)
{
diff --git a/arm_compute/core/NEON/kernels/convolution/winograd/winograd_gemm.hpp b/arm_compute/core/NEON/kernels/convolution/winograd/winograd_gemm.hpp
index bc067fd07a..7098fc48a1 100644
--- a/arm_compute/core/NEON/kernels/convolution/winograd/winograd_gemm.hpp
+++ b/arm_compute/core/NEON/kernels/convolution/winograd/winograd_gemm.hpp
@@ -49,8 +49,8 @@ class WinogradGEMM
static constexpr int output_tile_cols = OutputTileCols;
static constexpr int kernel_rows = KernelRows;
static constexpr int kernel_cols = KernelCols;
- static constexpr int inner_tile_rows = output_tile_rows + kernel_rows - 1; // TODO Check
- static constexpr int inner_tile_cols = output_tile_cols + kernel_cols - 1; // TODO Check
+ static constexpr int inner_tile_rows = output_tile_rows + kernel_rows - 1;
+ static constexpr int inner_tile_cols = output_tile_cols + kernel_cols - 1;
static constexpr int N_GEMMS = inner_tile_rows * inner_tile_cols;
/** Transform weights from the spatial to the Winograd domain. */
@@ -196,8 +196,21 @@ class WinogradGEMM
const int n_cols
);
- static constexpr int max_pad_bottom = inner_tile_rows - 1;
- static constexpr int max_pad_right = inner_tile_cols - 1;
+ // Tile overlaps
+ static constexpr int overlap_rows = kernel_rows - 1;
+ static constexpr int overlap_cols = kernel_cols - 1;
+
+ // Maximum padding and number of distinct paddings
+ static constexpr int max_pad_top = kernel_rows / 2;
+ static constexpr int n_pad_top = 1 + iceildiv(max_pad_top, inner_tile_rows - overlap_rows);
+
+ static constexpr int max_pad_left = kernel_cols / 2;
+ static constexpr int n_pad_left = 1 + iceildiv(max_pad_left, inner_tile_cols - overlap_cols);
+
+ static constexpr int n_pad_bottom = inner_tile_rows;
+ static constexpr int n_pad_right = inner_tile_cols;
+
+
/** Process a single tile of the input tensor. */
template <int pad_top, int pad_left, int pad_bottom, int pad_right>
@@ -205,7 +218,8 @@ class WinogradGEMM
// Array of methods to transform tiles of the input tensor.
typedef void (*TileFn)(int, const T*, int, int, T*, int);
- static const TileFn tile_fns[2][2][max_pad_bottom][max_pad_right];
+ static const TileFn
+ tile_fns[n_pad_top][n_pad_left][n_pad_bottom][n_pad_right];
/* Member values for instance-based API. */
const T* const _inptr;
diff --git a/src/core/NEON/kernels/NEWinogradConvolutionLayerKernel.cpp b/src/core/NEON/kernels/NEWinogradConvolutionLayerKernel.cpp
index 3d7a16dd45..8f990712e8 100644
--- a/src/core/NEON/kernels/NEWinogradConvolutionLayerKernel.cpp
+++ b/src/core/NEON/kernels/NEWinogradConvolutionLayerKernel.cpp
@@ -40,19 +40,27 @@ namespace arm_compute
namespace
{
+inline bool is_kernel_size_supported(Size2D size)
+{
+ const std::array<Size2D, 4> supported_input_sizes = { { Size2D(1, 3), Size2D(3, 1), Size2D(5, 5), Size2D(3, 3) } };
+ return std::end(supported_input_sizes) != std::find(std::begin(supported_input_sizes), std::end(supported_input_sizes), size);
+}
+
Status validate_arguments_winograd_weight_trans(const ITensorInfo *input, const ITensorInfo *output, const WinogradInfo &winograd_info)
{
ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input);
ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(output);
ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F32);
- const size_t idx_width = get_data_layout_dimension_index(input->data_layout(), DataLayoutDimension::WIDTH);
- const size_t idx_height = get_data_layout_dimension_index(input->data_layout(), DataLayoutDimension::HEIGHT);
- ARM_COMPUTE_RETURN_ERROR_ON(input->dimension(idx_width) != 3 && input->dimension(idx_width) != 5);
- ARM_COMPUTE_RETURN_ERROR_ON(input->dimension(idx_width) != input->dimension(idx_height));
+ const size_t idx_width = get_data_layout_dimension_index(input->data_layout(), DataLayoutDimension::WIDTH);
+ const size_t idx_height = get_data_layout_dimension_index(input->data_layout(), DataLayoutDimension::HEIGHT);
+ const auto input_width = input->dimension(idx_width);
+ const auto input_height = input->dimension(idx_height);
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(!is_kernel_size_supported(Size2D(input_width, input_height)), "Only 1x3, 3x1, 3x3 and 5x5 kernels are supported");
ARM_COMPUTE_RETURN_ERROR_ON(input->num_dimensions() > 4);
const Size2D &output_tile = winograd_info.output_tile_size;
- ARM_COMPUTE_RETURN_ERROR_ON(output_tile != Size2D(2U, 2U) && output_tile != Size2D(4U, 4U));
+ const std::array<Size2D, 4> supported_tile_sizes = { { Size2D(2U, 2U), Size2D(4U, 4U), Size2D(1U, 6U), Size2D(6U, 1U) } };
+ ARM_COMPUTE_RETURN_ERROR_ON(std::end(supported_tile_sizes) == std::find(std::begin(supported_tile_sizes), std::end(supported_tile_sizes), output_tile));
// Checks performed when output is configured
if(output->total_size() != 0)
@@ -98,8 +106,8 @@ Status validate_arguments_winograd_input_trans(const ITensorInfo *input, const I
ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(output);
ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F32);
ARM_COMPUTE_RETURN_ERROR_ON_MSG(conv_info.stride().first != 1 || conv_info.stride().second != 1, "Winograd input transform only supports unit strides");
- ARM_COMPUTE_RETURN_ERROR_ON_MSG((kernel_dims.width != 3U && kernel_dims.width != 5U), "Winograd input transform only supports 3x3 and 5x5 kernels");
- ARM_COMPUTE_RETURN_ERROR_ON_MSG((kernel_dims.width != kernel_dims.height), "Winograd input transform only supports 3x3 and 5x5 kernels");
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(!is_kernel_size_supported(Size2D(kernel_dims.width, kernel_dims.height)),
+ "Only 1x3, 3x1, 3x3 and 5x5 kernels are supported");
// Validate configured output
if(output->total_size() != 0)
@@ -151,9 +159,11 @@ Status validate_arguments_winograd_output_trans(const ITensorInfo *input, const
ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(output);
ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F32);
ARM_COMPUTE_RETURN_ERROR_ON(input->dimension(1) != num_tiles.area());
- ARM_COMPUTE_RETURN_ERROR_ON_MSG((kernel_dims.width != 3U && kernel_dims.width != 5U), "Winograd output transform only supports 3x3 and 5x5 kernels");
- ARM_COMPUTE_RETURN_ERROR_ON_MSG((kernel_dims.width != kernel_dims.height), "Winograd output transform only supports 3x3 and 5x5 kernels");
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(((input->dimension(2) != size_t(16U)) && (input->dimension(2) != size_t(36U))), "Only 2x2 and 4x4 output tile is supported");
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(!is_kernel_size_supported(Size2D(kernel_dims.width, kernel_dims.height)),
+ "Only 1x3, 3x1, 3x3 and 5x5 kernels are supported");
+
+ const std::array<unsigned int, 3> supported_gemm_sizes = { { 8U, 16U, 36U } };
+ ARM_COMPUTE_RETURN_ERROR_ON(std::end(supported_gemm_sizes) == std::find(std::begin(supported_gemm_sizes), std::end(supported_gemm_sizes), input->dimension(2)));
ARM_COMPUTE_UNUSED(kernel_dims);
if(bias != nullptr)
{
@@ -201,7 +211,21 @@ std::pair<Status, Window> validate_and_configure_window_winograd_output_trans(IT
}
} // namespace
-// Weights transform
+template <typename T>
+Status INEWinogradLayerTransformWeightsKernel<T>::validate(const ITensorInfo *input, const ITensorInfo *weights)
+{
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F32);
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, weights);
+ const DataLayout data_layout = input->data_layout();
+ const unsigned int width_idx = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
+ const unsigned int height_idx = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(!is_kernel_size_supported(Size2D(weights->dimension(width_idx), weights->dimension(height_idx))),
+ "Only 1x3, 3x1, 3x3 and 5x5 kernels are supported");
+ ARM_COMPUTE_RETURN_ERROR_ON(weights->num_dimensions() > 4);
+ return Status{};
+}
+
+template class INEWinogradLayerTransformWeightsKernel<float>;
template <typename T, int OutputTileRows, int OutputTileCols, int KernelRows, int KernelCols>
unsigned int NEWinogradLayerTransformWeightsKernel<T, OutputTileRows, OutputTileCols, KernelRows, KernelCols>::get_weight_storage_size(int num_output_channels, int num_input_channels) const
@@ -278,6 +302,8 @@ Status NEWinogradLayerTransformWeightsKernel<T, OutputTileRows, OutputTileCols,
template class NEWinogradLayerTransformWeightsKernel<float, 2, 2, 3, 3>;
template class NEWinogradLayerTransformWeightsKernel<float, 4, 4, 3, 3>;
template class NEWinogradLayerTransformWeightsKernel<float, 2, 2, 5, 5>;
+template class NEWinogradLayerTransformWeightsKernel<float, 1, 6, 1, 3>;
+template class NEWinogradLayerTransformWeightsKernel<float, 6, 1, 3, 1>;
// Input transform
@@ -343,14 +369,15 @@ void NEWinogradLayerTransformInputKernel<T, OutputTileRows, OutputTileCols, Kern
ARM_COMPUTE_UNUSED(info);
ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
- const int element_size_in_bytes = _input_nhwc->info()->element_size();
- const int input_col_stride = _input_nhwc->info()->strides_in_bytes().y() / element_size_in_bytes;
- const int input_row_stride = _input_nhwc->info()->strides_in_bytes().z() / element_size_in_bytes;
- const int input_batch_stride = _input_nhwc->info()->strides_in_bytes()[3] / element_size_in_bytes;
-
- InputTransform input_transform(reinterpret_cast<const T *>(_input_nhwc->buffer() + _input_nhwc->info()->offset_first_element_in_bytes()),
+ const int element_size_in_bytes = _input_nhwc->info()->element_size();
+ const int input_col_stride = _input_nhwc->info()->strides_in_bytes().y() / element_size_in_bytes;
+ const int input_row_stride = _input_nhwc->info()->strides_in_bytes().z() / element_size_in_bytes;
+ const int input_batch_stride = _input_nhwc->info()->strides_in_bytes()[3] / element_size_in_bytes;
+ const auto input_nhwc_ptr = reinterpret_cast<const T *>(_input_nhwc->buffer() + _input_nhwc->info()->offset_first_element_in_bytes());
+ auto output_ptr = reinterpret_cast<T *>(_output->buffer() + _output->info()->offset_first_element_in_bytes());
+ InputTransform input_transform(input_nhwc_ptr,
_num_batches, _num_rows, _num_cols, _num_channels, _padding,
- reinterpret_cast<T *>(_output->buffer() + _output->info()->offset_first_element_in_bytes()),
+ output_ptr,
_matrix_stride, _num_channels, input_batch_stride, input_row_stride, input_col_stride);
// The code below cannot be moved to configure because biases hasn't been allocated at that point
@@ -371,6 +398,8 @@ Status NEWinogradLayerTransformInputKernel<T, OutputTileRows, OutputTileCols, Ke
template class NEWinogradLayerTransformInputKernel<float, 2, 2, 3, 3>;
template class NEWinogradLayerTransformInputKernel<float, 4, 4, 3, 3>;
template class NEWinogradLayerTransformInputKernel<float, 2, 2, 5, 5>;
+template class NEWinogradLayerTransformInputKernel<float, 1, 6, 1, 3>;
+template class NEWinogradLayerTransformInputKernel<float, 6, 1, 3, 1>;
// Output transform
@@ -438,7 +467,6 @@ void NEWinogradLayerTransformOutputKernel<T, OutputTileRows, OutputTileCols, Ker
Window win;
auto win_last = output_transform.get_window();
win.set(Window::DimX, Window::Dimension(0, win_last, 1));
-
_output_nhwc->info()->set_valid_region(ValidRegion(Coordinates(), _output_nhwc->info()->tensor_shape()));
INEKernel::configure(win);
@@ -452,10 +480,14 @@ void NEWinogradLayerTransformOutputKernel<T, OutputTileRows, OutputTileCols, Ker
ARM_COMPUTE_ERROR_ON_NULLPTR(_output_workspace);
ARM_COMPUTE_ERROR_ON_NULLPTR(_output_nhwc);
+ const int out_batch_stride = 0;
+ const int out_row_stride = _output_nhwc->info()->strides_in_bytes()[2] / sizeof(T);
+ const int out_col_stride = _output_nhwc->info()->strides_in_bytes()[1] / sizeof(T);
+
OutputTransform output_transform(reinterpret_cast<T *>(_output_workspace->buffer()), _matrix_stride, _matrix_row_stride,
(_biases ? reinterpret_cast<T *>(_biases->buffer() + _biases->info()->offset_first_element_in_bytes()) : nullptr),
reinterpret_cast<T *>(_output_nhwc->buffer() + _output_nhwc->info()->offset_first_element_in_bytes()),
- _num_batches, _num_rows, _num_cols, _num_channels, 0, _output_nhwc->info()->strides_in_bytes()[2] / sizeof(T), _output_nhwc->info()->strides_in_bytes()[1] / sizeof(T));
+ _num_batches, _num_rows, _num_cols, _num_channels, out_batch_stride, out_row_stride, out_col_stride);
// The code below cannot be moved to configure because biases hasn't been allocated at that point
const size_t fst = window.x().start();
@@ -478,5 +510,7 @@ Status NEWinogradLayerTransformOutputKernel<T, OutputTileRows, OutputTileCols, K
template class NEWinogradLayerTransformOutputKernel<float, 2, 2, 3, 3>;
template class NEWinogradLayerTransformOutputKernel<float, 4, 4, 3, 3>;
template class NEWinogradLayerTransformOutputKernel<float, 2, 2, 5, 5>;
+template class NEWinogradLayerTransformOutputKernel<float, 1, 6, 1, 3>;
+template class NEWinogradLayerTransformOutputKernel<float, 6, 1, 3, 1>;
} // namespace arm_compute
diff --git a/src/core/NEON/kernels/convolution/winograd/transforms/input_2x2_3x3_fp32.cpp b/src/core/NEON/kernels/convolution/winograd/transforms/input_2x2_3x3_fp32.cpp
index 6d8afc0def..97b2695d69 100644
--- a/src/core/NEON/kernels/convolution/winograd/transforms/input_2x2_3x3_fp32.cpp
+++ b/src/core/NEON/kernels/convolution/winograd/transforms/input_2x2_3x3_fp32.cpp
@@ -329,7 +329,7 @@ void Transform::process_tile(
template <>
template <>
-const Transform::TileFn Transform::tile_fns[2][2][max_pad_bottom][max_pad_right] =
+const Transform::TileFn Transform::tile_fns[n_pad_top][n_pad_left][n_pad_bottom][n_pad_right] =
{
{
{
diff --git a/src/core/NEON/kernels/convolution/winograd/transforms/input_2x2_5x5_fp32.cpp b/src/core/NEON/kernels/convolution/winograd/transforms/input_2x2_5x5_fp32.cpp
index fd30b6118e..30c9463bb8 100644
--- a/src/core/NEON/kernels/convolution/winograd/transforms/input_2x2_5x5_fp32.cpp
+++ b/src/core/NEON/kernels/convolution/winograd/transforms/input_2x2_5x5_fp32.cpp
@@ -298,7 +298,7 @@ void Transform::process_tile(
template <>
template <>
-const Transform::TileFn Transform::tile_fns[2][2][max_pad_bottom][max_pad_right] =
+const Transform::TileFn Transform::tile_fns[n_pad_top][n_pad_left][n_pad_bottom][n_pad_right] =
{
{
{
diff --git a/src/core/NEON/kernels/convolution/winograd/transforms/input_4x4_3x3_fp32.cpp b/src/core/NEON/kernels/convolution/winograd/transforms/input_4x4_3x3_fp32.cpp
index 04d1573e4c..7f93187132 100644
--- a/src/core/NEON/kernels/convolution/winograd/transforms/input_4x4_3x3_fp32.cpp
+++ b/src/core/NEON/kernels/convolution/winograd/transforms/input_4x4_3x3_fp32.cpp
@@ -326,7 +326,7 @@ void Transform::process_tile(
*/
template <>
template <>
-const Transform::TileFn Transform::tile_fns[2][2][max_pad_bottom][max_pad_right] =
+const Transform::TileFn Transform::tile_fns[n_pad_top][n_pad_left][n_pad_bottom][n_pad_right] =
{
{
{
diff --git a/src/core/NEON/kernels/convolution/winograd/transforms/input_6_3_fp32.cpp b/src/core/NEON/kernels/convolution/winograd/transforms/input_6_3_fp32.cpp
new file mode 100644
index 0000000000..67e46499cd
--- /dev/null
+++ b/src/core/NEON/kernels/convolution/winograd/transforms/input_6_3_fp32.cpp
@@ -0,0 +1,226 @@
+/*
+ * Copyright (c) 2017 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "arm_compute/core/NEON/kernels/convolution/winograd/transforms/input.hpp"
+#include "arm_compute/core/NEON/kernels/convolution/winograd/winograd_gemm.hpp"
+#include "arm_compute/core/NEON/kernels/convolution/common/arm.hpp"
+
+
+namespace winograd
+{
+
+using Transform = WinogradGEMM<1, 6, 1, 3>::InputTransform<float>;
+
+template <>
+template <>
+int Transform::ops_performed(const Tensor4DShape &input_shape)
+{
+ (void) input_shape;
+ return 0; // TODO
+}
+
+template <>
+template <>
+template <int pad_top, int pad_left, int pad_bottom, int pad_right>
+void Transform::process_tile(
+ int n_channels,
+ const float* const input_base,
+ const int input_row_stride,
+ const int input_col_stride,
+ float* const matrix_base,
+ const int matrix_stride
+)
+{
+ (void) input_row_stride; // No rows over which to stride
+ constexpr int inner_tile_j = 8;
+ constexpr int cells_j = inner_tile_j - pad_right;
+
+ float *outptr = matrix_base;
+
+ // Get pointers into the input tile
+ const float *x_ptrs[inner_tile_j];
+ for (int j = pad_left, xj = 0; j < cells_j; j++, xj++)
+ {
+ x_ptrs[j] = input_base + xj*input_col_stride;
+ }
+
+ // Vectors used/computed in this kernel.
+ float x[inner_tile_j];
+ float U[inner_tile_j];
+
+ for (int j = 0; j < inner_tile_j; j++)
+ {
+ x[j] = 0.0f;
+ }
+
+ // Perform the Winograd input transformation for each channel in the input
+ // tensor.
+ int channels_remaining = n_channels;
+#ifdef __arm_any__
+ for (; channels_remaining >= 4; channels_remaining -= 4)
+ {
+ float32x4_t x[inner_tile_j], U[inner_tile_j];
+ for (int j = 0; j < inner_tile_cols; j++)
+ {
+ x[j] = vdupq_n_f32(0.0f);
+ }
+
+ // Load x
+ for (int j = pad_left; j < cells_j; j++)
+ {
+ x[j] = vld1q_f32(x_ptrs[j]);
+ x_ptrs[j] += 4;
+ }
+
+ // Compute U = x . X
+ U[0] = vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmulq_n_f32(x[6], 1), x[2], 49), x[4], -14), x[0], -36);
+ U[1] = vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmulq_n_f32(x[6], 1), x[2], 36), x[3], 13), x[4], -13), x[1], -36), x[5], -1);
+ U[2] = vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmulq_n_f32(x[6], 1), x[5], 1), x[2], 36), x[1], 36), x[4], -13), x[3], -13);
+ U[3] = vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmulq_n_f32(x[6], 1), x[3], 20), x[2], 9), x[5], -2), x[4], -10), x[1], -18);
+ U[4] = vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmulq_n_f32(x[6], 1), x[1], 18), x[2], 9), x[5], 2), x[4], -10), x[3], -20);
+ U[5] = vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmulq_n_f32(x[6], 1), x[3], 15), x[2], 4), x[5], -3), x[4], -5), x[1], -12);
+ U[6] = vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmulq_n_f32(x[6], 1), x[1], 12), x[2], 4), x[5], 3), x[4], -5), x[3], -15);
+ U[7] = vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmulq_n_f32(x[7], 1), x[3], 49), x[5], -14), x[1], -36);
+
+ // Store the transformed vector
+ for (int j = 0; j < inner_tile_j; j++)
+ {
+ vst1q_f32(outptr + j*matrix_stride, U[j]);
+ }
+ outptr += 4;
+ }
+
+ for (; channels_remaining >= 2; channels_remaining -= 2)
+ {
+ float32x2_t x[inner_tile_j], U[inner_tile_j];
+ for (int j = 0; j < inner_tile_cols; j++)
+ {
+ x[j] = vdup_n_f32(0.0f);
+ }
+
+ // Load x
+ for (int j = pad_left; j < cells_j; j++)
+ {
+ x[j] = vld1_f32(x_ptrs[j]);
+ x_ptrs[j] += 2;
+ }
+
+ // Compute U = x . X
+ U[0] = vmla_n_f32(vmla_n_f32(vmla_n_f32(vmul_n_f32(x[6], 1), x[2], 49), x[4], -14), x[0], -36);
+ U[1] = vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmul_n_f32(x[6], 1), x[2], 36), x[3], 13), x[4], -13), x[1], -36), x[5], -1);
+ U[2] = vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmul_n_f32(x[6], 1), x[5], 1), x[2], 36), x[1], 36), x[4], -13), x[3], -13);
+ U[3] = vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmul_n_f32(x[6], 1), x[3], 20), x[2], 9), x[5], -2), x[4], -10), x[1], -18);
+ U[4] = vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmul_n_f32(x[6], 1), x[1], 18), x[2], 9), x[5], 2), x[4], -10), x[3], -20);
+ U[5] = vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmul_n_f32(x[6], 1), x[3], 15), x[2], 4), x[5], -3), x[4], -5), x[1], -12);
+ U[6] = vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmul_n_f32(x[6], 1), x[1], 12), x[2], 4), x[5], 3), x[4], -5), x[3], -15);
+ U[7] = vmla_n_f32(vmla_n_f32(vmla_n_f32(vmul_n_f32(x[7], 1), x[3], 49), x[5], -14), x[1], -36);
+
+ // Store the transformed vector
+ for (int j = 0; j < inner_tile_j; j++)
+ {
+ vst1_f32(outptr + j*matrix_stride, U[j]);
+ }
+ outptr += 2;
+ }
+#endif // __arm_any__
+ for (; channels_remaining; channels_remaining--)
+ {
+ // Load x
+ for (int j = pad_left; j < cells_j; j++)
+ {
+ x[j] = *(x_ptrs[j]++);
+ }
+
+ // Compute U = x . X
+ U[0] = x[0]*-36 + x[4]*-14 + x[2]*49 + x[6]*1;
+ U[1] = x[5]*-1 + x[1]*-36 + x[4]*-13 + x[3]*13 + x[2]*36 + x[6]*1;
+ U[2] = x[3]*-13 + x[4]*-13 + x[1]*36 + x[2]*36 + x[5]*1 + x[6]*1;
+ U[3] = x[1]*-18 + x[4]*-10 + x[5]*-2 + x[2]*9 + x[3]*20 + x[6]*1;
+ U[4] = x[3]*-20 + x[4]*-10 + x[5]*2 + x[2]*9 + x[1]*18 + x[6]*1;
+ U[5] = x[1]*-12 + x[4]*-5 + x[5]*-3 + x[2]*4 + x[3]*15 + x[6]*1;
+ U[6] = x[3]*-15 + x[4]*-5 + x[5]*3 + x[2]*4 + x[1]*12 + x[6]*1;
+ U[7] = x[1]*-36 + x[5]*-14 + x[3]*49 + x[7]*1;
+
+ // Store the transformed vector
+ for (int j = 0; j < inner_tile_j; j++)
+ {
+ *(outptr + j*matrix_stride) = U[j];
+ }
+ outptr++;
+ }
+}
+
+template <>
+template <>
+const Transform::TileFn Transform::tile_fns[n_pad_top][n_pad_left][n_pad_bottom][n_pad_right] =
+{
+ {
+ {
+ {
+ Transform::template process_tile<0, 0, 0, 0>,
+ Transform::template process_tile<0, 0, 0, 1>,
+ Transform::template process_tile<0, 0, 0, 2>,
+ Transform::template process_tile<0, 0, 0, 3>,
+ Transform::template process_tile<0, 0, 0, 4>,
+ Transform::template process_tile<0, 0, 0, 5>,
+ Transform::template process_tile<0, 0, 0, 6>,
+ }
+ },
+ {
+ {
+ Transform::template process_tile<0, 1, 0, 0>,
+ Transform::template process_tile<0, 1, 0, 1>,
+ Transform::template process_tile<0, 1, 0, 2>,
+ Transform::template process_tile<0, 1, 0, 3>,
+ Transform::template process_tile<0, 1, 0, 4>,
+ Transform::template process_tile<0, 1, 0, 5>,
+ Transform::template process_tile<0, 1, 0, 6>,
+ }
+ }
+ }
+};
+
+template <int x, int y>
+using TransformTransposed = typename WinogradGEMM<x, 1, y, 1>::template InputTransform<float>;
+
+template <>
+template <>
+const TransformTransposed<6, 3>::TileFn
+ TransformTransposed<6, 3>::tile_fns[n_pad_top][n_pad_left][n_pad_bottom][n_pad_right] = {};
+
+template <>
+template <>
+const TransformTransposed<4, 5>::TileFn
+ TransformTransposed<4, 5>::tile_fns[n_pad_top][n_pad_left][n_pad_bottom][n_pad_right] = {};
+
+template <>
+template <>
+const TransformTransposed<2, 7>::TileFn
+ TransformTransposed<2, 7>::tile_fns[n_pad_top][n_pad_left][n_pad_bottom][n_pad_right] = {};
+
+
+
+template struct WinogradGEMM<1, 6, 1, 3>::InputTransform<float>;
+template struct WinogradGEMM<6, 1, 3, 1>::InputTransform<float>;
+} // namespace winograd
diff --git a/src/core/NEON/kernels/convolution/winograd/transforms/output_6_3_fp32.cpp b/src/core/NEON/kernels/convolution/winograd/transforms/output_6_3_fp32.cpp
new file mode 100644
index 0000000000..16667ccdb6
--- /dev/null
+++ b/src/core/NEON/kernels/convolution/winograd/transforms/output_6_3_fp32.cpp
@@ -0,0 +1,186 @@
+/*
+ * Copyright (c) 2017 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "arm_compute/core/NEON/kernels/convolution/winograd/transforms/output.hpp"
+#include "arm_compute/core/NEON/kernels/convolution/winograd/winograd_gemm.hpp"
+#include "arm_compute/core/NEON/kernels/convolution/common/arm.hpp"
+
+namespace winograd
+{
+
+using Transform = WinogradGEMM<1, 6, 1, 3>::OutputTransform<float>;
+using TransformTransposed = WinogradGEMM<6, 1, 3, 1>::OutputTransform<float>;
+
+template <>
+template <>
+int Transform::ops_performed(const Tensor4DShape &shape)
+{
+ (void) shape;
+ return 0; // TODO
+}
+
+template <>
+template <>
+template <int pad_bottom, int pad_right>
+void Transform::process_tile(
+ const int n_channels,
+ const float* const matrix_base,
+ const int matrix_stride,
+ const float* const biases,
+ float* const output,
+ const int output_row_stride,
+ const int output_col_stride
+)
+{
+ (void) output_row_stride;
+ constexpr int cells_j = output_tile_cols - pad_right;
+
+ // Construct a map to the output cells
+ float *outptrs[cells_j];
+ for (int j = 0; j < cells_j; j++)
+ {
+ outptrs[j] = output + j*output_col_stride;
+ }
+ const float *inptr = matrix_base;
+ const float *bptr = biases;
+
+ // For each channel of the output
+ int channels_remaining = n_channels;
+#ifdef __arm_any__
+ for (; channels_remaining >= 4; channels_remaining -= 4)
+ {
+ // Matrices used and computed during this transform
+ float32x4_t F[inner_tile_cols], f[output_tile_cols], b = vdupq_n_f32(0.0f);
+
+ // Read a 1x8 tile in the Winograd domain
+ for (int j = 0; j < inner_tile_cols; j++)
+ {
+ F[j] = vld1q_f32(inptr + j*matrix_stride);
+ }
+ inptr += 4;
+
+ f[0] = vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmulq_n_f32(F[6], 1), F[5], 1), F[4], 1), F[3], 1), F[2], 1), F[1], 1), F[0], 1);
+ f[1] = vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmulq_n_f32(F[2], 1), F[6], 3), F[4], 2), F[3], -2), F[5], -3), F[1], -1);
+ f[2] = vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmulq_n_f32(F[2], 1), F[1], 1), F[6], 9), F[5], 9), F[4], 4), F[3], 4);
+ f[3] = vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmulq_n_f32(F[2], 1), F[6], 27), F[4], 8), F[3], -8), F[5], -27), F[1], -1);
+ f[4] = vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmulq_n_f32(F[2], 1), F[1], 1), F[6], 81), F[5], 81), F[4], 16), F[3], 16);
+ f[5] = vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmulq_n_f32(F[7], 1), F[2], 1), F[6], 243), F[4], 32), F[3], -32), F[5], -243), F[1], -1);
+
+ // Write out the output tile
+ if (bptr != 0)
+ {
+ b = vld1q_f32(bptr);
+ bptr += 4;
+ }
+ for (int j = 0; j < cells_j; j++)
+ {
+ vst1q_f32(outptrs[j], f[j] + b);
+ outptrs[j] += 4;
+ }
+ }
+ for (; channels_remaining >= 2; channels_remaining -= 2)
+ {
+ // Matrices used and computed during this transform
+ float32x2_t F[inner_tile_cols], f[output_tile_cols], b = vdup_n_f32(0.0f);
+
+ // Read a 1x8 tile in the Winograd domain
+ for (int j = 0; j < inner_tile_cols; j++)
+ {
+ F[j] = vld1_f32(inptr + j*matrix_stride);
+ }
+ inptr += 2;
+
+ f[0] = vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmul_n_f32(F[6], 1), F[5], 1), F[4], 1), F[3], 1), F[2], 1), F[1], 1), F[0], 1);
+ f[1] = vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmul_n_f32(F[2], 1), F[6], 3), F[4], 2), F[3], -2), F[5], -3), F[1], -1);
+ f[2] = vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmul_n_f32(F[2], 1), F[1], 1), F[6], 9), F[5], 9), F[4], 4), F[3], 4);
+ f[3] = vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmul_n_f32(F[2], 1), F[6], 27), F[4], 8), F[3], -8), F[5], -27), F[1], -1);
+ f[4] = vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmul_n_f32(F[2], 1), F[1], 1), F[6], 81), F[5], 81), F[4], 16), F[3], 16);
+ f[5] = vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmul_n_f32(F[7], 1), F[2], 1), F[6], 243), F[4], 32), F[3], -32), F[5], -243), F[1], -1);
+
+ // Write out the output tile
+ if (bptr != 0)
+ {
+ b = vld1_f32(bptr);
+ bptr += 2;
+ }
+ for (int j = 0; j < cells_j; j++)
+ {
+ vst1_f32(outptrs[j], f[j] + b);
+ outptrs[j] += 2;
+ }
+ }
+#endif // __arm_any__
+ for (; channels_remaining; channels_remaining--)
+ {
+ // Matrices used and computed during this transform
+ float F[inner_tile_cols], f[output_tile_cols], b = 0.0f;
+
+ // Read a 1x8 tile in the Winograd domain
+ for (int j = 0; j < inner_tile_cols; j++)
+ {
+ F[j] = *(inptr + j*matrix_stride);
+ }
+ inptr++;
+
+ f[0] = F[0]*1 + F[1]*1 + F[2]*1 + F[3]*1 + F[4]*1 + F[5]*1 + F[6]*1;
+ f[1] = F[1]*-1 + F[5]*-3 + F[3]*-2 + F[4]*2 + F[6]*3 + F[2]*1;
+ f[2] = F[3]*4 + F[4]*4 + F[5]*9 + F[6]*9 + F[1]*1 + F[2]*1;
+ f[3] = F[1]*-1 + F[5]*-27 + F[3]*-8 + F[4]*8 + F[6]*27 + F[2]*1;
+ f[4] = F[3]*16 + F[4]*16 + F[5]*81 + F[6]*81 + F[1]*1 + F[2]*1;
+ f[5] = F[1]*-1 + F[5]*-243 + F[3]*-32 + F[4]*32 + F[6]*243 + F[2]*1 + F[7]*1;
+
+ // Write out the output tile
+ if (bptr != 0)
+ {
+ b = *(bptr++);
+ }
+ for (int j = 0; j < cells_j; j++)
+ {
+ *(outptrs[j]++) = f[j] + b;
+ }
+ }
+}
+
+template <>
+template <>
+const Transform::TileFn Transform::tile_fns[max_pad_bottom][max_pad_right] =
+{
+ {
+ Transform::template process_tile<0, 0>,
+ Transform::template process_tile<0, 1>,
+ Transform::template process_tile<0, 2>,
+ Transform::template process_tile<0, 3>,
+ Transform::template process_tile<0, 4>,
+ Transform::template process_tile<0, 5>,
+ },
+};
+
+template <>
+template <>
+const TransformTransposed::TileFn TransformTransposed::tile_fns[max_pad_bottom][max_pad_right] = {};
+
+
+template struct WinogradGEMM<1, 6, 1, 3>::OutputTransform<float>;
+template struct WinogradGEMM<6, 1, 3, 1>::OutputTransform<float>;
+} // namespace winograd
diff --git a/src/core/NEON/kernels/convolution/winograd/transforms/weights_6_3_fp32.cpp b/src/core/NEON/kernels/convolution/winograd/transforms/weights_6_3_fp32.cpp
new file mode 100644
index 0000000000..c560aa8c8f
--- /dev/null
+++ b/src/core/NEON/kernels/convolution/winograd/transforms/weights_6_3_fp32.cpp
@@ -0,0 +1,125 @@
+/*
+ * Copyright (c) 2017 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "arm_compute/core/NEON/kernels/convolution/common/arm.hpp"
+#include "arm_compute/core/NEON/kernels/convolution/winograd/winograd_gemm.hpp"
+#include "arm_compute/core/NEON/kernels/convolution/winograd/transforms/kernel.hpp"
+
+
+namespace winograd
+{
+ template <>
+ template <>
+ void WinogradGEMM<1, 6, 1, 3>::WeightsTransform<float>::execute(
+ const int n_output_channels,
+ const int n_input_channels,
+ const float* const input, // NOTE: Data in HWIO order
+ float* const output,
+ const int matrix_stride,
+ const int matrix_row_stride
+ )
+ {
+ // Get pointers to each cell of the weight tensor
+ const auto weight_col_stride = n_input_channels * n_output_channels;
+ const float *inptrs[3];
+ for (int j = 0; j < 3; j++)
+ {
+ inptrs[j] = input + j*weight_col_stride;
+ }
+
+ // For each input channel
+ for (int ic = 0; ic < n_input_channels; ic++)
+ {
+ float *outptr = output + ic * matrix_row_stride;
+
+ // For each output channel
+ int channels_remaining = n_output_channels;
+ for (; channels_remaining; channels_remaining--)
+ {
+ // Matrices used and computed in this kernel
+ float w[3], V[inner_tile_cols];
+
+ // Read weights
+ for (int j = 0; j < 3; j++)
+ {
+ w[j] = *(inptrs[j]++);
+ }
+
+ // Compute V = w WT
+ V[0] = (w[0]*-1) / 36.0f;
+ V[1] = (w[1]*-1 + w[0]*1 + w[2]*1) / 48.0f;
+ V[2] = (w[0]*1 + w[1]*1 + w[2]*1) / 48.0f;
+ V[3] = (w[0]*-1 + w[2]*-4 + w[1]*2) / 120.0f;
+ V[4] = (w[0]*-1 + w[2]*-4 + w[1]*-2) / 120.0f;
+ V[5] = (w[1]*-3 + w[2]*9 + w[0]*1) / 720.0f;
+ V[6] = (w[1]*3 + w[2]*9 + w[0]*1) / 720.0f;
+ V[7] = (w[2]*1) / 1;
+
+ // Store the transformed weights
+ for (int j = 0; j < inner_tile_cols; j++)
+ {
+ *(outptr + j*matrix_stride) = V[j];
+ }
+ outptr++;
+ }
+ }
+ }
+
+ template <>
+ template <>
+ int WinogradGEMM<1, 6, 1, 3>::WeightsTransform<float>::ops_performed(const KernelShape &shape)
+ {
+ (void) shape;
+ return 0; // TODO
+ }
+
+ template <>
+ template <>
+ void WinogradGEMM<6, 1, 3, 1>::WeightsTransform<float>::execute(
+ const int n_output_channels,
+ const int n_input_channels,
+ const float* const input, // NOTE: Data in HWIO order
+ float* const output,
+ const int matrix_stride,
+ const int matrix_row_stride
+ )
+ {
+ // Redirect to the 1xN implementation
+ WinogradGEMM<1, 6, 1, 3>::template WeightsTransform<float>::execute(
+ n_output_channels, n_input_channels, input, output, matrix_stride,
+ matrix_row_stride
+ );
+ }
+
+ template <>
+ template <>
+ int WinogradGEMM<6, 1, 3, 1>::WeightsTransform<float>::ops_performed(const KernelShape &shape)
+ {
+ (void) shape;
+ return 0; // TODO
+ }
+
+ template struct WinogradGEMM<1, 6, 1, 3>::WeightsTransform<float>;
+ template struct WinogradGEMM<6, 1, 3, 1>::WeightsTransform<float>;
+}
diff --git a/src/core/NEON/kernels/convolution/winograd/winograd_gemm.cpp b/src/core/NEON/kernels/convolution/winograd/winograd_gemm.cpp
index a5d43024a4..d544fd5710 100644
--- a/src/core/NEON/kernels/convolution/winograd/winograd_gemm.cpp
+++ b/src/core/NEON/kernels/convolution/winograd/winograd_gemm.cpp
@@ -225,4 +225,7 @@ int WinogradGEMM<kernel_rows, kernel_cols, output_tile_rows, output_tile_cols>::
template class WinogradGEMM<2, 2, 3, 3>::Convolution<float, float>;
template class WinogradGEMM<4, 4, 3, 3>::Convolution<float, float>;
+template class WinogradGEMM<1, 6, 1, 3>::Convolution<float, float>;
+template class WinogradGEMM<6, 1, 3, 1>::Convolution<float, float>;
+
template class WinogradGEMM<2, 2, 5, 5>::Convolution<float, float>;
diff --git a/src/runtime/NEON/functions/NEWinogradConvolutionLayer.cpp b/src/runtime/NEON/functions/NEWinogradConvolutionLayer.cpp
index 11bb2d881b..ff7934e23a 100644
--- a/src/runtime/NEON/functions/NEWinogradConvolutionLayer.cpp
+++ b/src/runtime/NEON/functions/NEWinogradConvolutionLayer.cpp
@@ -52,25 +52,14 @@ inline Tensor4DShape internal_get_input_shape(const arm_compute::ITensor *input)
Status validate_arguments(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const PadStrideInfo &conv_info)
{
- const DataLayout data_layout = input->data_layout();
- const unsigned int width_idx = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
- const unsigned int height_idx = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
-
ARM_COMPUTE_UNUSED(output);
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F32);
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, weights);
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(weights->dimension(width_idx) != 3 && weights->dimension(height_idx) != 5, "Only 3 and 5 kernels are supported");
- ARM_COMPUTE_RETURN_ERROR_ON(weights->num_dimensions() > 4);
-
ARM_COMPUTE_RETURN_ERROR_ON_MSG(conv_info.stride().first != 1 || conv_info.stride().second != 1, "Winograd layer only supports unit strides.");
-
if(biases != nullptr)
{
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, biases);
ARM_COMPUTE_RETURN_ERROR_ON(biases->num_dimensions() > 1);
}
-
- return Status{};
+ return INEWinogradLayerTransformWeightsKernel<float>::validate(input, weights);
}
Size2D winograd_output_tile(const Size2D &input_dims, const Size2D &kernel_dims)
@@ -85,7 +74,14 @@ Size2D winograd_output_tile(const Size2D &input_dims, const Size2D &kernel_dims)
{
output_tile = Size2D(2U, 2U);
}
-
+ else if(kernel_dims == Size2D(1U, 3U))
+ {
+ output_tile = Size2D(1U, 6U);
+ }
+ else if(kernel_dims == Size2D(3U, 1U))
+ {
+ output_tile = Size2D(6U, 1U);
+ }
return output_tile;
}
@@ -94,7 +90,7 @@ bool check_support_fast_math(const Size2D &output_tile, const Size2D &kernel_siz
// Check if we want to configure a Winograd configuration which requires fast math
using WinogradConfiguration = std::pair<std::pair<int, int>, std::pair<int, int>>;
- std::vector<WinogradConfiguration> fast_math_winograd =
+ const std::vector<WinogradConfiguration> fast_math_winograd =
{
WinogradConfiguration(std::pair<int, int>(2, 2), std::pair<int, int>(5, 5)),
WinogradConfiguration(std::pair<int, int>(4, 4), std::pair<int, int>(5, 5))
@@ -149,48 +145,78 @@ void NEWinogradConvolutionLayer::configure(const ITensor *input, const ITensor *
int n_gemms = 0;
int N_BLOCK = 0; // Size of block used by GEMM.
- switch(kernel_size.width)
+ const bool square_kernel = kernel_size.width == kernel_size.height;
+
+ if(square_kernel)
{
- case 3:
+ switch(kernel_size.width)
{
- if(input->info()->dimension(width_idx) > 4 && input->info()->dimension(height_idx) > 4)
+ case 3:
{
- using config = NEWinogradLayerConfiguration<float, float, 4, 4, 3, 3>;
- transform_input_kernel = support::cpp14::make_unique<config::TransformInputKernel>();
- transform_weights_kernel = support::cpp14::make_unique<config::TransformWeightsKernel>();
- transform_output_kernel = support::cpp14::make_unique<config::TransformOutputKernel>();
- n_gemms = config::WinogradBase::N_GEMMS;
- N_BLOCK = config::WinogradConv::N_BLOCK;
+ if(input->info()->dimension(width_idx) > 4 && input->info()->dimension(height_idx) > 4)
+ {
+ using config = NEWinogradLayerConfiguration<float, float, 4, 4, 3, 3>;
+ transform_input_kernel = support::cpp14::make_unique<config::TransformInputKernel>();
+ transform_weights_kernel = support::cpp14::make_unique<config::TransformWeightsKernel>();
+ transform_output_kernel = support::cpp14::make_unique<config::TransformOutputKernel>();
+ n_gemms = config::WinogradBase::N_GEMMS;
+ N_BLOCK = config::WinogradConv::N_BLOCK;
+ }
+ else
+ {
+ using config = NEWinogradLayerConfiguration<float, float, 2, 2, 3, 3>;
+ transform_input_kernel = support::cpp14::make_unique<config::TransformInputKernel>();
+ transform_weights_kernel = support::cpp14::make_unique<config::TransformWeightsKernel>();
+ transform_output_kernel = support::cpp14::make_unique<config::TransformOutputKernel>();
+ n_gemms = config::WinogradBase::N_GEMMS;
+ N_BLOCK = config::WinogradConv::N_BLOCK;
+ }
+ break;
}
- else
+ case 5:
{
- using config = NEWinogradLayerConfiguration<float, float, 2, 2, 3, 3>;
+ using config = NEWinogradLayerConfiguration<float, float, 2, 2, 5, 5>;
transform_input_kernel = support::cpp14::make_unique<config::TransformInputKernel>();
transform_weights_kernel = support::cpp14::make_unique<config::TransformWeightsKernel>();
transform_output_kernel = support::cpp14::make_unique<config::TransformOutputKernel>();
n_gemms = config::WinogradBase::N_GEMMS;
N_BLOCK = config::WinogradConv::N_BLOCK;
+ break;
+ }
+ default:
+ {
+ ARM_COMPUTE_ERROR("Not supported.");
+ break;
}
- break;
}
- case 5:
+ }
+ else
+ {
+ if(kernel_size == Size2D(1, 3))
+ {
+ using config = NEWinogradLayerConfiguration<float, float, 6, 1, 3, 1>;
+ transform_input_kernel = support::cpp14::make_unique<config::TransformInputKernel>();
+ transform_weights_kernel = support::cpp14::make_unique<config::TransformWeightsKernel>();
+ transform_output_kernel = support::cpp14::make_unique<config::TransformOutputKernel>();
+ n_gemms = config::WinogradBase::N_GEMMS;
+ N_BLOCK = config::WinogradConv::N_BLOCK;
+ }
+ else if(kernel_size == Size2D(3, 1))
{
- using config = NEWinogradLayerConfiguration<float, float, 2, 2, 5, 5>;
+ using config = NEWinogradLayerConfiguration<float, float, 1, 6, 1, 3>;
transform_input_kernel = support::cpp14::make_unique<config::TransformInputKernel>();
transform_weights_kernel = support::cpp14::make_unique<config::TransformWeightsKernel>();
transform_output_kernel = support::cpp14::make_unique<config::TransformOutputKernel>();
n_gemms = config::WinogradBase::N_GEMMS;
N_BLOCK = config::WinogradConv::N_BLOCK;
- break;
}
- default:
+ else
{
ARM_COMPUTE_ERROR("Not supported.");
- break;
}
}
- const PaddingType use_padding_type = (conv_info.pad_left() != 0u) ? PADDING_SAME : PADDING_VALID;
+ const PaddingType use_padding_type = (conv_info.pad_top() != 0u || conv_info.pad_left() != 0) ? PADDING_SAME : PADDING_VALID;
const bool use_same_padding = use_padding_type == PADDING_SAME;
// Get convolved dimensions
@@ -357,12 +383,12 @@ void NEWinogradConvolutionLayer::run()
//Bring channels to the front as Winograd code expects the tensor to be in the format NHWC
_permute_input.run();
}
+
// Transform input tensor to the winograd domain
NEScheduler::get().schedule(_transform_input_kernel.get(), Window::DimX);
//Run 16 GEMMs in multiple threads, each kernel runs one or more GEMMs
_asm_glue.run();
-
// Transform output tensor to the spatial domain
NEScheduler::get().schedule(_transform_output_kernel.get(), Window::DimX);
@@ -391,9 +417,10 @@ Status NEWinogradConvolutionLayer::validate(const ITensorInfo *input, const ITen
const size_t idx_height = get_data_layout_dimension_index(input->data_layout(), DataLayoutDimension::HEIGHT);
// Input shape, kernel size and output tile
- const Size2D input_dims = Size2D(input->dimension(idx_width), input->dimension(idx_height));
- const Size2D kernel_size = Size2D(weights->dimension(idx_width), weights->dimension(idx_height));
- const Size2D output_tile = winograd_output_tile(input_dims, kernel_size);
+ const Size2D input_dims = Size2D(input->dimension(idx_width), input->dimension(idx_height));
+ const Size2D kernel_size = Size2D(weights->dimension(idx_width), weights->dimension(idx_height));
+ const Size2D output_tile = winograd_output_tile(input_dims, kernel_size);
+ const bool square_kernel = kernel_size.width == kernel_size.height;
// Check if the Winograd configuration requires fast math
if(!enable_fast_math)
@@ -411,90 +438,120 @@ Status NEWinogradConvolutionLayer::validate(const ITensorInfo *input, const ITen
const TensorShape input0_shape = misc::shape_calculator::compute_winograd_input_transform_shape(*input, winograd_info);
const TensorInfo input0 = input->clone()->set_tensor_shape(input0_shape);
- switch(weights->dimension(idx_width))
+ if(square_kernel)
{
- case 3:
+ switch(weights->dimension(idx_width))
{
- if(input_dims.width > 4 && input_dims.height > 4)
+ case 3:
{
- ARM_COMPUTE_RETURN_ON_ERROR((NEWinogradLayerTransformInputKernel<float, 4, 4, 3, 3>::validate(input, &input0, winograd_info)));
+ if(input_dims.width > 4 && input_dims.height > 4)
+ {
+ ARM_COMPUTE_RETURN_ON_ERROR((NEWinogradLayerTransformInputKernel<float, 4, 4, 3, 3>::validate(input, &input0, winograd_info)));
+ }
+ else
+ {
+ ARM_COMPUTE_RETURN_ON_ERROR((NEWinogradLayerTransformInputKernel<float, 2, 2, 3, 3>::validate(input, &input0, winograd_info)));
+ }
+ break;
}
- else
+ case 5:
{
- ARM_COMPUTE_RETURN_ON_ERROR((NEWinogradLayerTransformInputKernel<float, 2, 2, 3, 3>::validate(input, &input0, winograd_info)));
+ ARM_COMPUTE_RETURN_ON_ERROR((NEWinogradLayerTransformInputKernel<float, 2, 2, 5, 5>::validate(input, &input0, winograd_info)));
+ break;
+ }
+ default:
+ {
+ ARM_COMPUTE_RETURN_ERROR_MSG("Only 3x3 and 5x5 kernels supported.");
+ break;
}
- break;
- }
- case 5:
- {
- ARM_COMPUTE_RETURN_ON_ERROR((NEWinogradLayerTransformInputKernel<float, 2, 2, 5, 5>::validate(input, &input0, winograd_info)));
- break;
- }
- default:
- {
- ARM_COMPUTE_RETURN_ERROR_MSG("Only 3x3 and 5x5 kernels supported.");
- break;
}
- }
- // Validate filter transform
- const TensorShape input1_shape = misc::shape_calculator::compute_winograd_filter_transform_shape(*weights, winograd_info);
- const TensorInfo input1 = weights->clone()->set_tensor_shape(input1_shape);
+ // Validate filter transform
+ const TensorShape input1_shape = misc::shape_calculator::compute_winograd_filter_transform_shape(*weights, winograd_info);
+ const TensorInfo input1 = weights->clone()->set_tensor_shape(input1_shape);
- switch(weights->dimension(idx_width))
- {
- case 3:
+ switch(weights->dimension(idx_width))
{
- if(input_dims.width > 4 && input_dims.height > 4)
+ case 3:
{
- ARM_COMPUTE_RETURN_ON_ERROR((NEWinogradLayerTransformWeightsKernel<float, 4, 4, 3, 3>::validate(weights, &input1, winograd_info)));
+ if(input_dims.width > 4 && input_dims.height > 4)
+ {
+ ARM_COMPUTE_RETURN_ON_ERROR((NEWinogradLayerTransformWeightsKernel<float, 4, 4, 3, 3>::validate(weights, &input1, winograd_info)));
+ }
+ else
+ {
+ ARM_COMPUTE_RETURN_ON_ERROR((NEWinogradLayerTransformWeightsKernel<float, 2, 2, 3, 3>::validate(weights, &input1, winograd_info)));
+ }
+ break;
}
- else
+ case 5:
{
- ARM_COMPUTE_RETURN_ON_ERROR((NEWinogradLayerTransformWeightsKernel<float, 2, 2, 3, 3>::validate(weights, &input1, winograd_info)));
+ ARM_COMPUTE_RETURN_ON_ERROR((NEWinogradLayerTransformWeightsKernel<float, 2, 2, 5, 5>::validate(weights, &input1, winograd_info)));
+ break;
+ }
+ default:
+ {
+ ARM_COMPUTE_RETURN_ERROR_MSG("Only 3x3 and 5x5 kernels supported.");
+ break;
}
- break;
- }
- case 5:
- {
- ARM_COMPUTE_RETURN_ON_ERROR((NEWinogradLayerTransformWeightsKernel<float, 2, 2, 5, 5>::validate(weights, &input1, winograd_info)));
- break;
- }
- default:
- {
- ARM_COMPUTE_RETURN_ERROR_MSG("Only 3x3 and 5x5 kernels supported.");
- break;
}
- }
- // Validate batched matrix multiply
- TensorShape batched_mm_output_shape = input0.tensor_shape();
- batched_mm_output_shape[0] = input1.tensor_shape()[0];
- const TensorInfo batched_mm_output = input0.clone()->set_tensor_shape(batched_mm_output_shape);
- switch(weights->dimension(idx_width))
- {
- case 3:
+ // Validate batched matrix multiply
+ TensorShape batched_mm_output_shape = input0.tensor_shape();
+ batched_mm_output_shape[0] = input1.tensor_shape()[0];
+ const TensorInfo batched_mm_output = input0.clone()->set_tensor_shape(batched_mm_output_shape);
+ switch(weights->dimension(idx_width))
{
- if(input_dims.width > 4 && input_dims.height > 4)
+ case 3:
{
- // Validate output transform
- ARM_COMPUTE_RETURN_ON_ERROR((NEWinogradLayerTransformOutputKernel<float, 4, 4, 3, 3>::validate(&batched_mm_output, biases, output, winograd_info)));
+ if(input_dims.width > 4 && input_dims.height > 4)
+ {
+ // Validate output transform
+ ARM_COMPUTE_RETURN_ON_ERROR((NEWinogradLayerTransformOutputKernel<float, 4, 4, 3, 3>::validate(&batched_mm_output, biases, output, winograd_info)));
+ }
+ else
+ {
+ // Validate output transform
+ ARM_COMPUTE_RETURN_ON_ERROR((NEWinogradLayerTransformOutputKernel<float, 2, 2, 3, 3>::validate(&batched_mm_output, biases, output, winograd_info)));
+ }
+ break;
}
- else
+ case 5:
{
// Validate output transform
- ARM_COMPUTE_RETURN_ON_ERROR((NEWinogradLayerTransformOutputKernel<float, 2, 2, 3, 3>::validate(&batched_mm_output, biases, output, winograd_info)));
+ ARM_COMPUTE_RETURN_ON_ERROR((NEWinogradLayerTransformOutputKernel<float, 2, 2, 5, 5>::validate(&batched_mm_output, biases, output, winograd_info)));
+ break;
+ }
+ default:
+ {
+ ARM_COMPUTE_RETURN_ERROR_MSG("Only 3x3 and 5x5 kernels supported.");
+ break;
}
- break;
}
- case 5:
+ }
+ else
+ {
+ const TensorShape input1_shape = misc::shape_calculator::compute_winograd_filter_transform_shape(*weights, winograd_info);
+ const TensorInfo input1 = weights->clone()->set_tensor_shape(input1_shape);
+ TensorShape batched_mm_output_shape = input0.tensor_shape();
+ batched_mm_output_shape[0] = input1.tensor_shape()[0];
+ const TensorInfo batched_mm_output = input0.clone()->set_tensor_shape(batched_mm_output_shape);
+
+ if(kernel_size == Size2D(3, 1))
{
- // Validate output transform
- ARM_COMPUTE_RETURN_ON_ERROR((NEWinogradLayerTransformOutputKernel<float, 2, 2, 5, 5>::validate(&batched_mm_output, biases, output, winograd_info)));
- break;
+ ARM_COMPUTE_RETURN_ON_ERROR((NEWinogradLayerTransformInputKernel<float, 1, 6, 1, 3>::validate(input, &input0, winograd_info)));
+ // Validate filter transform
+ ARM_COMPUTE_RETURN_ON_ERROR((NEWinogradLayerTransformWeightsKernel<float, 1, 6, 1, 3>::validate(weights, &input1, winograd_info)));
+ ARM_COMPUTE_RETURN_ON_ERROR((NEWinogradLayerTransformOutputKernel<float, 1, 6, 1, 3>::validate(&batched_mm_output, biases, output, winograd_info)));
}
- default:
+ else if(kernel_size == Size2D(1, 3))
{
- ARM_COMPUTE_RETURN_ERROR_MSG("Only 3x3 and 5x5 kernels supported.");
- break;
+ ARM_COMPUTE_RETURN_ON_ERROR((NEWinogradLayerTransformInputKernel<float, 6, 1, 3, 1>::validate(input, &input0, winograd_info)));
+ // Validate filter transform
+ ARM_COMPUTE_RETURN_ON_ERROR((NEWinogradLayerTransformWeightsKernel<float, 6, 1, 3, 1>::validate(weights, &input1, winograd_info)));
+ ARM_COMPUTE_RETURN_ON_ERROR((NEWinogradLayerTransformOutputKernel<float, 6, 1, 3, 1>::validate(&batched_mm_output, biases, output, winograd_info)));
+ }
+ else
+ {
+ ARM_COMPUTE_RETURN_ERROR_MSG("Kernel shape not supported");
}
}
// Validate Activation Layer
@@ -515,8 +572,8 @@ void NEWinogradConvolutionLayer::prepare()
// Transform weights
NEScheduler::get().schedule(_transform_weights_kernel.get(), Window::DimX);
- _weights_hwio.allocator()->free();
+ _weights_hwio.allocator()->free();
_is_prepared = true;
}
}
diff --git a/tests/validation/NEON/ConvolutionLayer.cpp b/tests/validation/NEON/ConvolutionLayer.cpp
index 18072e0532..591af7b731 100644
--- a/tests/validation/NEON/ConvolutionLayer.cpp
+++ b/tests/validation/NEON/ConvolutionLayer.cpp
@@ -121,18 +121,97 @@ using NEWinogradConvolutionLayerNoBiasFixture = WinogradConvolutionLayerFastMath
TEST_SUITE(FP32)
+TEST_SUITE(Conv1x3)
FIXTURE_DATA_TEST_CASE(RunSmall, NEWinogradConvolutionLayerFixture<float>, framework::DatasetMode::PRECOMMIT,
- combine(combine(combine(framework::dataset::concat(datasets::SmallWinogradConvolutionLayer3x3Dataset(),
- datasets::SmallWinogradConvolutionLayer5x5Dataset()),
+ combine(combine(combine(datasets::SmallWinogradConvolutionLayer1x3Dataset(),
+ framework::dataset::make("DataType", { DataType::F32 })),
+ ActivationFunctionsDataset),
+ framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))
+{
+ // Validate output
+ validate(Accessor(_target), _reference, abs_tolerance_f32);
+}
+FIXTURE_DATA_TEST_CASE(RunLarge, NEWinogradConvolutionLayerFixture<float>, framework::DatasetMode::NIGHTLY,
+ combine(combine(combine(datasets::LargeWinogradConvolutionLayer1x3Dataset(),
framework::dataset::make("DataType", { DataType::F32 })),
ActivationFunctionsDataset),
framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))
+{
+ // Validate output
+ validate(Accessor(_target), _reference, abs_tolerance_f32);
+}
+
+TEST_SUITE_END() // Conv1x3
+TEST_SUITE(Conv3x1)
+FIXTURE_DATA_TEST_CASE(RunSmall, NEWinogradConvolutionLayerFixture<float>, framework::DatasetMode::PRECOMMIT,
+ combine(combine(combine(datasets::SmallWinogradConvolutionLayer3x1Dataset(),
+ framework::dataset::make("DataType", { DataType::F32 })),
+ ActivationFunctionsDataset),
+ framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))
+{
+ // Validate output
+ validate(Accessor(_target), _reference, abs_tolerance_f32);
+}
+FIXTURE_DATA_TEST_CASE(RunLarge, NEWinogradConvolutionLayerFixture<float>, framework::DatasetMode::NIGHTLY,
+ combine(combine(combine(datasets::LargeWinogradConvolutionLayer3x1Dataset(),
+ framework::dataset::make("DataType", { DataType::F32 })),
+ ActivationFunctionsDataset),
+ framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))
{
// Validate output
validate(Accessor(_target), _reference, abs_tolerance_f32);
}
+TEST_SUITE_END() // Conv3x1
+
+TEST_SUITE(Conv3x3)
+FIXTURE_DATA_TEST_CASE(RunSmall, NEWinogradConvolutionLayerFixture<float>, framework::DatasetMode::PRECOMMIT,
+ combine(combine(combine(datasets::SmallWinogradConvolutionLayer3x3Dataset(),
+ framework::dataset::make("DataType", { DataType::F32 })),
+ ActivationFunctionsDataset),
+ framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))
+
+{
+ // Validate output
+ validate(Accessor(_target), _reference, abs_tolerance_f32);
+}
+FIXTURE_DATA_TEST_CASE(RunLarge, NEWinogradConvolutionLayerFixture<float>, framework::DatasetMode::NIGHTLY,
+ combine(combine(combine(datasets::LargeWinogradConvolutionLayer3x3Dataset(),
+ framework::dataset::make("DataType", { DataType::F32 })),
+ ActivationFunctionsDataset),
+ framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))
+
+{
+ // Validate output
+ validate(Accessor(_target), _reference, abs_tolerance_f32);
+}
+TEST_SUITE_END() // Conv3x3
+
+TEST_SUITE(Conv5x5)
+FIXTURE_DATA_TEST_CASE(RunSmall, NEWinogradConvolutionLayerFixture<float>, framework::DatasetMode::PRECOMMIT,
+ combine(combine(combine(datasets::SmallWinogradConvolutionLayer5x5Dataset(),
+ framework::dataset::make("DataType", { DataType::F32 })),
+ ActivationFunctionsDataset),
+ framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))
+
+{
+ // Validate output
+ validate(Accessor(_target), _reference, abs_tolerance_f32);
+}
+FIXTURE_DATA_TEST_CASE(RunLarge, NEWinogradConvolutionLayerFixture<float>, framework::DatasetMode::NIGHTLY,
+ combine(combine(combine(datasets::LargeWinogradConvolutionLayer5x5Dataset(),
+ framework::dataset::make("DataType", { DataType::F32 })),
+ ActivationFunctionsDataset),
+ framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))
+
+{
+ // Validate output
+ validate(Accessor(_target), _reference, abs_tolerance_f32);
+}
+
+TEST_SUITE_END() // Conv5x5
+
FIXTURE_DATA_TEST_CASE(RunSmallNoBias, NEWinogradConvolutionLayerNoBiasFixture<float>, framework::DatasetMode::PRECOMMIT,
combine(combine(combine(framework::dataset::concat(datasets::SmallWinogradConvolutionLayer3x3Dataset(),
datasets::SmallWinogradConvolutionLayer5x5Dataset()),