author    Pablo Tello <pablo.tello@arm.com>          2018-05-04 11:45:13 +0100
committer Anthony Barbier <anthony.barbier@arm.com>  2018-11-02 16:51:37 +0000
commit    79ffadebd8dff7eaecbcfa3a28106736f240f1c5 (patch)
tree      f55cd1fb8b6918bbf24a424bf20229565232615a
parent    7f4a8191a0fff69ec6c819e8d785a2c780388feb (diff)
COMPMID-1112: Enabled multithreading transforms in Winograd.

Updated RSH code as well.

Change-Id: I9452ff5c7f0ff0cd60b8c223cdd71077288eb0c1
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/130177
Tested-by: Jenkins <bsgcomp@arm.com>
Reviewed-by: Georgios Pinitas <georgios.pinitas@arm.com>
Reviewed-by: Anthony Barbier <anthony.barbier@arm.com>
-rw-r--r--  arm_compute/core/NEON/kernels/NEWinogradConvolutionLayerKernel.h           2
-rw-r--r--  arm_compute/core/NEON/kernels/convolution/winograd/transforms/input.hpp   94
-rw-r--r--  arm_compute/core/NEON/kernels/convolution/winograd/transforms/output.hpp  75
-rw-r--r--  arm_compute/core/NEON/kernels/convolution/winograd/winograd_gemm.hpp     110
-rw-r--r--  src/core/NEON/kernels/NEWinogradConvolutionLayerKernel.cpp                12
-rw-r--r--  src/core/NEON/kernels/convolution/winograd/winograd_gemm.cpp             301
-rw-r--r--  tests/datasets/SmallConvolutionLayerDataset.h                               3

7 files changed, 145 insertions(+), 452 deletions(-)
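
Before this patch both transforms reported a single unit of work (get_window() returned 1) and were marked non-parallelisable; the patch windows them over the channel dimension in blocks of WINDOW_BLOCK = 16 channels so the scheduler can split each transform across threads. A minimal self-contained sketch of the window arithmetic introduced below (the names mirror the patch; the channel count and the loop standing in for the scheduler are illustrative):

    #include <algorithm>
    #include <iostream>

    constexpr unsigned int WINDOW_BLOCK = 16; // Base size of window, as in the patch

    // Ceiling division, as provided by the library's iceildiv().
    unsigned int iceildiv(const unsigned int a, const unsigned int b)
    {
      return (a + b - 1) / b;
    }

    // The final window includes the tail; all other windows are a full block.
    unsigned int get_window(const unsigned int n_channels)
    {
      return iceildiv(n_channels, WINDOW_BLOCK);
    }

    // Map a [start, stop) range of windows back onto a channel range.
    void run(const unsigned int start, const unsigned int stop, const unsigned int n_channels)
    {
      if (start >= get_window(n_channels))
      {
        return; // Nothing to do for an out-of-range window
      }
      const unsigned int start_channel = start * WINDOW_BLOCK;
      const unsigned int stop_channel  = std::min(stop * WINDOW_BLOCK, n_channels);
      std::cout << "transform channels [" << start_channel << ", " << stop_channel << ")\n";
    }

    int main()
    {
      const unsigned int n_channels = 40; // Illustrative: two full windows plus a tail
      for (unsigned int w = 0; w < get_window(n_channels); w++)
      {
        run(w, w + 1, n_channels); // Each window could run on its own thread
      }
      return 0;
    }

Running the sketch prints the three channel ranges [0, 16), [16, 32) and [32, 40): every window except the last covers a full block.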
diff --git a/arm_compute/core/NEON/kernels/NEWinogradConvolutionLayerKernel.h b/arm_compute/core/NEON/kernels/NEWinogradConvolutionLayerKernel.h
index 9912076cd5..6b8866cb2e 100644
--- a/arm_compute/core/NEON/kernels/NEWinogradConvolutionLayerKernel.h
+++ b/arm_compute/core/NEON/kernels/NEWinogradConvolutionLayerKernel.h
@@ -145,7 +145,6 @@ public:
// Inherited methods overridden:
void run(const Window &window, const ThreadInfo &info) override;
- bool is_parallelisable() const override;
/** Winograd base kernel */
using WinogradBase = winograd::WinogradGEMM<OutputTileRows, OutputTileCols, KernelRows, KernelCols>;
@@ -309,7 +308,6 @@ public:
const int n_channels) override;
void run(const Window &window, const ThreadInfo &info) override;
- bool is_parallelisable() const override;
/** Static function to check if given info will lead to a valid configuration of @ref NEWinogradLayerTransformOutputKernel
*
diff --git a/arm_compute/core/NEON/kernels/convolution/winograd/transforms/input.hpp b/arm_compute/core/NEON/kernels/convolution/winograd/transforms/input.hpp
index fc4b255a9c..13218030d2 100644
--- a/arm_compute/core/NEON/kernels/convolution/winograd/transforms/input.hpp
+++ b/arm_compute/core/NEON/kernels/convolution/winograd/transforms/input.hpp
@@ -33,35 +33,38 @@ namespace winograd
int kernel_rows, int kernel_cols>
template <typename T>
void WinogradGEMM<output_tile_rows, output_tile_cols, kernel_rows, kernel_cols>::InputTransform<T>::execute(
- const T *inptr,
- const Tensor4DShape& input_shape,
- const PaddingType padding_type,
+ const T* const input, /** Input tensor data. */
+ const int n_batches, /** Number of batches in input tensor. */
+ const int in_batch_stride, /** Stride between batches of the input. */
+ const int n_rows, /** Number of rows in input tensor. */
+ const int in_row_stride, /** Stride between rows of the input. */
+ const int n_cols, /** Number of columns in input tensor. */
+ const int in_col_stride, /** Stride between columns of the input. */
+ const int n_channels, /** Number of channels in input tensor. */
+ const PaddingType padding, /** Padding type. */
const int tile_M,
const int tile_N,
- T *outptr_base,
- const int matrix_stride,
- const int matrix_batch_stride,
- const int matrix_row_stride
+ T* const output, /** Base of output matrices. */
+ const int matrix_stride, /** Stride between output matrices. */
+ const int matrix_batch_stride, /** Stride between batches within the matrix. */
+ const int matrix_row_stride /** Stride within matrices. */
)
{
// Compute the padding required on each edge of the image
- const int pad_top = (padding_type == PADDING_SAME) ? (kernel_rows - 1) / 2 : 0;
- const int pad_left = (padding_type == PADDING_SAME) ? (kernel_cols - 1) / 2 : 0;
+ const int pad_top = (padding == PADDING_SAME) ? (kernel_rows - 1) / 2 : 0;
+ const int pad_left = (padding == PADDING_SAME) ? (kernel_cols - 1) / 2 : 0;
const int tile_overlap = kernel_rows - 1;
// Compute striding values (assuming NHWC ordered data)
- const int input_col_stride = input_shape.n_channels;
- const int input_row_stride = input_shape.n_cols * input_col_stride;
- const int input_batch_stride = input_shape.n_rows * input_row_stride;
const int output_col_stride = matrix_row_stride;
const int output_row_stride = tile_N * output_col_stride;
// Loop over batches
- for (int batch = 0; batch < input_shape.n_batches; batch++)
+ for (int batch = 0; batch < n_batches; batch++)
{
// Pointer to the batch
- const T* const input_base_batch = inptr + batch * input_batch_stride;
- T* const outptr_base_batch = outptr_base + batch * matrix_batch_stride;
+ const T* const input_base_batch = input + batch * in_batch_stride;
+ T* const outptr_base_batch = output + batch * matrix_batch_stride;
// Loop over rows of tiles
for (int tile_i = 0; tile_i < tile_M; tile_i++)
@@ -69,7 +72,7 @@ namespace winograd
// Pointer to the row
const int row_offset = (tile_i == 0) ? 0 : pad_top;
const T* const input_base_row = (
- input_base_batch + ((inner_tile_rows - (kernel_rows - 1))*tile_i - row_offset)*input_row_stride
+ input_base_batch + ((inner_tile_rows - (kernel_rows - 1))*tile_i - row_offset)*in_row_stride
);
T* const outptr_base_row = outptr_base_batch + tile_i*output_row_stride;
@@ -77,14 +80,14 @@ namespace winograd
const int row_top = tile_i*(inner_tile_rows - tile_overlap) - pad_top;
const int row_bottom = row_top + inner_tile_rows;
const int row_pad_top = (tile_i == 0) ? pad_top : 0;
- const int row_pad_bottom = (row_bottom <= input_shape.n_rows) ? 0 : row_bottom - input_shape.n_rows;
+ const int row_pad_bottom = (row_bottom <= n_rows) ? 0 : row_bottom - n_rows;
// Process the row
process_tile_row(
- tile_N, input_shape.n_channels,
- input_base_row, input_row_stride, input_col_stride,
+ tile_N, n_channels,
+ input_base_row, in_row_stride, in_col_stride,
outptr_base_row, matrix_stride, matrix_row_stride,
- row_pad_top, pad_left, row_pad_bottom, input_shape.n_cols
+ row_pad_top, pad_left, row_pad_bottom, n_cols
);
}
}
@@ -152,7 +155,10 @@ namespace winograd
const PaddingType padding, /** Padding type. */
T* const output, /** Base of output matrices. */
const int matrix_stride, /** Stride between output matrices. */
- const int matrix_row_stride /** Stride within matrices. */
+ const int matrix_row_stride, /** Stride within matrices. */
+ const int in_batch_stride, /** Stride between input batches. */
+ const int in_row_stride, /** Stride between input rows. */
+ const int in_col_stride /** Stride between input columns. */
) : _inptr(input), _outptr(output),
_n_batches(n_batches), _n_rows(n_rows), _n_cols(n_cols), _n_channels(n_channels),
_matrix_stride(matrix_stride), _matrix_row_stride(matrix_row_stride),
@@ -160,6 +166,9 @@ namespace winograd
output_tile_rows)),
_tiles_N(iceildiv((padding == PADDING_SAME) ? n_cols : n_cols - kc + 1,
output_tile_cols)),
+ _in_col_stride(in_col_stride ? in_col_stride : n_channels),
+ _in_row_stride(in_row_stride ? in_row_stride : n_cols * _in_col_stride),
+ _in_batch_stride(in_batch_stride ? in_batch_stride : n_rows * _in_row_stride),
_padding_type(padding)
{
}
@@ -168,10 +177,9 @@ namespace winograd
template <typename T>
unsigned int WinogradGEMM<otr, otc, kr, kc>::InputTransform<T>::get_window() const
{
- // TODO When the input transform supports multithreading, return the total
- // number of tile rows (allowing for multiple batches). For now we return 1
- // to indicate that the activations must be transformed as a single block.
- return 1; // TODO _tiles_M * _n_batches;
+ // The final window includes the tail, all other windows will be a multiple
+ // of the window block in size.
+ return iceildiv(_n_channels, WINDOW_BLOCK);
}
template <int otr, int otc, int kr, int kc>
@@ -180,18 +188,32 @@ namespace winograd
const unsigned int start, const unsigned int stop
)
{
- // TODO When the input transform supports multithreading call execute for a
- // portion of the tile rows.
- (void) start;
- (void) stop;
-
- // For now, just do all of the work.
- const Tensor4DShape input_shape = {
- _n_batches, _n_rows, _n_cols, _n_channels, NHWC
- };
+ if (start >= get_window())
+ {
+ return;
+ }
+
+ // Determine the window of work to perform
+ const unsigned int start_channel = start * WINDOW_BLOCK;
+ const unsigned int stop_channel = std::min<const unsigned int>(
+ stop * WINDOW_BLOCK, _n_channels
+ );
+ const unsigned int n_channels = stop_channel - start_channel;
+
+ // Perform the work
execute(
- _inptr, input_shape, _padding_type, _tiles_M, _tiles_N, _outptr,
- _matrix_stride, _matrix_row_stride * _tiles_M * _tiles_N, _matrix_row_stride
+ _inptr + start_channel,
+ _n_batches, _in_batch_stride,
+ _n_rows, _in_row_stride,
+ _n_cols, _in_col_stride,
+ n_channels,
+ _padding_type,
+ _tiles_M,
+ _tiles_N,
+ _outptr + start_channel,
+ _matrix_stride,
+ _matrix_row_stride * _tiles_M * _tiles_N,
+ _matrix_row_stride
);
}
}
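
Note that the windowed run() above only shifts the input and output base pointers by start_channel; that is sufficient because the data is NHWC, so channels are the innermost (unit-stride) dimension and a base-pointer shift re-indexes every spatial position to its channel sub-range. A self-checking sketch of that addressing identity, using illustrative shapes and the dense-stride defaults derived by the new constructor:

    #include <cassert>

    // Offset of element (b, r, c, ch) in an NHWC tensor with explicit strides.
    int element_offset(const int b, const int r, const int c, const int ch,
                       const int batch_stride, const int row_stride, const int col_stride)
    {
      return b * batch_stride + r * row_stride + c * col_stride + ch;
    }

    int main()
    {
      const int n_rows = 8, n_cols = 8, n_channels = 32; // Illustrative shape
      const int col_stride   = n_channels;               // Dense NHWC defaults, as
      const int row_stride   = n_cols * col_stride;      // derived in the constructor
      const int batch_stride = n_rows * row_stride;      // when strides are passed as 0
      const int start_channel = 16;                      // First channel of window 1

      // Shifting the base pointer by start_channel is the same as adding
      // start_channel to every element's channel index.
      assert(start_channel + element_offset(1, 2, 3, 0, batch_stride, row_stride, col_stride)
             == element_offset(1, 2, 3, start_channel, batch_stride, row_stride, col_stride));
      return 0;
    }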
diff --git a/arm_compute/core/NEON/kernels/convolution/winograd/transforms/output.hpp b/arm_compute/core/NEON/kernels/convolution/winograd/transforms/output.hpp
index 401b2816be..700ca76c68 100644
--- a/arm_compute/core/NEON/kernels/convolution/winograd/transforms/output.hpp
+++ b/arm_compute/core/NEON/kernels/convolution/winograd/transforms/output.hpp
@@ -23,7 +23,7 @@
*/
#pragma once
-#include "arm_compute/core/NEON/kernels/convolution/winograd/winograd_gemm.hpp"
+#include "../winograd_gemm.hpp"
namespace winograd
{
@@ -31,7 +31,13 @@ namespace winograd
int kernel_rows, int kernel_cols>
template <typename T>
void WinogradGEMM<output_tile_rows, output_tile_cols, kernel_rows, kernel_cols>::OutputTransform<T>::execute(
- const Tensor4DShape &output_shape,
+ const int n_batches,
+ const int output_batch_stride,
+ const int n_rows,
+ const int output_row_stride,
+ const int n_cols,
+ const int output_col_stride,
+ const int n_channels,
const T* const matrix_base,
const int matrix_stride,
const int matrix_row_stride,
@@ -41,19 +47,16 @@ namespace winograd
{
// Compute the number of tiles and hence the padding required on the bottom
// and right of the image.
- const int tile_M = iceildiv(output_shape.n_rows, output_tile_rows);
- const int tile_N = iceildiv(output_shape.n_cols, output_tile_cols);
- const int pad_bottom = output_tile_rows*tile_M - output_shape.n_rows;
- const int pad_right = output_tile_cols*tile_N - output_shape.n_cols;
+ const int tile_M = iceildiv(n_rows, output_tile_rows);
+ const int tile_N = iceildiv(n_cols, output_tile_cols);
+ const int pad_bottom = output_tile_rows*tile_M - n_rows;
+ const int pad_right = output_tile_cols*tile_N - n_cols;
const int matrix_tile_row_stride = tile_N * matrix_row_stride;
const int matrix_batch_stride = tile_M * matrix_tile_row_stride;
- const int output_col_stride = output_shape.n_channels;
- const int output_row_stride = output_shape.n_cols * output_col_stride;
- const int output_batch_stride = output_shape.n_rows * output_row_stride;
// Perform the output transformation for each batch
- for (int batch = 0; batch < output_shape.n_batches; batch++)
+ for (int batch = 0; batch < n_batches; batch++)
{
// Get batch offset for input and outputs.
const T* const matrix_batch = matrix_base + batch*matrix_batch_stride;
@@ -69,7 +72,7 @@ namespace winograd
// Process the row
process_tile_row(
- tile_N, output_shape.n_channels, matrix_tile_row, matrix_stride,
+ tile_N, n_channels, matrix_tile_row, matrix_stride,
matrix_row_stride, biases,
outptr_row, output_row_stride, output_col_stride, row_pad_bottom,
pad_right
@@ -139,12 +142,18 @@ namespace winograd
const int n_batches,
const int n_rows,
const int n_cols,
- const int n_channels
+ const int n_channels,
+ const int out_batch_stride,
+ const int out_row_stride,
+ const int out_col_stride
) : _matrix_base(matrix_base), _biases(biases),
_matrix_stride(matrix_stride), _matrix_row_stride(matrix_row_stride),
_outptr(output), _n_batches(n_batches), _n_rows(n_rows), _n_cols(n_cols),
_n_channels(n_channels), _tile_M(iceildiv(n_rows, output_tile_rows)),
- _tile_N(iceildiv(n_cols, output_tile_cols))
+ _tile_N(iceildiv(n_cols, output_tile_cols)),
+ _out_col_stride(out_col_stride ? out_col_stride : n_channels),
+ _out_row_stride(out_row_stride ? out_row_stride : n_cols * _out_col_stride),
+ _out_batch_stride(out_batch_stride ? out_batch_stride : n_rows * _out_row_stride)
{
}
@@ -152,10 +161,9 @@ namespace winograd
template <typename T>
unsigned int WinogradGEMM<otr, otc, kr, kc>::OutputTransform<T>::get_window() const
{
- // TODO When the output transform supports multithreading, return the total
- // number of tile rows (allowing for multiple batches). For now we return 1
- // to indicate that the activations must be transformed as a single block.
- return 1; // TODO _tile_M * _n_batches;
+ // The final window includes the tail, all other windows will be a multiple
+ // of the window block in size.
+ return iceildiv(_n_channels, WINDOW_BLOCK);
}
template <int otr, int otc, int kr, int kc>
@@ -164,18 +172,31 @@ namespace winograd
const unsigned int start, const unsigned int stop
)
{
- // TODO When the output transform supports multithreading call execute for a
- // portion of the tile rows.
- (void) start;
- (void) stop;
+ if (start >= get_window())
+ {
+ return;
+ }
+
+ // Determine the window of work to perform
+ const unsigned int start_channel = start * WINDOW_BLOCK;
+ const unsigned int stop_channel = std::min<const unsigned int>(
+ stop * WINDOW_BLOCK, _n_channels
+ );
+ const unsigned int n_channels = stop_channel - start_channel;
- // For now, just do all of the work.
- const Tensor4DShape output_shape = {
- _n_batches, _n_rows, _n_cols, _n_channels, NHWC
- };
execute(
- output_shape, _matrix_base, _matrix_stride, _matrix_row_stride, _biases,
- _outptr
+ _n_batches,
+ _out_batch_stride,
+ _n_rows,
+ _out_row_stride,
+ _n_cols,
+ _out_col_stride,
+ n_channels,
+ _matrix_base + start_channel,
+ _matrix_stride,
+ _matrix_row_stride,
+ (_biases)?(_biases + start_channel):(nullptr),
+ _outptr + start_channel
);
}
} // namespace winograd
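
The output transform mirrors the input transform, with one extra wrinkle: the bias pointer is optional, so it is only advanced into the channel window when it is non-null, as the inline ternary above shows. A minimal compilable sketch of that guard (the helper name and test values are illustrative; the patch writes the ternary inline):

    #include <cstddef>

    // Offset an optional bias pointer to the first channel of the window,
    // preserving nullptr for the no-bias case.
    template <typename T>
    const T *offset_biases(const T *const biases, const std::size_t start_channel)
    {
      return (biases != nullptr) ? (biases + start_channel) : nullptr;
    }

    int main()
    {
      const float bias[32] = {};                              // Illustrative bias array
      const float *window  = offset_biases(bias, 16);         // -> &bias[16]
      const float *none    = offset_biases<float>(nullptr, 16); // -> nullptr
      return (window == bias + 16 && none == nullptr) ? 0 : 1;
    }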
diff --git a/arm_compute/core/NEON/kernels/convolution/winograd/winograd_gemm.hpp b/arm_compute/core/NEON/kernels/convolution/winograd/winograd_gemm.hpp
index dd67e97035..bc067fd07a 100644
--- a/arm_compute/core/NEON/kernels/convolution/winograd/winograd_gemm.hpp
+++ b/arm_compute/core/NEON/kernels/convolution/winograd/winograd_gemm.hpp
@@ -31,6 +31,7 @@
#include "arm_compute/core/NEON/kernels/convolution/common/tensor.hpp"
#include "arm_compute/core/NEON/kernels/convolution/common/utils.hpp"
+
#include <thread>
#include <utility>
#include <vector>
@@ -135,15 +136,21 @@ class WinogradGEMM
/** Apply the transform to a tensor. */
static void execute(
- const T *inptr,
- const Tensor4DShape& input_shape,
- const PaddingType padding_type,
+ const T* const input, /** Input tensor data. */
+ const int n_batches, /** Number of batches in input tensor. */
+ const int in_batch_stride, /** Stride between batches of the input. */
+ const int n_rows, /** Number of rows in input tensor. */
+ const int in_row_stride, /** Stride between rows of the input. */
+ const int n_cols, /** Number of columns in input tensor. */
+ const int in_col_stride, /** Stride between columns of the input. */
+ const int n_channels, /** Number of channels in input tensor. */
+ const PaddingType padding, /** Padding type. */
const int tile_M,
const int tile_N,
- T *outptr_base,
- const int matrix_stride,
- const int matrix_batch_stride,
- const int matrix_row_stride
+ T* const output, /** Base of output matrices. */
+ const int matrix_stride, /** Stride between output matrices. */
+ const int matrix_batch_stride, /** Stride between batches within the matrix. */
+ const int matrix_row_stride /** Stride within matrices. */
);
/***********************************************************************/
@@ -159,11 +166,15 @@ class WinogradGEMM
const PaddingType padding, /** Padding type. */
T* const output, /** Base of output matrices. */
const int matrix_stride, /** Stride between output matrices. */
- const int matrix_row_stride /** Stride within matrices. */
+ const int matrix_row_stride, /** Stride within matrices. */
+ const int in_batch_stride=0, /** Stride between input batches. */
+ const int in_row_stride=0, /** Stride between input rows. */
+ const int in_col_stride=0 /** Stride between input columns. */
);
- /** Get the winodw of work a given operator can perform. */
+ /** Get the window of work a given operator can perform. */
unsigned int get_window() const;
+ static constexpr unsigned int WINDOW_BLOCK = 16; // Base size of window
/** Perform work upon a window of the input. */
void run(const unsigned int start, const unsigned int stop);
@@ -201,6 +212,7 @@ class WinogradGEMM
T* const _outptr;
const int _n_batches, _n_rows, _n_cols, _n_channels, _matrix_stride,
_matrix_row_stride, _tiles_M, _tiles_N;
+ const int _in_col_stride, _in_row_stride, _in_batch_stride;
const PaddingType _padding_type;
};
@@ -220,7 +232,13 @@ class WinogradGEMM
/** Apply the transform to create a tensor. */
static void execute(
- const Tensor4DShape &output_shape,
+ const int n_batches,
+ const int out_batch_stride,
+ const int n_rows,
+ const int out_row_stride,
+ const int n_cols,
+ const int out_col_stride,
+ const int n_channels,
const T* const matrix_base,
const int matrix_stride,
const int matrix_row_stride,
@@ -241,11 +259,15 @@ class WinogradGEMM
const int n_batches, /** Number of batches in output tensor. */
const int n_rows, /** Number of rows in output tensor. */
const int n_cols, /** Number of columns in output tensor. */
- const int n_channels /** Number of channels in output tensor. */
+ const int n_channels, /** Number of channels in output tensor. */
+ const int out_batch_stride=0, /** Output batch stride. */
+ const int out_row_stride=0, /** Output row stride. */
+ const int out_col_stride=0 /** Output column stride. */
);
/** Get the window of work a given operator can perform. */
unsigned int get_window() const;
+ static constexpr unsigned int WINDOW_BLOCK = 16; // Base size of window
/** Perform work upon a window of the input. */
void run(const unsigned int start, const unsigned int stop);
@@ -284,6 +306,7 @@ class WinogradGEMM
const int _matrix_stride, _matrix_row_stride;
T* const _outptr;
const int _n_batches, _n_rows, _n_cols, _n_channels, _tile_M, _tile_N;
+ const int _out_col_stride, _out_row_stride, _out_batch_stride;
};
/** Perform a convolution.
@@ -296,54 +319,6 @@ class WinogradGEMM
typedef TOut OutputType;
typedef TIn InputType;
- /** Create a new Winograd operator. */
- Convolution(
- const KernelShape &kernel_shape,
- const Tensor4DShape &input_shape,
- const PaddingType padding,
- void *kernel_storage=NULL
- );
-
- Convolution(const Convolution&) = delete;
- Convolution operator=(const Convolution&) = delete;
-
- /** Create a new Winograd operator and initialise the weights. */
- Convolution(
- const KernelShape &kernel_shape,
- const Tensor4DShape &input_shape,
- const PaddingType padding,
- const TIn* const kernel,
- void *kernel_storage=NULL,
- void *transform_working_space=NULL
- );
-
- /** Clean up a convolution engine. */
- ~Convolution();
-
- /** Transform the weights into the Winograd domain. */
- template <typename WeightsTransform=WeightsTransform<TIn>>
- void transform_weights(
- const TIn* const kernel,
- void *transform_working_space=NULL
- );
-
- /* Apply the Winograd operator to some input. */
- void execute(
- TOut* const output,
- const TIn* const input,
- const TOut* const biases,
- void* working_space=NULL,
- const int n_threads=1
- );
-
- /* Apply the Winograd operator to some input. */
- void execute(
- TOut* const output,
- const TIn* const input,
- const TOut* const biases,
- const int n_threads
- );
-
/** Get the output shape of a convolution. */
static Tensor4DShape get_output_shape(
const KernelShape &kernel_shape,
@@ -421,23 +396,6 @@ class WinogradGEMM
static constexpr int M_BLOCK = 4; /** Size of block used by GEMM. */
static constexpr int N_BLOCK = 16; /** Size of block used by GEMM. */
-
- private:
- const KernelShape kernel_shape; /** Shape of the kernel to be applied. */
- TIn *kernel_matrices[N_GEMMS]; /** Pointers into the kernel matrices. */
- const int kernel_matrix_row_stride; /** Stride within the kernel matrices. */
-
- const bool manage_kernel_storage; /** Kernel storage is managed by the instance. */
- void* const _kernel_storage; /** Base pointer for kernel storage. */
-
- const Tensor4DShape input_shape; /** Shape of the input tensor. */
- const PaddingType padding; /** Padding applied by the operator. */
-
- const Tensor4DShape output_shape; /** Output shape produced by the operator. */
-
- const int tile_rows; /** Number of rows of tiles. */
- const int tile_cols; /** Number of columns of tiles. */
- const int M, K, N; /** Sizes of underlying fundamental matrix multiplications. */
};
};
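
Both constructors take their new stride parameters with a default of 0, which the initialiser lists interpret as "derive the dense NHWC stride from the shape"; a caller with padded or strided tensors can pass explicit values instead. The derivation chain, as a standalone sketch with illustrative shapes:

    #include <iostream>

    int main()
    {
      const int n_rows = 8, n_cols = 8, n_channels = 32;             // Illustrative shape
      int in_col_stride = 0, in_row_stride = 0, in_batch_stride = 0; // Defaulted arguments

      // The same fallback chain as the initialiser lists: each stride defaults
      // to the product of the next-inner stride and that dimension's extent.
      in_col_stride   = in_col_stride   ? in_col_stride   : n_channels;
      in_row_stride   = in_row_stride   ? in_row_stride   : n_cols * in_col_stride;
      in_batch_stride = in_batch_stride ? in_batch_stride : n_rows * in_row_stride;

      std::cout << in_col_stride << " " << in_row_stride << " "
                << in_batch_stride << "\n"; // Prints: 32 256 2048
      return 0;
    }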
diff --git a/src/core/NEON/kernels/NEWinogradConvolutionLayerKernel.cpp b/src/core/NEON/kernels/NEWinogradConvolutionLayerKernel.cpp
index fa76194529..7e82dc4ecd 100644
--- a/src/core/NEON/kernels/NEWinogradConvolutionLayerKernel.cpp
+++ b/src/core/NEON/kernels/NEWinogradConvolutionLayerKernel.cpp
@@ -433,12 +433,6 @@ void NEWinogradLayerTransformInputKernel<T, OutputTileRows, OutputTileCols, Kern
}
template <typename T, int OutputTileRows, int OutputTileCols, int KernelRows, int KernelCols>
-bool NEWinogradLayerTransformInputKernel<T, OutputTileRows, OutputTileCols, KernelRows, KernelCols>::is_parallelisable() const
-{
- return false;
-}
-
-template <typename T, int OutputTileRows, int OutputTileCols, int KernelRows, int KernelCols>
Status NEWinogradLayerTransformInputKernel<T, OutputTileRows, OutputTileCols, KernelRows, KernelCols>::validate(const ITensorInfo *input, const ITensorInfo *output, const WinogradInfo &winograd_info)
{
ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments_winograd_input_trans(input, output, winograd_info));
@@ -538,12 +532,6 @@ void NEWinogradLayerTransformOutputKernel<T, OutputTileRows, OutputTileCols, Ker
}
template <typename T, int OutputTileRows, int OutputTileCols, int KernelRows, int KernelCols>
-bool NEWinogradLayerTransformOutputKernel<T, OutputTileRows, OutputTileCols, KernelRows, KernelCols>::is_parallelisable() const
-{
- return false;
-}
-
-template <typename T, int OutputTileRows, int OutputTileCols, int KernelRows, int KernelCols>
Status NEWinogradLayerTransformOutputKernel<T, OutputTileRows, OutputTileCols, KernelRows, KernelCols>::validate(const ITensorInfo *input, const ITensorInfo *bias, const ITensorInfo *output,
const WinogradInfo &winograd_info)
{
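
Deleting the is_parallelisable() overrides (which returned false) leaves the two kernels with the base-class behaviour; in Compute Library the IKernel default reports true, so the scheduler is now free to divide each kernel's window among worker threads. A sketch of the inheritance effect, assuming that default:

    struct IKernelLike
    {
      // Assumed default, matching IKernel in Compute Library.
      virtual bool is_parallelisable() const { return true; }
      virtual ~IKernelLike() = default;
    };

    struct TransformKernelLike : IKernelLike
    {
      // No override: the kernel now inherits `true`, so the scheduler may
      // split its window across threads instead of running it whole.
    };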
diff --git a/src/core/NEON/kernels/convolution/winograd/winograd_gemm.cpp b/src/core/NEON/kernels/convolution/winograd/winograd_gemm.cpp
index a0ecaea4d4..a5d43024a4 100644
--- a/src/core/NEON/kernels/convolution/winograd/winograd_gemm.cpp
+++ b/src/core/NEON/kernels/convolution/winograd/winograd_gemm.cpp
@@ -21,11 +21,9 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
+#include <cstring>
#include "arm_compute/core/NEON/kernels/convolution/winograd/winograd_gemm.hpp"
#include "arm_compute/core/NEON/kernels/convolution/winograd/batched_blocked_gemm.hpp"
-
-#include <cstring>
-
using namespace winograd;
/** Get the output shape of a convolution. */
@@ -39,8 +37,8 @@ Tensor4DShape WinogradGEMM<kr, kc, itr, itc>::Convolution<TOut, TIn>::get_output
{
return Tensor4DShape {
in_shape.n_batches,
- (padding == PADDING_SAME) ? in_shape.n_rows : in_shape.n_rows - (kernel_rows - 1),
- (padding == PADDING_SAME) ? in_shape.n_cols : in_shape.n_cols - (kernel_cols - 1),
+ (padding == PADDING_SAME) ? in_shape.n_rows : in_shape.n_rows - (kernel_rows - 1),
+ (padding == PADDING_SAME) ? in_shape.n_cols : in_shape.n_cols - (kernel_cols - 1),
kernel_shape.n_output_channels,
in_shape.ordering
};
@@ -223,299 +221,6 @@ int WinogradGEMM<kernel_rows, kernel_cols, output_tile_rows, output_tile_cols>::
}
-/** Create a new Winograd operator. */
-template <int output_tile_rows, int output_tile_cols,
- int kernel_rows, int kernel_cols>
-template <typename TOut, typename TIn>
-WinogradGEMM<output_tile_rows, output_tile_cols, kernel_rows, kernel_cols>::Convolution<TOut, TIn>::Convolution(
- const KernelShape &kernel_shape,
- const Tensor4DShape &input_shape,
- const PaddingType padding,
- void *kernel_storage
-) : kernel_shape(kernel_shape), // Store the kernel shape
- kernel_matrix_row_stride(roundup(kernel_shape.n_output_channels, N_BLOCK)),
- manage_kernel_storage(kernel_storage == NULL),
- _kernel_storage(manage_kernel_storage ?
- ALLOCATE(get_kernel_storage_size(kernel_shape)) :
- kernel_storage),
- input_shape(input_shape),
- padding(padding),
- output_shape(get_output_shape(kernel_shape, input_shape, padding)),
- tile_rows(iceildiv(output_shape.n_rows, output_tile_rows)),
- tile_cols(iceildiv(output_shape.n_cols, output_tile_cols)),
- M(input_shape.n_batches * tile_rows * tile_cols),
- K(kernel_shape.n_input_channels),
- N(kernel_shape.n_output_channels)
-{
- // Create pointers to the kernel matrices
- const int kernel_matrix_size_bytes = get_kernel_matrix_size(kernel_shape);
- int8_t* const ks_bytes = reinterpret_cast<int8_t *>(_kernel_storage);
- for (int i = 0; i < N_GEMMS; i++) {
- kernel_matrices[i] = reinterpret_cast<TIn *>(
- ks_bytes + i*kernel_matrix_size_bytes);
- }
-}
-
-
-/** Create a new Winograd operator and initialise the weights. */
-template <int output_tile_rows, int output_tile_cols,
- int kernel_rows, int kernel_cols>
-template <typename TOut, typename TIn>
-WinogradGEMM<output_tile_rows, output_tile_cols, kernel_rows, kernel_cols>::Convolution<TOut, TIn>::Convolution(
- const KernelShape &kernel_shape,
- const Tensor4DShape &input_shape,
- const PaddingType padding,
- const TIn* const kernel,
- void *kernel_storage,
- void *transform_working_space
-) : Convolution(kernel_shape, input_shape, padding, kernel_storage)
-{
- transform_weights(kernel, transform_working_space);
-}
-
-
-/** Clean up a convolution engine. */
-template <int output_tile_rows, int output_tile_cols, int kernel_rows, int kernel_cols>
-template <typename TOut, typename TIn>
-WinogradGEMM<output_tile_rows, output_tile_cols, kernel_rows, kernel_cols>::
-Convolution<TOut, TIn>::~Convolution()
-{
- // If we were responsible for managing kernel storage ensure that it is
- // freed.
- if (manage_kernel_storage)
- {
- free(_kernel_storage);
- }
-}
-
-
-/** Transform weights into the Winograd domain and store them for later use/reuse. */
-template <int output_tile_rows, int output_tile_cols, int kernel_rows, int kernel_cols>
-template <typename TOut, typename TIn>
-template <typename WeightsTransformT>
-void WinogradGEMM<output_tile_rows, output_tile_cols, kernel_rows, kernel_cols>::
-Convolution<TOut, TIn>::transform_weights(
- const TIn* const kernel,
- void *transform_working_space
-)
-{
- // Allocate working space if it is required
- bool allocated_working_space = false;
- if (transform_working_space == NULL && // If no memory has been provided
- get_kernel_transform_working_size(kernel_shape) != 0) // And we need the space
- {
- allocated_working_space = true;
- transform_working_space = ALLOCATE(
- get_kernel_transform_working_size(kernel_shape)
- );
- }
-
- // The transformation methods only work on weights laid out in HWIO form, if
- // the weights are not in this form then we need to re-order them.
- const TIn *kernel_hwio = kernel;
- if (kernel_shape.ordering != HWIO)
- {
- kernel_hwio = reinterpret_cast<TIn *>(transform_working_space);
-
- // Re-order the weights from OIHW to HWIO
- reorder::ofm_ifm_h_w_to_h_w_ifm_ofm(
- kernel, const_cast<TIn *>(kernel_hwio),
- kernel_shape.n_output_channels,
- kernel_shape.n_input_channels,
- kernel_shape.n_rows,
- kernel_shape.n_cols
- );
- }
-
- const int kernel_matrix_size_bytes = get_kernel_matrix_size(kernel_shape);
- WeightsTransformT weights_transform(
- kernel_hwio, kernel_matrices[0],
- kernel_matrix_size_bytes / sizeof(TIn),
- kernel_matrix_row_stride,
- kernel_shape.n_output_channels,
- kernel_shape.n_input_channels
- );
-
- // Transform the weights into the Winograd domain
- weights_transform.run(0, weights_transform.get_window());
-
- // Free memory if we allocated it
- if (allocated_working_space)
- {
- free(transform_working_space);
- }
-}
-
-
-/** Perform a convolution. */
-template <int output_tile_rows, int output_tile_cols,
- int kernel_rows, int kernel_cols>
-template <typename TOut, typename TIn>
-void WinogradGEMM<output_tile_rows, output_tile_cols, kernel_rows, kernel_cols>::
-Convolution<TOut, TIn>::execute(
- TOut* const output,
- const TIn* const input,
- const TOut* const biases,
- void *working_space,
- const int n_threads
-)
-{
- const auto padding_type = padding;
- const auto input_shape = this->input_shape;
-
- // Allocate working space if none has been provided
- const bool manage_working_space = (working_space == NULL);
- if (manage_working_space)
- {
- const size_t ws_size = get_working_space_size(
- kernel_shape, input_shape, padding_type
- );
- working_space = ALLOCATE(ws_size * sizeof(int8_t));
- memset(working_space, 0x00, ws_size);
- }
- int8_t* const ws_bytes = reinterpret_cast<int8_t *>(working_space);
-
- // Split the working space into that required for 16 input matrices and
- // output matrices.
- TIn *input_matrices[N_GEMMS];
- TOut *output_matrices[N_GEMMS];
- const int in_matrix_stride_bytes = get_input_matrix_size(kernel_shape, input_shape, padding_type);
- const int out_matrix_stride_bytes = get_output_matrix_size(kernel_shape, input_shape, padding_type);
-
- for (int i = 0; i < N_GEMMS; i++)
- {
- input_matrices[i] = reinterpret_cast<TIn *>(
- ws_bytes + i*in_matrix_stride_bytes);
- output_matrices[i] = reinterpret_cast<TIn *>(
- ws_bytes + N_GEMMS*in_matrix_stride_bytes + i*out_matrix_stride_bytes);
- }
-
- // If we need to re-order the input and output tensors then the final chunk
- // of the working space can be used for this purpose.
- // TODO - Overlay the input reorder on top of the output matrices
- // - Overlay the output reorder on top of the input matrices
- // Reorder the input input form if it was not provided in this ordering.
- const TIn* input_nhwc = input;
- if (input_shape.ordering == NCHW)
- {
- input_nhwc = reinterpret_cast<TIn *>(
- ws_bytes + N_GEMMS*(in_matrix_stride_bytes + out_matrix_stride_bytes)
- );
-
- reorder::nchw_to_nhwc(
- input, const_cast<TIn *>(input_nhwc),
- input_shape.n_batches,
- input_shape.n_channels,
- input_shape.n_rows,
- input_shape.n_cols
- );
- }
-
- // Compute shape for the GEMM
- const auto output_shape = this->output_shape;
- int M = this->M;
- int K = this->K;
- int N = this->N;
-
- const int in_matrix_row_stride = K;
- const int out_matrix_row_stride = kernel_matrix_row_stride;
-
- InputTransform<TIn> input_transform(
- input_nhwc,
- input_shape.n_batches,
- input_shape.n_rows,
- input_shape.n_cols,
- input_shape.n_channels,
- padding_type,
- input_matrices[0],
- in_matrix_stride_bytes / sizeof(TIn),
- in_matrix_row_stride
- );
-
- // Transform the input into the Winograd domain
- input_transform.run(0, input_transform.get_window());
-
- // Perform the GEMMs
- const int kernel_matrix_stride_bytes = get_kernel_matrix_size(kernel_shape);
- BatchedBlockedGemm<M_BLOCK, N_BLOCK, TOut, TIn> gemms(
- N_GEMMS, M, K, N,
- in_matrix_stride_bytes / sizeof(TIn),
- in_matrix_row_stride,
- kernel_matrix_stride_bytes / sizeof(TIn),
- kernel_matrix_row_stride,
- out_matrix_stride_bytes / sizeof(TOut),
- out_matrix_row_stride,
- input_matrices[0],
- kernel_matrices[0],
- output_matrices[0]
- );
- for (unsigned int i = 0; i < gemms.get_window(); i++)
- {
- gemms.run(i, i+1);
- }
-
- // If the output tensor needs to be in NCHW form then store the NHWC output
- // tensor in temporary storage and then reorder. If the output tensor needs
- // to be in NHWC then just write straight to the output tensor.
- TOut *output_nhwc = output;
- if (input_shape.ordering == NCHW)
- {
- output_nhwc = reinterpret_cast<TOut *>(
- ws_bytes + N_GEMMS*(in_matrix_stride_bytes + out_matrix_stride_bytes)
- );
- }
-
- // Transform the output tensor from the Winograd domain to the spatial
- // domain.
- OutputTransform<TOut> output_transform(
- output_matrices[0],
- out_matrix_stride_bytes / sizeof(TOut),
- out_matrix_row_stride,
- biases,
- output_nhwc,
- output_shape.n_batches,
- output_shape.n_rows,
- output_shape.n_cols,
- output_shape.n_channels
- );
- output_transform.run(0, output_transform.get_window());
-
- // Reorder the output tensor if it is required to be in NCHW form.
- if (input_shape.ordering == NCHW)
- {
- reorder::nhwc_to_nchw(
- output_nhwc, output,
- output_shape.n_batches,
- output_shape.n_rows,
- output_shape.n_cols,
- output_shape.n_channels
- );
- }
-
- // Free working space if we were responsible for allocating it
- if (manage_working_space)
- {
- free(working_space);
- }
-}
-
-
-/** Perform a convolution. */
-template <int output_tile_rows, int output_tile_cols,
- int kernel_rows, int kernel_cols>
-template <typename TOut, typename TIn>
-void WinogradGEMM<output_tile_rows, output_tile_cols, kernel_rows, kernel_cols>::
-Convolution<TOut, TIn>::execute(
- TOut* const output,
- const TIn* const input,
- const TOut* const biases,
- const int n_threads
-)
-{
- execute(output, input, biases, NULL, n_threads);
-}
-
-
// Instantiate required implementations
template class WinogradGEMM<2, 2, 3, 3>::Convolution<float, float>;
template class WinogradGEMM<4, 4, 3, 3>::Convolution<float, float>;
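
With the Convolution driver removed from this translation unit, the transforms are driven directly through their get_window()/run() interface. Disjoint channel windows touch disjoint memory, so they can run concurrently; a sketch of one way a caller might fan a windowed transform out over threads (the thread-pool harness is illustrative, not part of the patch):

    #include <algorithm>
    #include <thread>
    #include <vector>

    template <typename Transform>
    void run_in_parallel(Transform &transform, const unsigned int n_threads)
    {
      const unsigned int n_windows = transform.get_window();
      const unsigned int chunk     = (n_windows + n_threads - 1) / n_threads; // iceildiv
      std::vector<std::thread> workers;
      for (unsigned int t = 0; t < n_threads; t++)
      {
        const unsigned int start = t * chunk;
        const unsigned int stop  = std::min(start + chunk, n_windows);
        if (start < stop)
        {
          // Each thread transforms a disjoint range of channel windows.
          workers.emplace_back([&transform, start, stop] { transform.run(start, stop); });
        }
      }
      for (auto &worker : workers)
      {
        worker.join();
      }
    }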
diff --git a/tests/datasets/SmallConvolutionLayerDataset.h b/tests/datasets/SmallConvolutionLayerDataset.h
index 8e34f0ab1a..fed36de3dd 100644
--- a/tests/datasets/SmallConvolutionLayerDataset.h
+++ b/tests/datasets/SmallConvolutionLayerDataset.h
@@ -42,7 +42,8 @@ class SmallWinogradConvolutionLayer3x3Dataset final : public ConvolutionLayerDat
public:
SmallWinogradConvolutionLayer3x3Dataset()
{
- // Kernel size 3
+ // Channel size big enough to force multithreaded execution of the input transform
+ add_config(TensorShape(8U, 8U, 32U), TensorShape(3U, 3U, 32U, 1U), TensorShape(1U), TensorShape(6U, 6U, 1U), PadStrideInfo(1, 1, 0, 0));
// Batch size 1
add_config(TensorShape(8U, 8U, 2U), TensorShape(3U, 3U, 2U, 1U), TensorShape(1U), TensorShape(6U, 6U, 1U), PadStrideInfo(1, 1, 0, 0));
// Batch size 4
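
As a worked check of the new 32-channel configuration above: with WINDOW_BLOCK = 16, the input transform reports get_window() = iceildiv(32, 16) = 2 units of work, so a multi-threaded scheduler will genuinely split it, while the 2-channel configurations produce a single tail-only window and keep the single-threaded path covered.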