From 79ffadebd8dff7eaecbcfa3a28106736f240f1c5 Mon Sep 17 00:00:00 2001
From: Pablo Tello
Date: Fri, 4 May 2018 11:45:13 +0100
Subject: COMPMID-1112: Enabled multithreading transforms in Winograd.

Updated RSH code as well.

Change-Id: I9452ff5c7f0ff0cd60b8c223cdd71077288eb0c1
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/130177
Tested-by: Jenkins
Reviewed-by: Georgios Pinitas
Reviewed-by: Anthony Barbier
---
 .../kernels/NEWinogradConvolutionLayerKernel.h     |   2 -
 .../convolution/winograd/transforms/input.hpp      |  94 +++++++++++-------
 .../convolution/winograd/transforms/output.hpp     |  75 +++++++++-----
 .../kernels/convolution/winograd/winograd_gemm.hpp | 110 +++++++--------------
 4 files changed, 140 insertions(+), 141 deletions(-)

(limited to 'arm_compute/core')

diff --git a/arm_compute/core/NEON/kernels/NEWinogradConvolutionLayerKernel.h b/arm_compute/core/NEON/kernels/NEWinogradConvolutionLayerKernel.h
index 9912076cd5..6b8866cb2e 100644
--- a/arm_compute/core/NEON/kernels/NEWinogradConvolutionLayerKernel.h
+++ b/arm_compute/core/NEON/kernels/NEWinogradConvolutionLayerKernel.h
@@ -145,7 +145,6 @@ public:
 
     // Inherited methods overridden:
     void run(const Window &window, const ThreadInfo &info) override;
-    bool is_parallelisable() const override;
 
     /** Winograd base kernel */
    using WinogradBase = winograd::WinogradGEMM<OutputTileRows, OutputTileCols, KernelRows, KernelCols>;
@@ -309,7 +308,6 @@ public:
                       const int       n_channels) override;
 
     void run(const Window &window, const ThreadInfo &info) override;
-    bool is_parallelisable() const override;
 
     /** Static function to check if given info will lead to a valid configuration of @ref NEWinogradLayerTransformOutputKernel
      *
diff --git a/arm_compute/core/NEON/kernels/convolution/winograd/transforms/input.hpp b/arm_compute/core/NEON/kernels/convolution/winograd/transforms/input.hpp
index fc4b255a9c..13218030d2 100644
--- a/arm_compute/core/NEON/kernels/convolution/winograd/transforms/input.hpp
+++ b/arm_compute/core/NEON/kernels/convolution/winograd/transforms/input.hpp
@@ -33,35 +33,38 @@ namespace winograd
             int kernel_rows, int kernel_cols>
   template <typename T>
   void WinogradGEMM<output_tile_rows, output_tile_cols, kernel_rows, kernel_cols>::InputTransform<T>::execute(
-    const T *inptr,
-    const Tensor4DShape& input_shape,
-    const PaddingType padding_type,
+    const T* const input,        /** Input tensor data */
+    const int n_batches,         /** Number of batches in input tensor. */
+    const int in_batch_stride,   /** Stride between batches of the input. */
+    const int n_rows,            /** Number of rows in input tensor. */
+    const int in_row_stride,     /** Stride between rows of the input. */
+    const int n_cols,            /** Number of columns in input tensor. */
+    const int in_col_stride,     /** Stride between columns of the input. */
+    const int n_channels,        /** Number of channels in input tensor. */
+    const PaddingType padding,   /** Padding type. */
     const int tile_M,
     const int tile_N,
-    T *outptr_base,
-    const int matrix_stride,
-    const int matrix_batch_stride,
-    const int matrix_row_stride
+    T* const output,             /** Base of output matrices. */
+    const int matrix_stride,     /** Stride between output matrices. */
+    const int matrix_batch_stride,  /** Stride between batches within the matrix. */
+    const int matrix_row_stride  /** Stride within matrices. */
   )
   {
     // Compute the padding required on each edge of the image
-    const int pad_top = (padding_type == PADDING_SAME) ? (kernel_rows - 1) / 2 : 0;
-    const int pad_left = (padding_type == PADDING_SAME) ? (kernel_cols - 1) / 2 : 0;
+    const int pad_top = (padding == PADDING_SAME) ? (kernel_rows - 1) / 2 : 0;
+    const int pad_left = (padding == PADDING_SAME) ? (kernel_cols - 1) / 2 : 0;
     const int tile_overlap = kernel_rows - 1;
 
     // Compute striding values (assuming NHWC ordered data)
-    const int input_col_stride = input_shape.n_channels;
-    const int input_row_stride = input_shape.n_cols * input_col_stride;
-    const int input_batch_stride = input_shape.n_rows * input_row_stride;
     const int output_col_stride = matrix_row_stride;
     const int output_row_stride = tile_N * output_col_stride;
 
     // Loop over batches
-    for (int batch = 0; batch < input_shape.n_batches; batch++)
+    for (int batch = 0; batch < n_batches; batch++)
     {
       // Pointer to the batch
-      const T* const input_base_batch = inptr + batch * input_batch_stride;
-      T* const outptr_base_batch = outptr_base + batch * matrix_batch_stride;
+      const T* const input_base_batch = input + batch * in_batch_stride;
+      T* const outptr_base_batch = output + batch * matrix_batch_stride;
 
       // Loop over rows of tiles
       for (int tile_i = 0; tile_i < tile_M; tile_i++)
@@ -69,7 +72,7 @@ namespace winograd
         // Pointer to the row
         const int row_offset = (tile_i == 0) ? 0 : pad_top;
         const T* const input_base_row = (
-          input_base_batch + ((inner_tile_rows - (kernel_rows - 1))*tile_i - row_offset)*input_row_stride
+          input_base_batch + ((inner_tile_rows - (kernel_rows - 1))*tile_i - row_offset)*in_row_stride
         );
         T* const outptr_base_row = outptr_base_batch + tile_i*output_row_stride;
 
@@ -77,14 +80,14 @@ namespace winograd
         const int row_top = tile_i*(inner_tile_rows - tile_overlap) - pad_top;
         const int row_bottom = row_top + inner_tile_rows;
         const int row_pad_top = (tile_i == 0) ? pad_top : 0;
-        const int row_pad_bottom = (row_bottom <= input_shape.n_rows) ? 0 : row_bottom - input_shape.n_rows;
+        const int row_pad_bottom = (row_bottom <= n_rows) ? 0 : row_bottom - n_rows;
 
         // Process the row
         process_tile_row(
-          tile_N, input_shape.n_channels,
-          input_base_row, input_row_stride, input_col_stride,
+          tile_N, n_channels,
+          input_base_row, in_row_stride, in_col_stride,
           outptr_base_row, matrix_stride, matrix_row_stride,
-          row_pad_top, pad_left, row_pad_bottom, input_shape.n_cols
+          row_pad_top, pad_left, row_pad_bottom, n_cols
         );
       }
     }
@@ -152,7 +155,10 @@ namespace winograd
       const PaddingType padding,     /** Padding type. */
       T* const output,               /** Base of output matrices. */
       const int matrix_stride,       /** Stride between output matrices. */
-      const int matrix_row_stride    /** Stride within matrices. */
+      const int matrix_row_stride,   /** Stride within matrices. */
+      const int in_batch_stride,     /** Stride between input batches. */
+      const int in_row_stride,       /** Stride between input rows. */
+      const int in_col_stride       /** Stride between input columns. */
   ) : _inptr(input), _outptr(output),
       _n_batches(n_batches), _n_rows(n_rows), _n_cols(n_cols), _n_channels(n_channels),
       _matrix_stride(matrix_stride), _matrix_row_stride(matrix_row_stride),
@@ -160,6 +166,9 @@ namespace winograd
       _tiles_M(iceildiv((padding == PADDING_SAME) ? n_rows : n_rows - kr + 1,
                         output_tile_rows)),
       _tiles_N(iceildiv((padding == PADDING_SAME) ? n_cols : n_cols - kc + 1,
                         output_tile_cols)),
+      _in_col_stride(in_col_stride ? in_col_stride : n_channels),
+      _in_row_stride(in_row_stride ? in_row_stride : n_cols * _in_col_stride),
+      _in_batch_stride(in_batch_stride ? in_batch_stride : n_rows * _in_row_stride),
       _padding_type(padding)
   {
   }
@@ -168,10 +177,9 @@ namespace winograd
   template <typename T>
   unsigned int WinogradGEMM<output_tile_rows, output_tile_cols, kernel_rows, kernel_cols>::InputTransform<T>::get_window() const
   {
-    // TODO When the input transform supports multithreading, return the total
-    // number of tile rows (allowing for multiple batches). For now we return 1
-    // to indicate that the activations must be transformed as a single block.
-    return 1;  // TODO _tiles_M * _n_batches;
+    // The final window includes the tail, all other windows will be a multiple
+    // of the window block in size.
+    return iceildiv(_n_channels, WINDOW_BLOCK);
   }
 
   template <int output_tile_rows, int output_tile_cols, int kernel_rows, int kernel_cols>
@@ -180,18 +188,32 @@ namespace winograd
     const unsigned int start, const unsigned int stop
   )
   {
-    // TODO When the input transform supports multithreading call execute for a
-    // portion of the tile rows.
-    (void) start;
-    (void) stop;
-
-    // For now, just do all of the work.
-    const Tensor4DShape input_shape = {
-      _n_batches, _n_rows, _n_cols, _n_channels, NHWC
-    };
+    if (start >= get_window())
+    {
+      return;
+    }
+
+    // Determine the window of work to perform
+    const unsigned int start_channel = start * WINDOW_BLOCK;
+    const unsigned int stop_channel = std::min<unsigned int>(
+      stop * WINDOW_BLOCK, _n_channels
+    );
+    const unsigned int n_channels = stop_channel - start_channel;
+
+    // Perform the work
     execute(
-      _inptr, input_shape, _padding_type, _tiles_M, _tiles_N, _outptr,
-      _matrix_stride, _matrix_row_stride * _tiles_M * _tiles_N, _matrix_row_stride
+      _inptr + start_channel,
+      _n_batches, _in_batch_stride,
+      _n_rows, _in_row_stride,
+      _n_cols, _in_col_stride,
+      n_channels,
+      _padding_type,
+      _tiles_M,
+      _tiles_N,
+      _outptr + start_channel,
+      _matrix_stride,
+      _matrix_row_stride * _tiles_M * _tiles_N,
+      _matrix_row_stride
    );
  }
 }
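The channel-blocked windowing introduced above is what makes the input transform safe to multithread: each window maps to a disjoint slice of the channel dimension, so workers never touch the same data and need no synchronisation. A standalone sketch of the window arithmetic follows; the 40-channel shape is an assumed example, not a value from the patch.

    #include <algorithm>
    #include <cstdio>

    int main()
    {
      const unsigned int WINDOW_BLOCK = 16;  // as defined in winograd_gemm.hpp below
      const unsigned int n_channels = 40;    // assumed example value

      // get_window() = iceildiv(n_channels, WINDOW_BLOCK)
      const unsigned int n_windows = (n_channels + WINDOW_BLOCK - 1) / WINDOW_BLOCK;

      for (unsigned int w = 0; w < n_windows; w++)
      {
        // Mirrors the start_channel/stop_channel computation in run()
        const unsigned int start_channel = w * WINDOW_BLOCK;
        const unsigned int stop_channel = std::min((w + 1) * WINDOW_BLOCK, n_channels);
        std::printf("window %u -> channels [%u, %u)\n", w, start_channel, stop_channel);
      }
      return 0;  // prints [0,16), [16,32), [32,40): the final window carries the tail
    }
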
diff --git a/arm_compute/core/NEON/kernels/convolution/winograd/transforms/output.hpp b/arm_compute/core/NEON/kernels/convolution/winograd/transforms/output.hpp
index 401b2816be..700ca76c68 100644
--- a/arm_compute/core/NEON/kernels/convolution/winograd/transforms/output.hpp
+++ b/arm_compute/core/NEON/kernels/convolution/winograd/transforms/output.hpp
@@ -23,7 +23,7 @@
  */
 
 #pragma once
-#include "arm_compute/core/NEON/kernels/convolution/winograd/winograd_gemm.hpp"
+#include "../winograd_gemm.hpp"
 
 namespace winograd
 {
@@ -31,7 +31,13 @@ namespace winograd
             int kernel_rows, int kernel_cols>
   template <typename T>
   void WinogradGEMM<output_tile_rows, output_tile_cols, kernel_rows, kernel_cols>::OutputTransform<T>::execute(
-    const Tensor4DShape &output_shape,
+    const int n_batches,
+    const int output_batch_stride,
+    const int n_rows,
+    const int output_row_stride,
+    const int n_cols,
+    const int output_col_stride,
+    const int n_channels,
     const T* const matrix_base,
     const int matrix_stride,
     const int matrix_row_stride,
@@ -41,19 +47,16 @@ namespace winograd
   {
     // Compute the number of tiles and hence the padding required on the bottom
     // and right of the image.
-    const int tile_M = iceildiv(output_shape.n_rows, output_tile_rows);
-    const int tile_N = iceildiv(output_shape.n_cols, output_tile_cols);
-    const int pad_bottom = output_tile_rows*tile_M - output_shape.n_rows;
-    const int pad_right = output_tile_cols*tile_N - output_shape.n_cols;
+    const int tile_M = iceildiv(n_rows, output_tile_rows);
+    const int tile_N = iceildiv(n_cols, output_tile_cols);
+    const int pad_bottom = output_tile_rows*tile_M - n_rows;
+    const int pad_right = output_tile_cols*tile_N - n_cols;
 
     const int matrix_tile_row_stride = tile_N * matrix_row_stride;
     const int matrix_batch_stride = tile_M * matrix_tile_row_stride;
-    const int output_col_stride = output_shape.n_channels;
-    const int output_row_stride = output_shape.n_cols * output_col_stride;
-    const int output_batch_stride = output_shape.n_rows * output_row_stride;
 
     // Perform the output transformation for each batch
-    for (int batch = 0; batch < output_shape.n_batches; batch++)
+    for (int batch = 0; batch < n_batches; batch++)
     {
       // Get batch offset for input and outputs.
       const T* const matrix_batch = matrix_base + batch*matrix_batch_stride;
@@ -69,7 +72,7 @@ namespace winograd
 
         // Process the row
         process_tile_row(
-          tile_N, output_shape.n_channels, matrix_tile_row, matrix_stride,
+          tile_N, n_channels, matrix_tile_row, matrix_stride,
           matrix_row_stride, biases,
           outptr_row, output_row_stride, output_col_stride,
           row_pad_bottom, pad_right
@@ -139,12 +142,18 @@ namespace winograd
       const int n_batches,
       const int n_rows,
       const int n_cols,
-      const int n_channels
+      const int n_channels,
+      const int out_batch_stride,
+      const int out_row_stride,
+      const int out_col_stride
   ) : _matrix_base(matrix_base), _biases(biases),
       _matrix_stride(matrix_stride), _matrix_row_stride(matrix_row_stride),
       _outptr(output), _n_batches(n_batches), _n_rows(n_rows), _n_cols(n_cols),
       _n_channels(n_channels), _tile_M(iceildiv(n_rows, output_tile_rows)),
-      _tile_N(iceildiv(n_cols, output_tile_cols))
+      _tile_N(iceildiv(n_cols, output_tile_cols)),
+      _out_col_stride(out_col_stride ? out_col_stride : n_channels),
+      _out_row_stride(out_row_stride ? out_row_stride : n_cols * _out_col_stride),
+      _out_batch_stride(out_batch_stride ? out_batch_stride : n_rows * _out_row_stride)
   {
   }
 
@@ -152,10 +161,9 @@ namespace winograd
   template <typename T>
   unsigned int WinogradGEMM<output_tile_rows, output_tile_cols, kernel_rows, kernel_cols>::OutputTransform<T>::get_window() const
   {
-    // TODO When the output transform supports multithreading, return the total
-    // number of tile rows (allowing for multiple batches). For now we return 1
-    // to indicate that the activations must be transformed as a single block.
-    return 1;  // TODO _tile_M * _n_batches;
+    // The final window includes the tail, all other windows will be a multiple
+    // of the window block in size.
+    return iceildiv(_n_channels, WINDOW_BLOCK);
   }
 
   template <int output_tile_rows, int output_tile_cols, int kernel_rows, int kernel_cols>
@@ -164,18 +172,31 @@ namespace winograd
     const unsigned int start, const unsigned int stop
   )
   {
-    // TODO When the output transform supports multithreading call execute for a
-    // portion of the tile rows.
-    (void) start;
-    (void) stop;
-
-    // For now, just do all of the work.
-    const Tensor4DShape output_shape = {
-      _n_batches, _n_rows, _n_cols, _n_channels, NHWC
-    };
+    if (start >= get_window())
+    {
+      return;
+    }
+
+    // Determine the window of work to perform
+    const unsigned int start_channel = start * WINDOW_BLOCK;
+    const unsigned int stop_channel = std::min<unsigned int>(
+      stop * WINDOW_BLOCK, _n_channels
+    );
+    const unsigned int n_channels = stop_channel - start_channel;
+
     execute(
-      output_shape, _matrix_base, _matrix_stride, _matrix_row_stride, _biases,
-      _outptr
+      _n_batches,
+      _out_batch_stride,
+      _n_rows,
+      _out_row_stride,
+      _n_cols,
+      _out_col_stride,
+      n_channels,
+      _matrix_base + start_channel,
+      _matrix_stride,
+      _matrix_row_stride,
+      (_biases)?(_biases + start_channel):(nullptr),
+      _outptr + start_channel
    );
  }
 }  // namespace winograd
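The output transform mirrors the input transform: the same WINDOW_BLOCK channel slicing, with the bias pointer additionally offset by start_channel when biases are present. Both transforms also now accept optional strides that default to zero, which the initialiser lists above resolve to packed NHWC values. A minimal sketch of that resolution logic, under an assumed 8x8x32 output shape:

    #include <cassert>

    int main()
    {
      const int n_rows = 8, n_cols = 8, n_channels = 32;  // assumed NHWC shape
      const int arg_col = 0, arg_row = 0, arg_batch = 0;  // caller passed no strides

      // Same resolution order as the constructors: column, then row, then batch,
      // so each default builds on the one resolved before it.
      const int col_stride   = arg_col   ? arg_col   : n_channels;           // 32
      const int row_stride   = arg_row   ? arg_row   : n_cols * col_stride;  // 256
      const int batch_stride = arg_batch ? arg_batch : n_rows * row_stride;  // 2048

      assert(col_stride == 32 && row_stride == 256 && batch_stride == 2048);
      return 0;
    }
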
diff --git a/arm_compute/core/NEON/kernels/convolution/winograd/winograd_gemm.hpp b/arm_compute/core/NEON/kernels/convolution/winograd/winograd_gemm.hpp
index dd67e97035..bc067fd07a 100644
--- a/arm_compute/core/NEON/kernels/convolution/winograd/winograd_gemm.hpp
+++ b/arm_compute/core/NEON/kernels/convolution/winograd/winograd_gemm.hpp
@@ -31,6 +31,7 @@
 #include "arm_compute/core/NEON/kernels/convolution/common/tensor.hpp"
 #include "arm_compute/core/NEON/kernels/convolution/common/utils.hpp"
+
 #include <thread>
 #include <utility>
 #include <vector>
@@ -135,15 +136,21 @@ class WinogradGEMM
     /** Apply the transform to a tensor. */
     static void execute(
-      const T *inptr,
-      const Tensor4DShape& input_shape,
-      const PaddingType padding_type,
+      const T* const input,        /** Input tensor data */
+      const int n_batches,         /** Number of batches in input tensor. */
+      const int in_batch_stride,   /** Stride between batches of the input. */
+      const int n_rows,            /** Number of rows in input tensor. */
+      const int in_row_stride,     /** Stride between rows of the input. */
+      const int n_cols,            /** Number of columns in input tensor. */
+      const int in_col_stride,     /** Stride between columns of the input. */
+      const int n_channels,        /** Number of channels in input tensor. */
+      const PaddingType padding,   /** Padding type. */
       const int tile_M,
       const int tile_N,
-      T *outptr_base,
-      const int matrix_stride,
-      const int matrix_batch_stride,
-      const int matrix_row_stride
+      T* const output,             /** Base of output matrices. */
+      const int matrix_stride,     /** Stride between output matrices. */
+      const int matrix_batch_stride,  /** Stride between batches within the matrix. */
+      const int matrix_row_stride  /** Stride within matrices. */
     );
 
     /***********************************************************************/
 
@@ -159,11 +166,15 @@ class WinogradGEMM
       const PaddingType padding,   /** Padding type. */
       T* const output,             /** Base of output matrices. */
       const int matrix_stride,     /** Stride between output matrices. */
-      const int matrix_row_stride  /** Stride within matrices. */
+      const int matrix_row_stride, /** Stride within matrices. */
+      const int in_batch_stride=0, /** Stride between input batches. */
+      const int in_row_stride=0,   /** Stride between input rows. */
+      const int in_col_stride=0    /** Stride between input columns. */
     );
 
-    /** Get the winodw of work a given operator can perform. */
+    /** Get the window of work a given operator can perform. */
     unsigned int get_window() const;
+    static constexpr unsigned int WINDOW_BLOCK = 16;  // Base size of window
 
     /** Perform work upon a window of the input. */
     void run(const unsigned int start, const unsigned int stop);
@@ -201,6 +212,7 @@ class WinogradGEMM
       T* const _outptr;
       const int _n_batches, _n_rows, _n_cols, _n_channels, _matrix_stride,
                 _matrix_row_stride, _tiles_M, _tiles_N;
+      const int _in_col_stride, _in_row_stride, _in_batch_stride;
       const PaddingType _padding_type;
   };
@@ -220,7 +232,13 @@ class WinogradGEMM
     /** Apply the transform to create a tensor. */
     static void execute(
-      const Tensor4DShape &output_shape,
+      const int n_batches,
+      const int out_batch_stride,
+      const int n_rows,
+      const int out_row_stride,
+      const int n_cols,
+      const int out_col_stride,
+      const int n_channels,
       const T* const matrix_base,
       const int matrix_stride,
       const int matrix_row_stride,
@@ -241,11 +259,15 @@ class WinogradGEMM
       const int n_batches,          /** Number of batches in output tensor. */
       const int n_rows,             /** Number of rows in output tensor. */
       const int n_cols,             /** Number of columns in output tensor. */
-      const int n_channels          /** Number of channels in output tensor. */
+      const int n_channels,         /** Number of channels in output tensor. */
+      const int out_batch_stride=0, /** Output batch stride. */
+      const int out_row_stride=0,   /** Output row stride. */
+      const int out_col_stride=0    /** Output column stride. */
     );
 
     /** Get the window of work a given operator can perform. */
     unsigned int get_window() const;
+    static constexpr unsigned int WINDOW_BLOCK = 16;  // Base size of window
 
     /** Perform work upon a window of the input. */
     void run(const unsigned int start, const unsigned int stop);
@@ -284,6 +306,7 @@ class WinogradGEMM
       const int _matrix_stride, _matrix_row_stride;
       T* const _outptr;
       const int _n_batches, _n_rows, _n_cols, _n_channels, _tile_M, _tile_N;
+      const int _out_col_stride, _out_row_stride, _out_batch_stride;
   };
 
   /** Perform a convolution. */
@@ -296,54 +319,6 @@ class WinogradGEMM
       typedef TOut OutputType;
       typedef TIn InputType;
 
-      /** Create a new Winograd operator. */
-      Convolution(
-        const KernelShape &kernel_shape,
-        const Tensor4DShape &input_shape,
-        const PaddingType padding,
-        void *kernel_storage=NULL
-      );
-
-      Convolution(const Convolution&) = delete;
-      Convolution operator=(const Convolution&) = delete;
-
-      /** Create a new Winograd operator and initialise the weights. */
-      Convolution(
-        const KernelShape &kernel_shape,
-        const Tensor4DShape &input_shape,
-        const PaddingType padding,
-        const TIn* const kernel,
-        void *kernel_storage=NULL,
-        void *transform_working_space=NULL
-      );
-
-      /** Clean up a convolution engine. */
-      ~Convolution();
-
-      /** Transform the weights into the Winograd domain. */
-      template <typename WeightsTransformT=WeightsTransform<TIn>>
-      void transform_weights(
-        const TIn* const kernel,
-        void *transform_working_space=NULL
-      );
-
-      /* Apply the Winograd operator to some input. */
-      void execute(
-        TOut* const output,
-        const TIn* const input,
-        const TOut* const biases,
-        void* working_space=NULL,
-        const int n_threads=1
-      );
-
-      /* Apply the Winograd operator to some input. */
-      void execute(
-        TOut* const output,
-        const TIn* const input,
-        const TOut* const biases,
-        const int n_threads
-      );
-
       /** Get the output shape of a convolution. */
       static Tensor4DShape get_output_shape(
         const KernelShape &kernel_shape,
@@ -421,23 +396,6 @@ class WinogradGEMM
 
     static constexpr int M_BLOCK = 4;   /** Size of block used by GEMM. */
     static constexpr int N_BLOCK = 16;  /** Size of block used by GEMM. */
-
-    private:
-      const KernelShape kernel_shape;  /** Shape of the kernel to be applied. */
-      TIn *kernel_matrices[N_GEMMS];   /** Pointers into the kernel matrices. */
-      const int kernel_matrix_row_stride;  /** Stride within the kernel matrices. */
-
-      const bool manage_kernel_storage;  /** Kernel storage is managed by the instance. */
-      void* const _kernel_storage;       /** Base pointer for kernel storage. */
-
-      const Tensor4DShape input_shape;  /** Shape of the input tensor. */
-      const PaddingType padding;        /** Padding applied by the operator. */
-
-      const Tensor4DShape output_shape;  /** Output shape produced by the operator. */
-
-      const int tile_rows;  /** Number of rows of tiles. */
-      const int tile_cols;  /** Number of columns of tiles. */
-      const int M, K, N;    /** Sizes of underlying fundamental matrix multiplications. */
   };
 };
 }  // namespace winograd
--
cgit v1.2.1
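With the is_parallelisable() overrides removed, the two transform kernels revert to the base-class default (parallelisable), so a scheduler may split the transform window across threads. A minimal sketch of how any object exposing the new get_window()/run() pair could be driven; the helper and its threading scheme are illustrative assumptions, not part of the library:

    #include <algorithm>
    #include <thread>
    #include <vector>

    // Sketch: Transform is any type exposing get_window() and run(start, stop),
    // e.g. the input/output transforms declared in winograd_gemm.hpp above.
    template <typename Transform>
    void run_windowed(Transform &transform, const unsigned int n_threads)
    {
      const unsigned int window = transform.get_window();
      const unsigned int threads = std::max(1u, n_threads);
      // Split the window evenly; the final range absorbs the tail.
      const unsigned int step = std::max(1u, (window + threads - 1) / threads);

      std::vector<std::thread> workers;
      for (unsigned int start = 0; start < window; start += step)
      {
        const unsigned int stop = std::min(start + step, window);
        // Each [start, stop) range covers disjoint channel blocks, so workers
        // never write to overlapping memory and need no synchronisation.
        workers.emplace_back([&transform, start, stop] { transform.run(start, stop); });
      }
      for (auto &worker : workers)
      {
        worker.join();
      }
    }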