author    Pablo Tello <pablo.tello@arm.com>    2018-05-30 11:44:26 +0100
committer Anthony Barbier <anthony.barbier@arm.com>    2018-11-02 16:53:09 +0000
commit    7df27869aff38b07b50e4fe589f6b2cf51954a92 (patch)
tree      f3e0fc514b6d90306de49dea28ad42a1144cb185 /src
parent    c084f0d4d2ee94bedc31b5e04c2936c91cecf883 (diff)
download  ComputeLibrary-7df27869aff38b07b50e4fe589f6b2cf51954a92.tar.gz
COMPMID-1162: Enable NHWC data layout support for NEWinogradConvolutionLayer - part1
In this first part we reworked the configuration of the kernels: previously, the raw pointer to the buffer was passed in directly when configuring the function.

Change-Id: I83d3cb64c562303093c7f0ae52395ecd080a5d52
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/133560
Tested-by: Jenkins <bsgcomp@arm.com>
Reviewed-by: Giorgio Arena <giorgio.arena@arm.com>
Reviewed-by: Gian Marco Iodice <gianmarco.iodice@arm.com>
Diffstat (limited to 'src')
-rw-r--r--  src/core/NEON/kernels/NEWinogradConvolutionLayerKernel.cpp | 140
-rw-r--r--  src/runtime/NEON/functions/NEWinogradConvolutionLayer.cpp  |  78
2 files changed, 137 insertions, 81 deletions
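The pattern behind the rework is the same in every hunk below: configure() used to dereference the tensor's buffer immediately, which is unsafe because the buffer may not be allocated yet. After the patch, configure() only caches the tensor pointer and the raw parameters, building a throw-away transform around nullptr just to size the execution window, and run() reconstructs the transform from the now-valid buffer. A minimal sketch of the idea, using hypothetical stand-in types rather than the real ACL classes (ITensor here is reduced to a bare buffer holder; Transform stands in for WeightsTransform/InputTransform/OutputTransform):

#include <cstddef>
#include <cstdint>

// Hypothetical stand-ins for the library types; only the shape of the
// pattern matters here, not the real ACL interfaces.
struct ITensor
{
    uint8_t *buffer() const { return _buffer; }
    uint8_t *_buffer{ nullptr }; // allocated some time after configure()
};

// Stands in for WeightsTransform / InputTransform / OutputTransform.
struct Transform
{
    Transform(float *buf, int stride) : _buf(buf), _stride(stride) {}
    std::size_t get_window() const { return 16; } // placeholder window size
    void run(std::size_t /*first*/, std::size_t /*last*/) {}
    float *_buf;
    int    _stride;
};

class CachedKernel
{
public:
    void configure(const ITensor *tensor, int stride)
    {
        // Before this patch the kernel took tensor->buffer() here, even though
        // the buffer may not have been allocated yet. Now only the tensor
        // pointer and the raw parameters are cached.
        _tensor = tensor;
        _stride = stride;
        // A transform built around nullptr is enough to size the window.
        Transform transform(nullptr, _stride);
        _window_size = transform.get_window();
    }

    void run()
    {
        // By run() time the buffer is guaranteed to be valid, so the real
        // transform is reconstructed from it on every invocation.
        Transform transform(reinterpret_cast<float *>(_tensor->buffer()), _stride);
        transform.run(0, _window_size);
    }

private:
    const ITensor *_tensor{ nullptr };
    int            _stride{ 0 };
    std::size_t    _window_size{ 0 };
};

This matches, for example, NEWinogradLayerTransformWeightsKernel below, where configure() now constructs WeightsTransform with a nullptr weights pointer purely to call get_window(), and run() rebuilds it from _weights_hwio->buffer().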
diff --git a/src/core/NEON/kernels/NEWinogradConvolutionLayerKernel.cpp b/src/core/NEON/kernels/NEWinogradConvolutionLayerKernel.cpp
index 672684d14f..cfd53d7082 100644
--- a/src/core/NEON/kernels/NEWinogradConvolutionLayerKernel.cpp
+++ b/src/core/NEON/kernels/NEWinogradConvolutionLayerKernel.cpp
@@ -309,9 +309,9 @@ template class NEWinogradLayerBatchedGEMMKernel<float, float, 2, 2, 5, 5>;
// Weights transform
template <typename T, int OutputTileRows, int OutputTileCols, int KernelRows, int KernelCols>
-unsigned int NEWinogradLayerTransformWeightsKernel<T, OutputTileRows, OutputTileCols, KernelRows, KernelCols>::get_weight_storage_size(int n_output_channels, int n_input_channels) const
+unsigned int NEWinogradLayerTransformWeightsKernel<T, OutputTileRows, OutputTileCols, KernelRows, KernelCols>::get_weight_storage_size(int num_output_channels, int num_input_channels) const
{
- const KernelShape shape(n_output_channels, KernelRows, KernelCols, n_input_channels);
+ const KernelShape shape(num_output_channels, KernelRows, KernelCols, num_input_channels);
return static_cast<unsigned int>(
// WinogradConv returns the size in bytes, we divide by `sizeof(T)` to express that in units of T
WinogradConv::get_kernel_storage_size(shape) / sizeof(T));
@@ -319,7 +319,8 @@ unsigned int NEWinogradLayerTransformWeightsKernel<T, OutputTileRows, OutputTile
template <typename T, int OutputTileRows, int OutputTileCols, int KernelRows, int KernelCols>
NEWinogradLayerTransformWeightsKernel<T, OutputTileRows, OutputTileCols, KernelRows, KernelCols>::NEWinogradLayerTransformWeightsKernel()
- : _transform()
+ : _weights_hwio(nullptr), _output(nullptr), _matrix_stride(0), _num_output_channels(0), _num_input_channels(0)
+
{
}
@@ -333,15 +334,20 @@ template <typename T, int OutputTileRows, int OutputTileCols, int KernelRows, in
void NEWinogradLayerTransformWeightsKernel<T, OutputTileRows, OutputTileCols, KernelRows, KernelCols>::configure(
const ITensor *weights_hwio,
T *const output,
- const int matrix_stride, /** Stride across matrices in the output. */
- const int n_output_channels, /** Number of filters. */
- const int n_input_channels) /** Number of channels in each filter. */
-{
- const int matrix_row_stride = roundup(n_output_channels, WinogradConv::N_BLOCK);
- _transform = support::cpp14::make_unique<WeightsTransform>(reinterpret_cast<T *>(weights_hwio->buffer()), output, matrix_stride, matrix_row_stride, n_output_channels,
- n_input_channels);
- Window win;
- auto win_last = _transform->get_window();
+ const int matrix_stride, /** Stride across matrices in the output. */
+ const int num_output_channels, /** Number of filters. */
+ const int num_input_channels) /** Number of channels in each filter. */
+{
+ _weights_hwio = weights_hwio;
+ _output = output;
+ _matrix_stride = matrix_stride;
+ _num_output_channels = num_output_channels;
+ _num_input_channels = num_input_channels;
+
+ const int matrix_row_stride = roundup(num_output_channels, WinogradConv::N_BLOCK);
+ WeightsTransform transform(nullptr, output, matrix_stride, matrix_row_stride, num_output_channels, num_input_channels);
+ Window win;
+ auto win_last = transform.get_window();
win.set(Window::DimX, Window::Dimension(0, win_last, 1));
INEKernel::configure(win);
}
@@ -351,9 +357,12 @@ void NEWinogradLayerTransformWeightsKernel<T, OutputTileRows, OutputTileCols, Ke
{
ARM_COMPUTE_UNUSED(info);
ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
- const size_t fst = window.x().start();
- const size_t lst = window.x().end();
- _transform->run(fst, lst);
+
+ const int matrix_row_stride = roundup(_num_output_channels, WinogradConv::N_BLOCK);
+ WeightsTransform transform(reinterpret_cast<T *>(_weights_hwio->buffer()), _output, _matrix_stride, matrix_row_stride, _num_output_channels, _num_input_channels);
+ const size_t fst = window.x().start();
+ const size_t lst = window.x().end();
+ transform.run(fst, lst);
}
template <typename T, int OutputTileRows, int OutputTileCols, int KernelRows, int KernelCols>
@@ -379,16 +388,16 @@ template class NEWinogradLayerTransformWeightsKernel<float, 2, 2, 5, 5>;
template <typename T, int OutputTileRows, int OutputTileCols, int KernelRows, int KernelCols>
unsigned int NEWinogradLayerTransformInputKernel<T, OutputTileRows, OutputTileCols, KernelRows, KernelCols>::get_input_storage_size(
- int n_batches, /** Number of batches in the input tensor. */
- int n_channels, /** Number of feature maps in the input tensor. */
- int n_rows, /** Number of rows in each feature map. */
- int n_cols, /** Number of columns in each feature map. */
- bool same_padding /** Use "SAME" padding, otherwise use "VALID". */
+ int num_batches, /* Number of batches in the input tensor. */
+ int num_channels, /* Number of feature maps in the input tensor. */
+ int num_rows, /* Number of rows in each feature map. */
+ int num_cols, /* Number of columns in each feature map. */
+ bool same_padding /* Use "SAME" padding, otherwise use "VALID". */
) const
{
// Construct shapes for the input and kernel tensors.
- const Tensor4DShape input_shape(n_batches, n_rows, n_cols, n_channels);
- const KernelShape kern_shape(1, KernelRows, KernelCols, n_channels);
+ const Tensor4DShape input_shape(num_batches, num_rows, num_cols, num_channels);
+ const KernelShape kern_shape(1, KernelRows, KernelCols, num_channels);
const PaddingType padding = (same_padding) ? PADDING_SAME : PADDING_VALID;
// Return the size, converted into units of TIn
return static_cast<unsigned int>(WinogradConv::get_input_storage_size(kern_shape, input_shape, padding) / sizeof(T));
@@ -403,25 +412,32 @@ int NEWinogradLayerTransformInputKernel<T, OutputTileRows, OutputTileCols, Kerne
template <typename T, int OutputTileRows, int OutputTileCols, int KernelRows, int KernelCols>
NEWinogradLayerTransformInputKernel<T, OutputTileRows, OutputTileCols, KernelRows, KernelCols>::NEWinogradLayerTransformInputKernel()
- : _transform()
+ : _input_nhwc(), _num_batches(0), _num_rows(0), _num_cols(0), _num_channels(0), _padding(), _output(nullptr), _matrix_stride(0)
{
}
template <typename T, int OutputTileRows, int OutputTileCols, int KernelRows, int KernelCols>
void NEWinogradLayerTransformInputKernel<T, OutputTileRows, OutputTileCols, KernelRows, KernelCols>::configure(
- const T *const input, /** Input tensor data */
- const int n_batches, /** Number of batches in input tensor. */
- const int n_rows, /** Number of rows in input tensor. */
- const int n_cols, /** Number of columns in input tensor. */
- const int n_channels, /** Number of channels in input tensor. */
- const PaddingType padding, /** Padding type. */
- T *const output, /** Base of output matrices. */
- const int matrix_stride) /** Stride between output matrices. */
-{
- // _input_matrix_row_stride(n_input_channels),
- _transform = support::cpp14::make_unique<InputTransform>(input, n_batches, n_rows, n_cols, n_channels, padding, output, matrix_stride, n_channels);
- Window win;
- auto win_last = _transform->get_window();
+ const ITensor *input_nhwc,
+ const int num_batches, /* Number of batches in input tensor. */
+ const int num_rows, /* Number of rows in input tensor. */
+ const int num_cols, /* Number of columns in input tensor. */
+ const int num_channels, /* Number of channels in input tensor. */
+ const PaddingType padding, /* Padding type. */
+ T *const output, /* Base of output matrices. */
+ const int matrix_stride) /* Stride between output matrices. */
+{
+ _input_nhwc = input_nhwc;
+ _num_batches = num_batches;
+ _num_rows = num_rows;
+ _num_cols = num_cols;
+ _num_channels = num_channels;
+ _padding = padding;
+ _output = output;
+ _matrix_stride = matrix_stride;
+ InputTransform transform(nullptr, num_batches, num_rows, num_cols, num_channels, padding, output, matrix_stride, num_channels);
+ Window win;
+ auto win_last = transform.get_window();
win.set(Window::DimX, Window::Dimension(0, win_last, 1));
INEKernel::configure(win);
}
@@ -431,9 +447,13 @@ void NEWinogradLayerTransformInputKernel<T, OutputTileRows, OutputTileCols, Kern
{
ARM_COMPUTE_UNUSED(info);
ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
+
+ InputTransform input_transform(reinterpret_cast<const T *>(_input_nhwc->buffer()), _num_batches, _num_rows, _num_cols, _num_channels, _padding, _output, _matrix_stride, _num_channels);
+
+ // The transform is constructed here rather than in configure() because the input buffer has not been allocated at configuration time
const size_t fst = window.x().start();
const size_t lst = window.x().end();
- _transform->run(fst, lst);
+ input_transform.run(fst, lst);
}
template <typename T, int OutputTileRows, int OutputTileCols, int KernelRows, int KernelCols>
@@ -453,16 +473,16 @@ template class NEWinogradLayerTransformInputKernel<float, 2, 2, 5, 5>;
template <typename T, int OutputTileRows, int OutputTileCols, int KernelRows, int KernelCols>
unsigned int NEWinogradLayerTransformOutputKernel<T, OutputTileRows, OutputTileCols, KernelRows, KernelCols>::get_output_storage_size(
- int n_batches, /** Number of batches in the output tensor. */
- int n_rows, /** Number of rows in each feature map of the input tensor. */
- int n_cols, /** Number of columns in each feature map of the input tensor. */
- int n_output_channels, /** Number of feature maps in the output tensor. */
- bool same_padding /** Use "SAME" padding, otherwise use "VALID". */
+ int num_batches, /* Number of batches in the output tensor. */
+ int num_rows, /* Number of rows in each feature map of the input tensor. */
+ int num_cols, /* Number of columns in each feature map of the input tensor. */
+ int num_output_channels, /* Number of feature maps in the output tensor. */
+ bool same_padding /* Use "SAME" padding, otherwise use "VALID". */
) const
{
// Construct shapes for the input and kernel tensors.
- const Tensor4DShape input_shape(n_batches, n_rows, n_cols, 1);
- const KernelShape kern_shape(n_output_channels, KernelRows, KernelCols, 1);
+ const Tensor4DShape input_shape(num_batches, num_rows, num_cols, 1);
+ const KernelShape kern_shape(num_output_channels, KernelRows, KernelCols, 1);
const PaddingType padding = (same_padding) ? PADDING_SAME : PADDING_VALID;
// Return the size, converted into units of TOut
@@ -472,7 +492,7 @@ unsigned int NEWinogradLayerTransformOutputKernel<T, OutputTileRows, OutputTileC
template <typename T, int OutputTileRows, int OutputTileCols, int KernelRows, int KernelCols>
NEWinogradLayerTransformOutputKernel<T, OutputTileRows, OutputTileCols, KernelRows, KernelCols>::NEWinogradLayerTransformOutputKernel()
- : _biases(nullptr), _output_workspace(nullptr), _matrix_stride(0), _matrix_row_stride(0), _output(nullptr), _n_batches(0), _n_rows(0), _n_cols(0), _n_channels(0)
+ : _biases(nullptr), _output_workspace(nullptr), _matrix_stride(0), _matrix_row_stride(0), _output_nhwc(nullptr), _num_batches(0), _num_rows(0), _num_cols(0), _num_channels(0)
{
}
@@ -494,24 +514,24 @@ void NEWinogradLayerTransformOutputKernel<T, OutputTileRows, OutputTileCols, Ker
const ITensor *biases,
const T *const output_workingspace,
const int matrix_stride,
- T *const output,
- const int n_batches,
- const int n_rows,
- const int n_cols,
- const int n_channels)
+ ITensor *const output_nhwc,
+ const int num_batches,
+ const int num_rows,
+ const int num_cols,
+ const int num_channels)
{
_biases = biases;
_output_workspace = output_workingspace;
_matrix_stride = matrix_stride;
- _matrix_row_stride = roundup(n_channels, WinogradConv::N_BLOCK);
- _output = output;
- _n_batches = n_batches;
- _n_rows = n_rows;
- _n_cols = n_cols;
- _n_channels = n_channels;
+ _matrix_row_stride = roundup(num_channels, WinogradConv::N_BLOCK);
+ _output_nhwc = output_nhwc;
+ _num_batches = num_batches;
+ _num_rows = num_rows;
+ _num_cols = num_cols;
+ _num_channels = num_channels;
// We don't have the biases buffer at this stage as it hasn't been allocated; we pass in nullptr. OutputTransform is only used here to compute the window
- OutputTransform output_transform(_output_workspace, _matrix_stride, _matrix_row_stride, nullptr, _output, _n_batches, _n_rows, _n_cols, _n_channels);
+ OutputTransform output_transform(_output_workspace, _matrix_stride, _matrix_row_stride, nullptr, nullptr, _num_batches, _num_rows, _num_cols, _num_channels);
Window win;
auto win_last = output_transform.get_window();
win.set(Window::DimX, Window::Dimension(0, win_last, 1));
@@ -524,11 +544,11 @@ void NEWinogradLayerTransformOutputKernel<T, OutputTileRows, OutputTileCols, Ker
ARM_COMPUTE_UNUSED(info);
ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
ARM_COMPUTE_ERROR_ON_NULLPTR(_output_workspace);
- ARM_COMPUTE_ERROR_ON_NULLPTR(_output);
+ ARM_COMPUTE_ERROR_ON_NULLPTR(_output_nhwc);
OutputTransform output_transform(_output_workspace, _matrix_stride, _matrix_row_stride,
- (_biases ? reinterpret_cast<T *>(_biases->buffer()) : nullptr), _output,
- _n_batches, _n_rows, _n_cols, _n_channels);
+ (_biases ? reinterpret_cast<T *>(_biases->buffer()) : nullptr), reinterpret_cast<T *>(_output_nhwc->buffer()),
+ _num_batches, _num_rows, _num_cols, _num_channels);
// The code below cannot be moved to configure because biases hasn't been allocated at that point
const size_t fst = window.x().start();
diff --git a/src/runtime/NEON/functions/NEWinogradConvolutionLayer.cpp b/src/runtime/NEON/functions/NEWinogradConvolutionLayer.cpp
index 1a9c72965b..d6bc5cfd9a 100644
--- a/src/runtime/NEON/functions/NEWinogradConvolutionLayer.cpp
+++ b/src/runtime/NEON/functions/NEWinogradConvolutionLayer.cpp
@@ -60,8 +60,8 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *weights,
ARM_COMPUTE_UNUSED(output);
ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F32);
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, weights);
- ARM_COMPUTE_RETURN_ERROR_ON(data_layout != DataLayout::NCHW); // COMPMID-1162
ARM_COMPUTE_RETURN_ERROR_ON_MSG(weights->dimension(width_idx) != 3 && weights->dimension(height_idx) != 5, "Only 3 and 5 kernels are supported");
+ ARM_COMPUTE_RETURN_ERROR_ON(data_layout != DataLayout::NCHW); // COMPMID-1287
ARM_COMPUTE_RETURN_ERROR_ON(weights->num_dimensions() > 4);
ARM_COMPUTE_RETURN_ERROR_ON_MSG(conv_info.stride().first != 1 || conv_info.stride().second != 1, "Winograd layer only supports unit strides.");
@@ -107,6 +107,7 @@ bool check_support_fast_math(const Size2D &output_tile, const Size2D &kernel_siz
return std::find(fast_math_winograd.begin(), fast_math_winograd.end(), p) != fast_math_winograd.end();
}
+
} //namespace
NEWinogradConvolutionLayer::NEWinogradConvolutionLayer(std::shared_ptr<IMemoryManager> memory_manager)
@@ -218,33 +219,60 @@ void NEWinogradConvolutionLayer::configure(const ITensor *input, const ITensor *
_output_nhwc.allocator()->init(info);
_output_nhwc.allocator()->allocate();
- // Re-order a weight tensor from [Output feature map x Input feature map x Height x Width] to [Height x Width x Input feature map x Output feature map]
- _permute_weights.configure(weights, &_weights_hwio, PermutationVector(3U, 2U, 0U, 1U));
- _weights_hwio.allocator()->allocate();
-
- // configure the kernel to transform the input tensor from NCHW -> NHWC
- _permute_input.configure(input, &_input_nhwc, PermutationVector(2U, 0U, 1U));
- _input_nhwc.allocator()->allocate();
-
const KernelShape kernel_shape({ out_channels, static_cast<int>(kernel_size.height), static_cast<int>(kernel_size.width), in_channels });
// Configure the InputTransform
const int input_matrix_stride = transform_input_kernel->get_matrix_stride(kernel_shape, in_shape, use_padding_type);
- transform_input_kernel->configure(reinterpret_cast<float *>(_input_nhwc.buffer()), in_shape.n_batches, in_shape.n_rows, in_shape.n_cols, in_shape.n_channels, use_padding_type,
- reinterpret_cast<float *>(_input_workspace.buffer()), input_matrix_stride);
+
+ if(data_layout == DataLayout::NCHW)
+ {
+ // configure the kernel to transform the input tensor from NCHW -> NHWC
+ _permute_input.configure(input, &_input_nhwc, PermutationVector(2U, 0U, 1U));
+ _input_nhwc.allocator()->allocate();
+ transform_input_kernel->configure(&_input_nhwc, in_shape.n_batches, in_shape.n_rows, in_shape.n_cols, in_shape.n_channels, use_padding_type,
+ reinterpret_cast<float *>(_input_workspace.buffer()), input_matrix_stride);
+ }
+ else
+ {
+ transform_input_kernel->configure(_input, in_shape.n_batches, in_shape.n_rows, in_shape.n_cols, in_shape.n_channels, use_padding_type,
+ reinterpret_cast<float *>(_input_workspace.buffer()), input_matrix_stride);
+ }
// Configure WeightsTransform
const int kernel_matrix_stride = transform_weights_kernel->get_matrix_stride(kernel_shape);
- transform_weights_kernel->configure(&_weights_hwio, reinterpret_cast<float *>(_kernel_storage.buffer()), kernel_matrix_stride, out_channels, in_channels);
+ if(data_layout == DataLayout::NCHW)
+ {
+ // Re-order a weight tensor from [Output feature map x Input feature map x Height x Width] to [Height x Width x Input feature map x Output feature map]
+ _permute_weights.configure(weights, &_weights_hwio, PermutationVector(3U, 2U, 0U, 1U));
+
+ transform_weights_kernel->configure(&_weights_hwio, reinterpret_cast<float *>(_kernel_storage.buffer()), kernel_matrix_stride, out_channels, in_channels);
+ }
+ else
+ {
+ // Re-order a weight tensor from [Output feature map x Input feature map x Height x Width] to [Height x Width x Input feature map x Output feature map]
+ _permute_weights.configure(weights, &_weights_hwio, PermutationVector(3U, 0U, 1U, 2U));
+
+ transform_weights_kernel->configure(&_weights_hwio, reinterpret_cast<float *>(_kernel_storage.buffer()), kernel_matrix_stride, out_channels, in_channels);
+ }
+ _weights_hwio.allocator()->allocate();
// Configure OutputTransform
//The biases tensor has not been allocated at this point in time, the output transform will add the biases to the final result in the run() method
const int output_matrix_stride = transform_output_kernel->get_matrix_stride(kernel_shape, in_shape, use_padding_type);
const auto output_shape(transform_output_kernel->get_output_shape(kernel_shape, in_shape, use_padding_type));
- transform_output_kernel->configure(biases, reinterpret_cast<float *>(_output_workspace.buffer()),
- output_matrix_stride, reinterpret_cast<float *>(_output_nhwc.buffer()),
- in_shape.n_batches, output_shape.n_rows, output_shape.n_cols, out_channels);
+ if(data_layout == DataLayout::NCHW)
+ {
+ transform_output_kernel->configure(biases, reinterpret_cast<float *>(_output_workspace.buffer()),
+ output_matrix_stride, &_output_nhwc,
+ in_shape.n_batches, output_shape.n_rows, output_shape.n_cols, out_channels);
+ }
+ else
+ {
+ transform_output_kernel->configure(biases, reinterpret_cast<float *>(_output_workspace.buffer()),
+ output_matrix_stride, _output,
+ in_shape.n_batches, output_shape.n_rows, output_shape.n_cols, out_channels);
+ }
// Configure GEMM
const int tile_rows = iceildiv(output_shape.n_rows, output_tile.height);
@@ -293,14 +321,16 @@ void NEWinogradConvolutionLayer::configure(const ITensor *input, const ITensor *
//Configure Activation Layer
_is_activationlayer_enabled = act_info.enabled();
- if(_is_activationlayer_enabled)
+ if(data_layout == DataLayout::NCHW && _is_activationlayer_enabled)
{
- _activationlayer_function.configure(output, nullptr, act_info);
+ _activationlayer_function.configure(_output, nullptr, act_info);
}
}
void NEWinogradConvolutionLayer::run()
{
+ const DataLayout data_layout = _input->info()->data_layout();
+
_memory_group.acquire();
if(!_reshaped_kernel)
{
@@ -308,9 +338,12 @@ void NEWinogradConvolutionLayer::run()
_permute_weights.run();
NEScheduler::get().schedule(_transform_weights_kernel.get(), Window::DimX);
}
- //Bring channels to the front as Winograd code expects the tensor to be in the format NHWC
- _permute_input.run();
+ if(data_layout == DataLayout::NCHW)
+ {
+ //Bring channels to the front as Winograd code expects the tensor to be in the format NHWC
+ _permute_input.run();
+ }
// Transform input tensor to the winograd domain
NEScheduler::get().schedule(_transform_input_kernel.get(), Window::DimX);
@@ -320,8 +353,11 @@ void NEWinogradConvolutionLayer::run()
// Transform output tensor to the spatial domain
NEScheduler::get().schedule(_transform_output_kernel.get(), Window::DimX);
- // Reorder the convoluted output to ACL's ordering NCHW
- _permute_output.run();
+ if(data_layout == DataLayout::NCHW)
+ {
+ // Reorder the convoluted output to ACL's ordering NCHW
+ _permute_output.run();
+ }
if(_is_activationlayer_enabled)
{