/*
 * Copyright (c) 2017-2018 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/core/NEON/kernels/NEWinogradLayerKernel.h"

#include "arm_compute/core/Error.h"
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/ITensor.h"
#include "arm_compute/core/TensorInfo.h"
#include "support/ToolchainSupport.h"

namespace arm_compute
{
// Batched GEMMs

template <typename TIn, typename TOut, int OutputTileRows, int OutputTileCols, int KernelRows, int KernelCols>
NEWinogradLayerBatchedGEMMKernel<TIn, TOut, OutputTileRows, OutputTileCols, KernelRows, KernelCols>::NEWinogradLayerBatchedGEMMKernel()
    : _gemms()
{
}

template <typename TIn, typename TOut, int OutputTileRows, int OutputTileCols, int KernelRows, int KernelCols>
void NEWinogradLayerBatchedGEMMKernel<TIn, TOut, OutputTileRows, OutputTileCols, KernelRows, KernelCols>::configure(
    const unsigned int n_gemms,
    const int M, const int K, const int N,
    const int        a_matrix_stride,
    const int        a_row_stride,
    const int        b_matrix_stride,
    const int        b_row_stride,
    const int        c_matrix_stride,
    const int        c_row_stride,
    const TIn *const a_ptr,
    const TIn *const b_ptr,
    TOut *const      c_ptr)
{
    _gemms = support::cpp14::make_unique<MultiGEMM>(n_gemms, M, K, N, a_matrix_stride, a_row_stride, b_matrix_stride, b_row_stride, c_matrix_stride, c_row_stride, a_ptr, b_ptr, c_ptr);
    Window win;
    auto   win_last = _gemms->get_window();
    win.set(Window::DimX, Window::Dimension(0, win_last, 1));
    INEKernel::configure(win);
}

template <typename TIn, typename TOut, int OutputTileRows, int OutputTileCols, int KernelRows, int KernelCols>
void NEWinogradLayerBatchedGEMMKernel<TIn, TOut, OutputTileRows, OutputTileCols, KernelRows, KernelCols>::run(const Window &window, const ThreadInfo &info)
{
    ARM_COMPUTE_UNUSED(info);
    ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
    const size_t first_gemm = window.x().start();
    const size_t last_gemm  = window.x().end();
    _gemms->run(first_gemm, last_gemm);
}

template <typename TIn, typename TOut, int OutputTileRows, int OutputTileCols, int KernelRows, int KernelCols>
unsigned int NEWinogradLayerBatchedGEMMKernel<TIn, TOut, OutputTileRows, OutputTileCols, KernelRows, KernelCols>::get_number_gemms() const
{
    return WinogradBase::N_GEMMS;
}

template <typename TIn, typename TOut, int OutputTileRows, int OutputTileCols, int KernelRows, int KernelCols>
int NEWinogradLayerBatchedGEMMKernel<TIn, TOut, OutputTileRows, OutputTileCols, KernelRows, KernelCols>::get_output_tile_rows() const
{
    return _output_tile_rows;
}

template <typename TIn, typename TOut, int OutputTileRows, int OutputTileCols, int KernelRows, int KernelCols>
int NEWinogradLayerBatchedGEMMKernel<TIn, TOut, OutputTileRows, OutputTileCols, KernelRows, KernelCols>::get_output_tile_cols() const
{
    return _output_tile_cols;
}

template <typename TIn, typename TOut, int OutputTileRows, int OutputTileCols, int KernelRows, int KernelCols>
int NEWinogradLayerBatchedGEMMKernel<TIn, TOut, OutputTileRows, OutputTileCols, KernelRows, KernelCols>::get_number_blocks() const
{
    return WinogradConv::N_BLOCK;
}

// Explicit instantiations. The template arguments shown (2x2 output tile with
// 3x3 and 5x5 float kernels) are the assumed configurations.
template class NEWinogradLayerBatchedGEMMKernel<float, float, 2, 2, 3, 3>;
template class NEWinogradLayerBatchedGEMMKernel<float, float, 2, 2, 5, 5>;
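
// Note on the batched GEMM stage above: in the Winograd scheme each point of
// the transformed tile yields an independent GEMM. For an m x n output tile
// and an r x s kernel there are (m + r - 1) * (n + s - 1) GEMMs, e.g. 16 for
// F(2x2, 3x3), each computing
//   [n_tiles x n_input_channels] * [n_input_channels x n_output_channels].
// The kernel's window is one-dimensional over the GEMM index, so the scheduler
// can hand disjoint ranges [first_gemm, last_gemm) to different threads.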
// Weights transform

template <typename T, int OutputTileRows, int OutputTileCols, int KernelRows, int KernelCols>
unsigned int NEWinogradLayerTransformWeightsKernel<T, OutputTileRows, OutputTileCols, KernelRows, KernelCols>::get_weight_storage_size(int n_output_channels, int n_input_channels) const
{
    const KernelShape shape(n_output_channels, KernelRows, KernelCols, n_input_channels);
    return static_cast<unsigned int>(
               // WinogradConv returns the size in bytes, we divide by `sizeof(T)` to express that in units of T
               WinogradConv::get_kernel_storage_size(shape) / sizeof(T));
}

template <typename T, int OutputTileRows, int OutputTileCols, int KernelRows, int KernelCols>
NEWinogradLayerTransformWeightsKernel<T, OutputTileRows, OutputTileCols, KernelRows, KernelCols>::NEWinogradLayerTransformWeightsKernel()
    : _transform()
{
}

template <typename T, int OutputTileRows, int OutputTileCols, int KernelRows, int KernelCols>
int NEWinogradLayerTransformWeightsKernel<T, OutputTileRows, OutputTileCols, KernelRows, KernelCols>::get_matrix_stride(const KernelShape &kernel_shape) const
{
    return WinogradConv::get_kernel_matrix_stride(kernel_shape);
}

template <typename T, int OutputTileRows, int OutputTileCols, int KernelRows, int KernelCols>
void NEWinogradLayerTransformWeightsKernel<T, OutputTileRows, OutputTileCols, KernelRows, KernelCols>::configure(
    const ITensor *weights_hwio,
    T *const       output,
    const int      matrix_stride,     /** Stride across matrices in the output. */
    const int      n_output_channels, /** Number of filters. */
    const int      n_input_channels)  /** Number of channels in each filter. */
{
    const int matrix_row_stride = roundup(n_output_channels, WinogradConv::N_BLOCK);
    _transform = support::cpp14::make_unique<WeightsTransform>(reinterpret_cast<T *>(weights_hwio->buffer()), output, matrix_stride, matrix_row_stride, n_output_channels, n_input_channels);
    Window win;
    auto   win_last = _transform->get_window();
    win.set(Window::DimX, Window::Dimension(0, win_last, 1));
    INEKernel::configure(win);
}

template <typename T, int OutputTileRows, int OutputTileCols, int KernelRows, int KernelCols>
void NEWinogradLayerTransformWeightsKernel<T, OutputTileRows, OutputTileCols, KernelRows, KernelCols>::run(const Window &window, const ThreadInfo &info)
{
    ARM_COMPUTE_UNUSED(info);
    ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
    const size_t fst = window.x().start();
    const size_t lst = window.x().end();
    _transform->run(fst, lst);
}

template <typename T, int OutputTileRows, int OutputTileCols, int KernelRows, int KernelCols>
bool NEWinogradLayerTransformWeightsKernel<T, OutputTileRows, OutputTileCols, KernelRows, KernelCols>::is_parallelisable() const
{
    return false;
}

template class NEWinogradLayerTransformWeightsKernel<float, 2, 2, 3, 3>;
template class NEWinogradLayerTransformWeightsKernel<float, 2, 2, 5, 5>;
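
// Note on the weights transform above: each KernelRows x KernelCols filter is
// expanded once via U = G g G^T, scattering its weights over the
// (OutputTileRows + KernelRows - 1) * (OutputTileCols + KernelCols - 1)
// transform matrices that serve as the B operands of the batched GEMM. The row
// stride is rounded up to WinogradConv::N_BLOCK so the transformed weights line
// up with the blocked GEMM layout. is_parallelisable() returns false, so the
// scheduler runs the whole window in a single invocation.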
// Input transform

template <typename T, int OutputTileRows, int OutputTileCols, int KernelRows, int KernelCols>
unsigned int NEWinogradLayerTransformInputKernel<T, OutputTileRows, OutputTileCols, KernelRows, KernelCols>::get_input_storage_size(
    int  n_batches,    /** Number of batches in the input tensor. */
    int  n_channels,   /** Number of feature maps in the input tensor. */
    int  n_rows,       /** Number of rows in each feature map. */
    int  n_cols,       /** Number of columns in each feature map. */
    bool same_padding  /** Use "SAME" padding, otherwise use "VALID". */
) const
{
    // Construct shapes for the input and kernel tensors.
    const Tensor4DShape input_shape(n_batches, n_rows, n_cols, n_channels);
    const KernelShape   kern_shape(1, KernelRows, KernelCols, n_channels);
    const PaddingType   padding = (same_padding) ? PADDING_SAME : PADDING_VALID;
    // Return the size, converted into units of T
    return static_cast<unsigned int>(WinogradConv::get_input_storage_size(kern_shape, input_shape, padding) / sizeof(T));
}

template <typename T, int OutputTileRows, int OutputTileCols, int KernelRows, int KernelCols>
int NEWinogradLayerTransformInputKernel<T, OutputTileRows, OutputTileCols, KernelRows, KernelCols>::get_matrix_stride(
    const KernelShape &kernel_shape, const Tensor4DShape &input_shape, const PaddingType padding_type) const
{
    return WinogradConv::get_input_matrix_stride(kernel_shape, input_shape, padding_type);
}

template <typename T, int OutputTileRows, int OutputTileCols, int KernelRows, int KernelCols>
NEWinogradLayerTransformInputKernel<T, OutputTileRows, OutputTileCols, KernelRows, KernelCols>::NEWinogradLayerTransformInputKernel()
    : _transform()
{
}

template <typename T, int OutputTileRows, int OutputTileCols, int KernelRows, int KernelCols>
void NEWinogradLayerTransformInputKernel<T, OutputTileRows, OutputTileCols, KernelRows, KernelCols>::configure(
    const T *const    input,         /** Input tensor data */
    const int         n_batches,     /** Number of batches in input tensor. */
    const int         n_rows,        /** Number of rows in input tensor. */
    const int         n_cols,        /** Number of columns in input tensor. */
    const int         n_channels,    /** Number of channels in input tensor. */
    const PaddingType padding,       /** Padding type. */
    T *const          output,        /** Base of output matrices. */
    const int         matrix_stride) /** Stride between output matrices. */
{
    // The row stride of the output matrices equals the number of input channels.
    _transform = support::cpp14::make_unique<InputTransform>(input, n_batches, n_rows, n_cols, n_channels, padding, output, matrix_stride, n_channels);
    Window win;
    auto   win_last = _transform->get_window();
    win.set(Window::DimX, Window::Dimension(0, win_last, 1));
    INEKernel::configure(win);
}

template <typename T, int OutputTileRows, int OutputTileCols, int KernelRows, int KernelCols>
void NEWinogradLayerTransformInputKernel<T, OutputTileRows, OutputTileCols, KernelRows, KernelCols>::run(const Window &window, const ThreadInfo &info)
{
    ARM_COMPUTE_UNUSED(info);
    ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
    const size_t fst = window.x().start();
    const size_t lst = window.x().end();
    _transform->run(fst, lst);
}

template <typename T, int OutputTileRows, int OutputTileCols, int KernelRows, int KernelCols>
bool NEWinogradLayerTransformInputKernel<T, OutputTileRows, OutputTileCols, KernelRows, KernelCols>::is_parallelisable() const
{
    return false;
}

template class NEWinogradLayerTransformInputKernel<float, 2, 2, 3, 3>;
template class NEWinogradLayerTransformInputKernel<float, 2, 2, 5, 5>;
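
// Note on the input transform above: each feature map is cut into overlapping
// tiles (adjacent tiles share KernelRows - 1 rows and KernelCols - 1 columns)
// and every tile is mapped through V = B^T d B. Each tile contributes one row,
// indexed by channel, to each of the N_GEMMS input matrices (the A operands of
// the batched GEMM), which is why the row stride passed to the transform is
// n_channels.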
// Output transform

template <typename T, int OutputTileRows, int OutputTileCols, int KernelRows, int KernelCols>
unsigned int NEWinogradLayerTransformOutputKernel<T, OutputTileRows, OutputTileCols, KernelRows, KernelCols>::get_output_storage_size(
    int  n_batches,         /** Number of batches in the output tensor. */
    int  n_rows,            /** Number of rows in each feature map of the input tensor. */
    int  n_cols,            /** Number of columns in each feature map of the input tensor. */
    int  n_output_channels, /** Number of feature maps in the output tensor. */
    bool same_padding       /** Use "SAME" padding, otherwise use "VALID". */
) const
{
    // Construct shapes for the input and kernel tensors.
    const Tensor4DShape input_shape(n_batches, n_rows, n_cols, 1);
    const KernelShape   kern_shape(n_output_channels, KernelRows, KernelCols, 1);
    const PaddingType   padding = (same_padding) ? PADDING_SAME : PADDING_VALID;
    // Return the size, converted into units of T
    return static_cast<unsigned int>(
               WinogradConv::get_output_storage_size(kern_shape, input_shape, padding) / sizeof(T));
}

template <typename T, int OutputTileRows, int OutputTileCols, int KernelRows, int KernelCols>
NEWinogradLayerTransformOutputKernel<T, OutputTileRows, OutputTileCols, KernelRows, KernelCols>::NEWinogradLayerTransformOutputKernel()
    : _biases(nullptr), _output_workspace(nullptr), _matrix_stride(0), _matrix_row_stride(0), _output(nullptr), _n_batches(0), _n_rows(0), _n_cols(0), _n_channels(0)
{
}

template <typename T, int OutputTileRows, int OutputTileCols, int KernelRows, int KernelCols>
int NEWinogradLayerTransformOutputKernel<T, OutputTileRows, OutputTileCols, KernelRows, KernelCols>::get_matrix_stride(
    const KernelShape &kernel_shape, const Tensor4DShape &input_shape, const PaddingType padding_type) const
{
    return WinogradConv::get_output_matrix_stride(kernel_shape, input_shape, padding_type);
}

template <typename T, int OutputTileRows, int OutputTileCols, int KernelRows, int KernelCols>
Tensor4DShape NEWinogradLayerTransformOutputKernel<T, OutputTileRows, OutputTileCols, KernelRows, KernelCols>::get_output_shape(
    const KernelShape &kernel_shape, const Tensor4DShape &in_shape, const PaddingType padding) const
{
    return WinogradConv::get_output_shape(kernel_shape, in_shape, padding);
}

template <typename T, int OutputTileRows, int OutputTileCols, int KernelRows, int KernelCols>
void NEWinogradLayerTransformOutputKernel<T, OutputTileRows, OutputTileCols, KernelRows, KernelCols>::configure(
    const ITensor *biases,
    const T *const output_workingspace,
    const int      matrix_stride,
    T *const       output,
    const int      n_batches,
    const int      n_rows,
    const int      n_cols,
    const int      n_channels)
{
    _biases            = biases;
    _output_workspace  = output_workingspace;
    _matrix_stride     = matrix_stride;
    _matrix_row_stride = roundup(n_channels, WinogradConv::N_BLOCK);
    _output            = output;
    _n_batches         = n_batches;
    _n_rows            = n_rows;
    _n_cols            = n_cols;
    _n_channels        = n_channels;

    // We don't have the biases buffer at this stage as it hasn't been allocated yet,
    // so we pass in nullptr. OutputTransform is only used here to compute the window.
    OutputTransform output_transform(_output_workspace, _matrix_stride, _matrix_row_stride, nullptr, _output, _n_batches, _n_rows, _n_cols, _n_channels);
    Window win;
    auto   win_last = output_transform.get_window();
    win.set(Window::DimX, Window::Dimension(0, win_last, 1));
    INEKernel::configure(win);
}

template <typename T, int OutputTileRows, int OutputTileCols, int KernelRows, int KernelCols>
void NEWinogradLayerTransformOutputKernel<T, OutputTileRows, OutputTileCols, KernelRows, KernelCols>::run(const Window &window, const ThreadInfo &info)
{
    ARM_COMPUTE_UNUSED(info);
    ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
    ARM_COMPUTE_ERROR_ON_NULLPTR(_biases->buffer());
    ARM_COMPUTE_ERROR_ON_NULLPTR(_output_workspace);
    ARM_COMPUTE_ERROR_ON_NULLPTR(_output);

    // The transform cannot be constructed in configure() because the biases buffer
    // hasn't been allocated at that point.
    OutputTransform output_transform(_output_workspace, _matrix_stride, _matrix_row_stride,
                                     reinterpret_cast<T *>(_biases->buffer()), _output,
                                     _n_batches, _n_rows, _n_cols, _n_channels);

    const size_t fst = window.x().start();
    const size_t lst = window.x().end();
    output_transform.run(fst, lst);
}

template <typename T, int OutputTileRows, int OutputTileCols, int KernelRows, int KernelCols>
bool NEWinogradLayerTransformOutputKernel<T, OutputTileRows, OutputTileCols, KernelRows, KernelCols>::is_parallelisable() const
{
    return false;
}

template class NEWinogradLayerTransformOutputKernel<float, 2, 2, 3, 3>;
template class NEWinogradLayerTransformOutputKernel<float, 2, 2, 5, 5>;
} // namespace arm_compute
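
/* A minimal usage sketch (illustrative only, not part of the library): how a
 * caller such as NEWinogradLayer could chain the four kernels for one
 * F(2x2, 3x3) float pass. The buffer pointers, shapes and strides below are
 * hypothetical; a real caller derives workspace sizes from
 * get_weight_storage_size() / get_input_storage_size() /
 * get_output_storage_size() and strides from the get_matrix_stride()
 * overloads before allocating. Here m = number of tiles, k = input channels,
 * n = output channels.
 *
 *   NEWinogradLayerTransformWeightsKernel<float, 2, 2, 3, 3>   transform_weights;
 *   NEWinogradLayerTransformInputKernel<float, 2, 2, 3, 3>     transform_input;
 *   NEWinogradLayerBatchedGEMMKernel<float, float, 2, 2, 3, 3> batched_gemm;
 *   NEWinogradLayerTransformOutputKernel<float, 2, 2, 3, 3>    transform_output;
 *
 *   transform_weights.configure(weights, weights_ws, kernel_matrix_stride,
 *                               n_output_channels, n_input_channels);
 *   transform_input.configure(input_ptr, n_batches, n_rows, n_cols,
 *                             n_input_channels, PADDING_VALID, input_ws,
 *                             input_matrix_stride);
 *   batched_gemm.configure(batched_gemm.get_number_gemms(), m, k, n,
 *                          input_matrix_stride, input_row_stride,
 *                          kernel_matrix_stride, kernel_row_stride,
 *                          output_matrix_stride, output_row_stride,
 *                          input_ws, weights_ws, output_ws);
 *   transform_output.configure(biases, output_ws, output_matrix_stride,
 *                              output_ptr, n_batches, out_rows, out_cols,
 *                              n_output_channels);
 *
 *   // Weights are transformed once; the other three stages run per inference.
 *   NEScheduler::get().schedule(&transform_weights, Window::DimX);
 *   NEScheduler::get().schedule(&transform_input, Window::DimX);
 *   NEScheduler::get().schedule(&batched_gemm, Window::DimX);
 *   NEScheduler::get().schedule(&transform_output, Window::DimX);
 */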