/*
 * Copyright (c) 2017-2018 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef __ARM_COMPUTE_NEGEMMWINOGRADLAYERKERNEL_H__
#define __ARM_COMPUTE_NEGEMMWINOGRADLAYERKERNEL_H__

#include "arm_compute/core/NEON/INEKernel.h"
#include "arm_compute/core/NEON/kernels/convolution/common/convolution.hpp"
#include "arm_compute/core/NEON/kernels/convolution/common/tensor.hpp"
#include "arm_compute/core/NEON/kernels/convolution/winograd/batched_blocked_gemm.hpp"
#include "arm_compute/core/NEON/kernels/convolution/winograd/winograd_gemm.hpp"

#include <memory>

namespace arm_compute
{
class ITensor;

/** Interface for the NEON kernel that performs the Winograd input transform. */
template <typename T>
class INEWinogradLayerTransformInputKernel : public INEKernel
{
public:
    /** Determine how much memory (in units of TIn) to allocate for the
     * transformed input.
     *
     * @param[in] n_batches    Number of batches in the input tensor.
     * @param[in] n_channels   Number of feature maps in the input tensor.
     * @param[in] n_rows       Number of rows in each feature map.
     * @param[in] n_cols       Number of columns in each feature map.
     * @param[in] same_padding Use "SAME" padding, otherwise use "VALID".
     */
    virtual unsigned int get_input_storage_size(int n_batches, int n_channels, int n_rows, int n_cols, bool same_padding) const = 0;

    /** Get the stride between matrices in the input workspace.
     *
     * @param[in] kernel_shape The shape of the weights tensor.
     * @param[in] input_shape  The shape of the input tensor.
     * @param[in] padding_type The type of padding to be used.
     *
     * @return Stride expressed in bytes.
     */
    virtual int get_matrix_stride(const KernelShape &kernel_shape, const Tensor4DShape &input_shape, const PaddingType padding_type) const = 0;

    /** Configure the input transform kernel.
     *
     * @param[in]  input         Input tensor data.
     * @param[in]  n_batches     Number of batches in the input tensor.
     * @param[in]  n_rows        Number of rows in the input tensor.
     * @param[in]  n_cols        Number of columns in the input tensor.
     * @param[in]  n_channels    Number of channels in the input tensor.
     * @param[in]  padding       Padding type.
     * @param[out] output        Base of output matrices.
     * @param[in]  matrix_stride Stride between output matrices.
     */
    virtual void configure(const T *const input, const int n_batches, const int n_rows, const int n_cols, const int n_channels, const PaddingType padding, T *const output, const int matrix_stride) = 0;

    virtual ~INEWinogradLayerTransformInputKernel()
    {
    }
};

/** NEON kernel to perform the Winograd input transform. */
template <typename T, int OutputTileRows, int OutputTileCols, int KernelRows, int KernelCols>
class NEWinogradLayerTransformInputKernel : public INEWinogradLayerTransformInputKernel<T>
{
public:
    /** Determine how much memory (in units of TIn) to allocate for the
     * transformed input.
     *
     * @param[in] n_batches    Number of batches in the input tensor.
     * @param[in] n_channels   Number of feature maps in the input tensor.
     * @param[in] n_rows       Number of rows in each feature map.
     * @param[in] n_cols       Number of columns in each feature map.
     * @param[in] same_padding Use "SAME" padding, otherwise use "VALID".
     */
    unsigned int get_input_storage_size(int n_batches, int n_channels, int n_rows, int n_cols, bool same_padding) const override;

    /** Get the stride between matrices in the input workspace.
     *
     * @param[in] kernel_shape The shape of the weights tensor.
     * @param[in] input_shape  The shape of the input tensor.
     * @param[in] padding_type The type of padding to be used.
     *
     * @return Stride expressed in bytes.
     */
    int get_matrix_stride(const KernelShape &kernel_shape, const Tensor4DShape &input_shape, const PaddingType padding_type) const override;

    /** Default constructor */
    NEWinogradLayerTransformInputKernel();

    const char *name() const override
    {
        return "NEWinogradLayerTransformInputKernel";
    }

    /** Configure the input transform kernel.
     *
     * @param[in]  input         Input tensor data.
     * @param[in]  n_batches     Number of batches in the input tensor.
     * @param[in]  n_rows        Number of rows in the input tensor.
     * @param[in]  n_cols        Number of columns in the input tensor.
     * @param[in]  n_channels    Number of channels in the input tensor.
     * @param[in]  padding       Padding type.
     * @param[out] output        Base of output matrices.
     * @param[in]  matrix_stride Stride between output matrices.
     */
    void configure(const T *const input, const int n_batches, const int n_rows, const int n_cols, const int n_channels, const PaddingType padding, T *const output, const int matrix_stride) override;

    // Inherited methods overridden:
    void run(const Window &window, const ThreadInfo &info) override;
    bool is_parallelisable() const override;

    using WinogradBase = winograd::WinogradGEMM<OutputTileRows, OutputTileCols, KernelRows, KernelCols>;
    using WinogradConv = typename WinogradBase::template Convolution<T, T>;

private:
    using InputTransform = typename WinogradBase::template InputTransform<T>;
    std::unique_ptr<InputTransform> _transform;
};
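/* Illustrative usage sketch, not part of the original header: one plausible way to drive the
 * input transform of an F(2x2, 3x3) Winograd convolution on float data. The tensor sizes, the
 * NHWC input pointer `in_nhwc`, the aggregate-initialised shapes and the PADDING_SAME spelling
 * are assumptions made for the example; error handling and scheduling are omitted.
 *
 *     using InputTransformKernel = NEWinogradLayerTransformInputKernel<float, 2, 2, 3, 3>;
 *
 *     InputTransformKernel transform;
 *     const int            n_batches = 1, n_rows = 56, n_cols = 56, n_channels = 64;
 *     const KernelShape    kernel_shape{ 64, 3, 3, n_channels };                      // assumed field order: out, rows, cols, in
 *     const Tensor4DShape  input_shape{ n_batches, n_rows, n_cols, n_channels };
 *
 *     // Workspace sized in elements of the input type, as documented by get_input_storage_size().
 *     std::vector<float> input_workspace(transform.get_input_storage_size(n_batches, n_channels, n_rows, n_cols, true));
 *     const int          matrix_stride = transform.get_matrix_stride(kernel_shape, input_shape, PADDING_SAME);
 *
 *     transform.configure(in_nhwc, n_batches, n_rows, n_cols, n_channels, PADDING_SAME, input_workspace.data(), matrix_stride);
 *     // The configured kernel is then dispatched through the scheduler, e.g. NEScheduler::get().schedule(&transform, Window::DimX).
 */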
/** Interface for the NEON kernel that performs the Winograd output transform. */
template <typename T>
class INEWinogradLayerTransformOutputKernel : public INEKernel
{
public:
    /** Determine how much memory (in units of TOut) to allocate for the
     * (Winograd domain) output.
     *
     * @param[in] n_batches         Number of batches in the output tensor.
     * @param[in] n_rows            Number of rows in each feature map of the input tensor.
     * @param[in] n_cols            Number of columns in each feature map of the input tensor.
     * @param[in] n_output_channels Number of feature maps in the output tensor.
     * @param[in] same_padding      Use "SAME" padding, otherwise use "VALID".
     */
    virtual unsigned int get_output_storage_size(int n_batches, int n_rows, int n_cols, int n_output_channels, bool same_padding) const = 0;

    /** Get the stride between matrices in the output workspace.
     *
     * @param[in] kernel_shape The shape of the weights tensor.
     * @param[in] input_shape  The shape of the input tensor.
     * @param[in] padding_type The type of padding to be used.
     *
     * @return Stride expressed in bytes.
     */
    virtual int get_matrix_stride(const KernelShape &kernel_shape, const Tensor4DShape &input_shape, const PaddingType padding_type) const = 0;

    /** Get the output shape of a convolution.
     *
     * @param[in] kernel_shape The shape of the weights tensor.
     * @param[in] in_shape     The shape of the input tensor.
     * @param[in] padding      The type of padding to be used.
     *
     * @return Shape of the output tensor.
     */
    virtual Tensor4DShape get_output_shape(const KernelShape &kernel_shape, const Tensor4DShape &in_shape, const PaddingType padding) const = 0;

    /** Configure the output transform kernel.
     *
     * @param[in]  biases              Pointer to the biases tensor.
     * @param[in]  output_workingspace Pointer to working space for the output tensor in the Winograd domain.
     * @param[in]  matrix_stride       Output matrix stride, can be computed with winograd::WinogradGEMM<2, 2, 3, 3>::Convolution::get_output_matrix_stride()
     * @param[out] output              Pointer to NHWC ordered output tensor, in the spatial domain.
     * @param[in]  n_batches           Number of batches in the input tensor.
     * @param[in]  n_rows              Number of rows in the output tensor.
     * @param[in]  n_cols              Number of columns in the output tensor.
     * @param[in]  n_channels          Number of feature maps in the output tensor.
     */
    virtual void configure(
        const ITensor *biases,
        const T *const output_workingspace,
        const int      matrix_stride,
        T *const       output,
        const int      n_batches,
        const int      n_rows,
        const int      n_cols,
        const int      n_channels) = 0;

    virtual ~INEWinogradLayerTransformOutputKernel()
    {
    }
};
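/* Illustrative note, not part of the original header: get_output_shape() follows the usual
 * convolution shape arithmetic, so the numbers below are worked examples rather than values
 * taken from the library.
 *
 *     // For in_shape = { 1, 56, 56, 64 } (NHWC) and a 64 x 3 x 3 x 64 kernel_shape:
 *     //   "VALID" padding -> 54 x 54 output rows/columns (each spatial dim shrinks by kernel_dim - 1),
 *     //   "SAME"  padding -> 56 x 56 output rows/columns (spatial dims preserved).
 *     // The batch count is unchanged and the output channel count is taken from the kernel shape.
 */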
/** NEON kernel to perform the Winograd output transform. */
template <typename T, int OutputTileRows, int OutputTileCols, int KernelRows, int KernelCols>
class NEWinogradLayerTransformOutputKernel : public INEWinogradLayerTransformOutputKernel<T>
{
public:
    const char *name() const override
    {
        return "NEWinogradLayerTransformOutputKernel";
    }
    /** Constructor */
    NEWinogradLayerTransformOutputKernel();

    /** Prevent instances of this class from being copied (As this class contains pointers) */
    NEWinogradLayerTransformOutputKernel(const NEWinogradLayerTransformOutputKernel &) = delete;
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    NEWinogradLayerTransformOutputKernel &operator=(const NEWinogradLayerTransformOutputKernel &) = delete;
    /** Allow instances of this class to be moved */
    NEWinogradLayerTransformOutputKernel(NEWinogradLayerTransformOutputKernel &&) = default;
    /** Allow instances of this class to be moved */
    NEWinogradLayerTransformOutputKernel &operator=(NEWinogradLayerTransformOutputKernel &&) = default;
    /** Default destructor */
    ~NEWinogradLayerTransformOutputKernel() = default;

    // Inherited methods overridden:
    /** Determine how much memory (in units of TOut) to allocate for the
     * (Winograd domain) output.
     *
     * @param[in] n_batches         Number of batches in the output tensor.
     * @param[in] n_rows            Number of rows in each feature map of the input tensor.
     * @param[in] n_cols            Number of columns in each feature map of the input tensor.
     * @param[in] n_output_channels Number of feature maps in the output tensor.
     * @param[in] same_padding      Use "SAME" padding, otherwise use "VALID".
     */
    unsigned int get_output_storage_size(int n_batches, int n_rows, int n_cols, int n_output_channels, bool same_padding) const override;

    /** Get the stride between matrices in the output workspace.
     *
     * @param[in] kernel_shape The shape of the weights tensor.
     * @param[in] input_shape  The shape of the input tensor.
     * @param[in] padding_type The type of padding to be used.
     *
     * @return Stride expressed in bytes.
     */
    int get_matrix_stride(const KernelShape &kernel_shape, const Tensor4DShape &input_shape, const PaddingType padding_type) const override;

    /** Get the output shape of a convolution.
     *
     * @param[in] kernel_shape The shape of the weights tensor.
     * @param[in] in_shape     The shape of the input tensor.
     * @param[in] padding      The type of padding to be used.
     *
     * @return Shape of the output tensor.
     */
    Tensor4DShape get_output_shape(const KernelShape &kernel_shape, const Tensor4DShape &in_shape, const PaddingType padding) const override;

    /** Configure the output transform kernel.
     *
     * @param[in]  biases              Pointer to the biases tensor.
     * @param[in]  output_workingspace Pointer to working space for the output tensor in the Winograd domain.
     * @param[in]  matrix_stride       Output matrix stride, can be computed with winograd::WinogradGEMM<2, 2, 3, 3>::Convolution::get_output_matrix_stride()
     * @param[out] output              Pointer to NHWC ordered output tensor, in the spatial domain.
     * @param[in]  n_batches           Number of batches in the input tensor.
     * @param[in]  n_rows              Number of rows in the output tensor.
     * @param[in]  n_cols              Number of columns in the output tensor.
     * @param[in]  n_channels          Number of feature maps in the output tensor.
     */
    void configure(
        const ITensor *biases,
        const T *const output_workingspace,
        const int      matrix_stride,
        T *const       output,
        const int      n_batches,
        const int      n_rows,
        const int      n_cols,
        const int      n_channels) override;

    void run(const Window &window, const ThreadInfo &info) override;
    bool is_parallelisable() const override;

private:
    using WinogradBase    = winograd::WinogradGEMM<OutputTileRows, OutputTileCols, KernelRows, KernelCols>;
    using WinogradConv    = typename WinogradBase::template Convolution<T, T>;
    using OutputTransform = typename WinogradBase::template OutputTransform<T>;

    const ITensor *_biases;
    const T       *_output_workspace;
    int            _matrix_stride;
    int            _matrix_row_stride;
    T             *_output;
    int            _n_batches;
    int            _n_rows;
    int            _n_cols;
    int            _n_channels;
};
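/* Illustrative usage sketch, not part of the original header: configuring the output transform of
 * an F(2x2, 3x3) float convolution. `biases`, `winograd_out` and `dst_nhwc` stand for a bias
 * ITensor, the Winograd-domain workspace produced by the batched GEMMs and an NHWC destination
 * buffer prepared elsewhere; the shapes, their initialisers and the Tensor4DShape field names
 * are assumptions made for the example.
 *
 *     NEWinogradLayerTransformOutputKernel<float, 2, 2, 3, 3> output_transform;
 *
 *     const KernelShape   kernel_shape{ 64, 3, 3, 64 };
 *     const Tensor4DShape input_shape{ 1, 56, 56, 64 };
 *     const Tensor4DShape out_shape     = output_transform.get_output_shape(kernel_shape, input_shape, PADDING_SAME);
 *     const int           matrix_stride = output_transform.get_matrix_stride(kernel_shape, input_shape, PADDING_SAME);
 *
 *     output_transform.configure(biases, winograd_out, matrix_stride, dst_nhwc,
 *                                out_shape.n_batches, out_shape.n_rows, out_shape.n_cols, out_shape.n_channels);
 *     // As with the other kernels, the configured transform is then run through the scheduler.
 */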
/** Interface for the NEON kernel that performs the Winograd weights transform. */
template <typename T>
class INEWinogradLayerTransformWeightsKernel : public INEKernel
{
public:
    /** Determine how much memory (in units of T) to allocate for the
     * transformed weights.
     *
     * @param[in] n_output_channels Number of output feature maps.
     * @param[in] n_input_channels  Number of input feature maps.
     */
    virtual unsigned int get_weight_storage_size(int n_output_channels, int n_input_channels) const = 0;

    /** Get the stride between matrices in the kernel workspace.
     *
     * @param[in] kernel_shape The shape of the weights tensor.
     *
     * @return Stride expressed in bytes.
     */
    virtual int get_matrix_stride(const KernelShape &kernel_shape) const = 0;

    /** Configure the weights transform kernel.
     *
     * @param[in]  weights_hwio      Pointer to the weights tensor.
     * @param[out] output            Pointer to working space for the output tensor in the Winograd domain.
     * @param[in]  matrix_stride     Stride across matrices in the output workspace.
     * @param[in]  n_output_channels Number of filters.
     * @param[in]  n_input_channels  Number of channels in each filter.
     */
    virtual void configure(const ITensor *weights_hwio, T *const output, const int matrix_stride, const int n_output_channels, const int n_input_channels) = 0;

    virtual ~INEWinogradLayerTransformWeightsKernel()
    {
    }
};

/** NEON kernel to perform the Winograd weights transform. */
template <typename T, int OutputTileRows, int OutputTileCols, int KernelRows, int KernelCols>
class NEWinogradLayerTransformWeightsKernel final : public INEWinogradLayerTransformWeightsKernel<T>
{
public:
    /** Default constructor */
    NEWinogradLayerTransformWeightsKernel();
    const char *name() const override
    {
        return "NEWinogradLayerTransformWeightsKernel";
    }

    // Inherited methods overridden:
    void configure(const ITensor *weights_hwio, T *const output, const int matrix_stride, const int n_output_channels, const int n_input_channels) override;
    unsigned int get_weight_storage_size(int n_output_channels, int n_input_channels) const override;
    int get_matrix_stride(const KernelShape &kernel_shape) const override;
    void run(const Window &window, const ThreadInfo &info) override;
    bool is_parallelisable() const override;

private:
    using WinogradBase     = winograd::WinogradGEMM<OutputTileRows, OutputTileCols, KernelRows, KernelCols>;
    using WinogradConv     = typename WinogradBase::template Convolution<T, T>;
    using WeightsTransform = typename WinogradBase::template WeightsTransform<T>;
    std::unique_ptr<WeightsTransform> _transform;
};
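/* Illustrative usage sketch, not part of the original header: transforming HWIO-ordered weights
 * into the Winograd domain for an F(2x2, 3x3) float convolution. `weights_hwio` stands for a
 * weights ITensor prepared by the caller; the channel counts and the KernelShape initialiser are
 * assumptions made for the example.
 *
 *     NEWinogradLayerTransformWeightsKernel<float, 2, 2, 3, 3> weights_transform;
 *
 *     const int         n_output_channels = 64, n_input_channels = 64;
 *     const KernelShape kernel_shape{ n_output_channels, 3, 3, n_input_channels };
 *
 *     // Workspace sized in elements of T, as documented by get_weight_storage_size().
 *     std::vector<float> weights_workspace(weights_transform.get_weight_storage_size(n_output_channels, n_input_channels));
 *     const int          matrix_stride = weights_transform.get_matrix_stride(kernel_shape);
 *
 *     weights_transform.configure(weights_hwio, weights_workspace.data(), matrix_stride, n_output_channels, n_input_channels);
 */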
/** Interface for the NEON kernel that performs the batched Winograd GEMMs. */
template <typename TIn, typename TOut>
class INEWinogradLayerBatchedGEMMKernel : public INEKernel
{
public:
    /** Get the number of GEMMs to compute */
    virtual unsigned int get_number_gemms() const = 0;

    /** Initialise the kernel
     *
     * @param[in]  n_gemms         Number of GEMMs to compute.
     * @param[in]  M               in_shape.n_batches * tile_rows * tile_cols.
     * @param[in]  K               Number of channels in the input tensor.
     * @param[in]  N               Number of channels in the output tensor.
     * @param[in]  a_matrix_stride Stride between input matrices.
     * @param[in]  a_row_stride    Row stride inside the input matrix.
     * @param[in]  b_matrix_stride Stride between weights matrices.
     * @param[in]  b_row_stride    Row stride inside the weights matrix.
     * @param[in]  c_matrix_stride Stride between output matrices.
     * @param[in]  c_row_stride    Row stride inside the output matrix.
     * @param[in]  a_ptr           Input workspace.
     * @param[in]  b_ptr           Kernel workspace.
     * @param[out] c_ptr           Output workspace.
     */
    virtual void configure(
        const unsigned int n_gemms,
        const int M, const int K, const int N,
        const int a_matrix_stride,
        const int a_row_stride,
        const int b_matrix_stride,
        const int b_row_stride,
        const int c_matrix_stride,
        const int c_row_stride,
        const TIn *const a_ptr,
        const TIn *const b_ptr,
        TOut *const c_ptr) = 0;

    /** Get the number of rows in an output tile */
    virtual int get_output_tile_rows() const = 0;
    /** Get the number of columns in an output tile */
    virtual int get_output_tile_cols() const = 0;
    /** Get the number of blocks */
    virtual int get_number_blocks() const = 0;
};

/** NEON kernel to perform the batched GEMMs of a Winograd convolution. */
template <typename TIn, typename TOut, int OutputTileRows, int OutputTileCols, int KernelRows, int KernelCols>
class NEWinogradLayerBatchedGEMMKernel : public INEWinogradLayerBatchedGEMMKernel<TIn, TOut>
{
public:
    using WinogradBase = winograd::WinogradGEMM<OutputTileRows, OutputTileCols, KernelRows, KernelCols>;
    using WinogradConv = typename WinogradBase::template Convolution<TIn, TOut>;
    using MultiGEMM    = winograd::BatchedBlockedGemm<TIn, TOut>;

    static const int _output_tile_rows = OutputTileRows;
    static const int _output_tile_cols = OutputTileCols;

    const char *name() const override
    {
        return "NEWinogradLayerBatchedGEMMKernel";
    }
    /** Constructor */
    NEWinogradLayerBatchedGEMMKernel();

    /** Prevent instances of this class from being copied (As this class contains pointers) */
    NEWinogradLayerBatchedGEMMKernel(const NEWinogradLayerBatchedGEMMKernel &) = delete;
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    NEWinogradLayerBatchedGEMMKernel &operator=(const NEWinogradLayerBatchedGEMMKernel &) = delete;
    /** Allow instances of this class to be moved */
    NEWinogradLayerBatchedGEMMKernel(NEWinogradLayerBatchedGEMMKernel &&) = default;
    /** Allow instances of this class to be moved */
    NEWinogradLayerBatchedGEMMKernel &operator=(NEWinogradLayerBatchedGEMMKernel &&) = default;
    /** Default destructor */
    ~NEWinogradLayerBatchedGEMMKernel() = default;

    // Inherited methods overridden:
    unsigned int get_number_gemms() const override;
    int get_output_tile_rows() const override;
    int get_output_tile_cols() const override;
    int get_number_blocks() const override;

    /** Initialise the kernel
     *
     * @param[in]  n_gemms         Number of GEMMs to compute.
     * @param[in]  M               in_shape.n_batches * tile_rows * tile_cols.
     * @param[in]  K               Number of channels in the input tensor.
     * @param[in]  N               Number of channels in the output tensor.
     * @param[in]  a_matrix_stride Stride between input matrices.
     * @param[in]  a_row_stride    Row stride inside the input matrix.
     * @param[in]  b_matrix_stride Stride between weights matrices.
     * @param[in]  b_row_stride    Row stride inside the weights matrix.
     * @param[in]  c_matrix_stride Stride between output matrices.
     * @param[in]  c_row_stride    Row stride inside the output matrix.
     * @param[in]  a_ptr           Input workspace.
     * @param[in]  b_ptr           Kernel workspace.
     * @param[out] c_ptr           Output workspace.
     */
    void configure(
        const unsigned int n_gemms,
        const int M, const int K, const int N,
        const int a_matrix_stride,
        const int a_row_stride,
        const int b_matrix_stride,
        const int b_row_stride,
        const int c_matrix_stride,
        const int c_row_stride,
        const TIn *const a_ptr,
        const TIn *const b_ptr,
        TOut *const c_ptr) override;

    void run(const Window &window, const ThreadInfo &info) override;

private:
    std::unique_ptr<MultiGEMM> _gemms;
};
} // namespace arm_compute
#endif /*__ARM_COMPUTE_NEGEMMWINOGRADLAYERKERNEL_H__*/