author     Michele Di Giorgio <michele.digiorgio@arm.com>    2020-06-09 14:52:15 +0100
committer  Michele Di Giorgio <michele.digiorgio@arm.com>    2020-06-17 15:33:51 +0000
commit     6ad60af32af672f27e152bf37790cd0c0c4db696 (patch)
tree       43fb0f8926d30801ef1355676545297c82ae248a /src
parent     1fd2c80692ed8ecefc4d8deb783564ad19eaf70c (diff)
download   ComputeLibrary-6ad60af32af672f27e152bf37790cd0c0c4db696.tar.gz
COMPMID-3520: Move ndrange.hpp header from arm_gemm to assembly
Change-Id: I6352a520ce38230cdfbad346b176cb659ab242a7
Signed-off-by: Michele Di Giorgio <michele.digiorgio@arm.com>
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/3327
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Georgios Pinitas <georgios.pinitas@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Diffstat (limited to 'src')
-rw-r--r--  src/core/NEON/kernels/NEWinogradConvolutionLayerKernel.cpp    |   4
-rw-r--r--  src/core/NEON/kernels/NEWinogradConvolutionLayerKernel.h      | 597
-rw-r--r--  src/core/NEON/kernels/arm_gemm/gemm_hybrid.hpp                 |   2
-rw-r--r--  src/core/NEON/kernels/arm_gemm/gemm_hybrid_quantized.hpp       |   2
-rw-r--r--  src/core/NEON/kernels/arm_gemm/gemm_native.hpp                 |   2
-rw-r--r--  src/core/NEON/kernels/assembly/Helpers.cpp                     |   4
-rw-r--r--  src/core/NEON/kernels/assembly/Helpers.h                       | 122
-rw-r--r--  src/core/NEON/kernels/assembly/NEGEMMAssemblyWrapperKernel.h   | 120
-rw-r--r--  src/core/NEON/kernels/assembly/arm_gemm.hpp                    | 176
-rw-r--r--  src/core/NEON/kernels/assembly/arm_gemm_compute_iface.hpp      | 122
-rw-r--r--  src/core/NEON/kernels/assembly/gemm_common.hpp                 | 201
-rw-r--r--  src/core/NEON/kernels/assembly/ndrange.hpp                     | 185
-rw-r--r--  src/core/NEON/kernels/convolution/winograd/winograd.hpp        | 621
-rw-r--r--  src/core/NEON/kernels/convolution/winograd/winograd_layer.hpp  | 207
-rw-r--r--  src/runtime/NEON/functions/NEGEMMAssemblyDispatch.cpp          |   6
-rw-r--r--  src/runtime/NEON/functions/NEWinogradConvolutionLayer.cpp      |   4
16 files changed, 2366 insertions, 9 deletions
diff --git a/src/core/NEON/kernels/NEWinogradConvolutionLayerKernel.cpp b/src/core/NEON/kernels/NEWinogradConvolutionLayerKernel.cpp
index 3100bf7308..c3da5ca0e2 100644
--- a/src/core/NEON/kernels/NEWinogradConvolutionLayerKernel.cpp
+++ b/src/core/NEON/kernels/NEWinogradConvolutionLayerKernel.cpp
@@ -21,7 +21,7 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#include "arm_compute/core/NEON/kernels/NEWinogradConvolutionLayerKernel.h"
+#include "src/core/NEON/kernels/NEWinogradConvolutionLayerKernel.h"
#include "arm_compute/core/AccessWindowStatic.h"
#include "arm_compute/core/Error.h"
@@ -35,6 +35,8 @@
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
#include "support/MemorySupport.h"
+#include "src/core/NEON/kernels/convolution/winograd/winograd_layer.hpp"
+
namespace arm_compute
{
//Batched Gemms
diff --git a/src/core/NEON/kernels/NEWinogradConvolutionLayerKernel.h b/src/core/NEON/kernels/NEWinogradConvolutionLayerKernel.h
new file mode 100644
index 0000000000..bd141ef50b
--- /dev/null
+++ b/src/core/NEON/kernels/NEWinogradConvolutionLayerKernel.h
@@ -0,0 +1,597 @@
+/*
+ * Copyright (c) 2017-2020 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_NEGEMMWINOGRADCONVOLUTIONLAYERKERNEL_H
+#define ARM_COMPUTE_NEGEMMWINOGRADCONVOLUTIONLAYERKERNEL_H
+
+#include "arm_compute/core/NEON/INEKernel.h"
+#include "arm_compute/core/NEON/kernels/convolution/common/convolution.hpp"
+#include "arm_compute/core/NEON/kernels/convolution/common/tensor.hpp"
+
+#include "src/core/NEON/kernels/convolution/winograd/winograd_layer.hpp"
+
+namespace arm_compute
+{
+// Forward declarations
+class ITensor;
+
+/** Interface for the NEON kernel to perform Winograd input transform. */
+class INEWinogradLayerTransformInputKernel : public INEKernel
+{
+public:
+ /** Get the working space required to perform the transformation.
+ *
+ * Note, the working space is only required when performing the
+ * transformation - hence it can be reused whenever the transformation is
+ * not running.
+ *
+ * @param num_threads The greatest number of threads that will be used to execute the transform.
+ * @return Size of working space required in bytes.
+ */
+ virtual unsigned int get_working_space_size(unsigned int num_threads) const = 0;
+
+ /** Determine how much memory (in units of TIn) to allocate for the
+ * transformed input.
+ *
+ * @param[in] num_batches Number of batches in the input tensor.
+ * @param[in] num_channels Number of feature maps in the input tensor.
+ * @param[in] num_rows Number of rows in each feature map.
+ * @param[in] num_cols Number of columns in each feature map.
+ * @param[in] same_padding Use "SAME" padding, otherwise use "VALID".
+ *
+ * @return Storage size (in units of TIn) required.
+ */
+ virtual unsigned int get_input_storage_size(int num_batches, int num_channels, int num_rows, int num_cols, bool same_padding) const = 0;
+
+ /** Gets the stride between matrices in the input workspace
+ *
+ * @param[in] num_batches Number of batches in the input tensor.
+ * @param[in] num_channels Number of feature maps in the input tensor.
+ * @param[in] num_rows Number of rows in each feature map.
+ * @param[in] num_cols Number of columns in each feature map.
+ * @param[in] same_padding Use "SAME" padding, otherwise use "VALID".
+ *
+ * @return Stride expressed in bytes.
+ */
+ virtual int get_matrix_stride(int num_batches, int num_channels, int num_rows, int num_cols, bool same_padding) const = 0;
+
+ /** Configure the input transform kernel.
+ *
+ * @param[in] input_nhwc Input tensor in NHWC data layout format.
+ * @param[in] num_batches Number of batches in input tensor.
+ * @param[in] num_rows Number of rows in input tensor.
+ * @param[in] num_cols Number of columns in input tensor.
+ * @param[in] num_channels Number of channels in input tensor.
+ * @param[in] padding Padding type.
+ * @param[out] output Base of output matrices.
+ * @param[in] matrix_stride Stride between output matrices.
+ * @param[in] workspace Tensor to be used as the working space during the computation.
+ */
+ virtual void configure(const ITensor *input_nhwc, const int num_batches, const int num_rows, const int num_cols, const int num_channels,
+ const PaddingType padding, ITensor *output, const int matrix_stride, ITensor *workspace) = 0;
+
+ /** Destructor */
+ virtual ~INEWinogradLayerTransformInputKernel()
+ {
+ }
+};
+
+/** NEON kernel to perform Winograd input transform. */
+template <typename T, int OutputTileRows, int OutputTileCols, int KernelRows, int KernelCols>
+class NEWinogradLayerTransformInputKernel : public INEWinogradLayerTransformInputKernel
+{
+public:
+ /** Prevent instances of this class from being copied (As this class contains pointers) */
+ NEWinogradLayerTransformInputKernel(const NEWinogradLayerTransformInputKernel &) = delete;
+ /** Prevent instances of this class from being copied (As this class contains pointers) */
+ NEWinogradLayerTransformInputKernel &operator=(const NEWinogradLayerTransformInputKernel &) = delete;
+ /** Allow instances of this class to be moved */
+ NEWinogradLayerTransformInputKernel(NEWinogradLayerTransformInputKernel &&) = default;
+ /** Allow instances of this class to be moved */
+ NEWinogradLayerTransformInputKernel &operator=(NEWinogradLayerTransformInputKernel &&) = default;
+ /** Default destructor */
+ ~NEWinogradLayerTransformInputKernel() = default;
+
+ /** Determine how much memory (in units of TIn) to allocate for the
+ * transformed input.
+ *
+ * @param[in] num_batches Number of batches in the input tensor.
+ * @param[in] num_channels Number of feature maps in the input tensor.
+ * @param[in] num_rows Number of rows in each feature map.
+ * @param[in] num_cols Number of columns in each feature map.
+ * @param[in] same_padding Use "SAME" padding, otherwise use "VALID".
+ *
+ * @return Storage size (in units of TIn) required.
+ */
+ unsigned int get_input_storage_size(
+ int num_batches,
+ int num_channels,
+ int num_rows,
+ int num_cols,
+ bool same_padding) const override;
+
+ /** Get the working space required to perform the transformation.
+ *
+ * Note, the working space is only required when performing the
+ * transformation - hence it can be reused whenever the transformation is
+ * not running.
+ *
+ * @param[in] num_threads The greatest number of threads that will be used to execute the transform.
+ *
+ * @return Size of working space required in bytes.
+ */
+ unsigned int get_working_space_size(unsigned int num_threads) const override;
+
+ /** Gets the stride between matrices in the input workspace
+ *
+ * @param[in] num_batches Number of batches in the input tensor.
+ * @param[in] num_channels Number of feature maps in the input tensor.
+ * @param[in] num_rows Number of rows in each feature map.
+ * @param[in] num_cols Number of columns in each feature map.
+ * @param[in] same_padding Use "SAME" padding, otherwise use "VALID".
+ *
+ * @return Stride expressed in bytes.
+ */
+ int get_matrix_stride(
+ int num_batches,
+ int num_channels,
+ int num_rows,
+ int num_cols,
+ bool same_padding) const override;
+
+ /** Default constructor */
+ NEWinogradLayerTransformInputKernel();
+
+ const char *name() const override
+ {
+ return "NEWinogradLayerTransformInputKernel";
+ }
+
+ /** Configure the input transform kernel.
+ *
+ * @param[in] input_nhwc Input tensor. Data types supported: F16/F32. Layout supported NHWC.
+ * @param[in] num_batches Number of batches in input tensor.
+ * @param[in] num_rows Number of rows in input tensor.
+ * @param[in] num_cols Number of columns in input tensor.
+ * @param[in] num_channels Number of channels in input tensor.
+ * @param[in] padding Padding type.
+ * @param[out] output Base of output matrices.
+ * @param[in] matrix_stride Stride between output matrices.
+ * @param[in] workspace Tensor to be used as the working space during the computation.
+ */
+ void configure(
+ const ITensor *input_nhwc,
+ const int num_batches,
+ const int num_rows,
+ const int num_cols,
+ const int num_channels,
+ const PaddingType padding,
+ ITensor *output,
+ const int matrix_stride,
+ ITensor *workspace) override;
+
+ // Inherited methods overridden:
+ void run(const Window &window, const ThreadInfo &info) override;
+
+ /** Winograd base kernel */
+ using WinogradBase = winograd::WinogradGEMM<OutputTileRows, OutputTileCols, KernelRows, KernelCols, winograd::WinogradRoots::Integers>;
+ /** Winograd convolution kernel */
+ using WinogradConv = typename WinogradBase::template Convolution<T, T>;
+
+ /** Static function to check if given info will lead to a valid configuration of @ref NEWinogradLayerTransformInputKernel
+ *
+ * @param[in] input First tensor input info. Data types supported: F16/F32.
+ * @param[in] output Output tensor info. Data types supported: same as @p input.
+ * @param[in] winograd_info Contains Winograd's information described in @ref WinogradInfo
+ *
+ * @return a status
+ */
+ static Status validate(const ITensorInfo *input, const ITensorInfo *output, const WinogradInfo &winograd_info);
+
+private:
+ using InputTransform = typename WinogradBase::template InputTransform<T, T>;
+
+ std::unique_ptr<InputTransform> _transform{ nullptr };
+ const ITensor *_input_nhwc;
+ int _num_batches; /**< Number of batches in input tensor. */
+ int _num_rows; /**< Number of rows in input tensor. */
+ int _num_cols; /**< Number of columns in input tensor. */
+ int _num_channels; /**< Number of channels in input tensor. */
+ PaddingType _padding; /**< Padding type. */
+ ITensor *_output; /**< Base of output matrices. */
+ int _matrix_stride; /**< Stride between output matrices. */
+ int _padding_top; /**< Padding to apply to the top of the image. */
+ int _padding_left; /**< Padding to apply to the left of the image. */
+ int _padding_right; /**< Padding to apply to the right of the image. */
+ int _padding_bottom; /**< Padding to apply to the bottom of the image. */
+ ITensor *_workspace;
+};
+
+/** Interface for the NEON kernel to perform Winograd output transform. */
+class INEWinogradLayerTransformOutputKernel : public INEKernel
+{
+public:
+ /** Get the working space required to perform the transformation.
+ *
+ * Note, the working space is only required when performing the
+ * transformation - hence it can be reused whenever the transformation is
+ * not running.
+ *
+ * @param[in] num_threads The greatest number of threads that will be used to execute the transform.
+ *
+ * @return Size of working space required in bytes.
+ */
+ virtual unsigned int get_working_space_size(unsigned int num_threads) const = 0;
+
+ /** Determine how much memory (in units of TOut) to allocate for the
+ * (Winograd domain) output.
+ *
+ * @param[in] num_batches Number of batches in the output tensor.
+ * @param[in] num_rows Number of rows in each feature map of the input tensor.
+ * @param[in] num_cols Number of columns in each feature map of the input tensor.
+ * @param[in] num_output_channels Number of feature maps in the output tensor.
+ *
+ * @return Storage size (in units of TOut) required.
+ */
+ virtual unsigned int get_output_storage_size(int num_batches, int num_rows, int num_cols, int num_output_channels) const = 0;
+
+ /** Gets the stride between matrices in the output workspace
+ *
+ * @param[in] num_batches Number of batches in the output tensor.
+ * @param[in] num_rows Number of rows in each feature map of the input tensor.
+ * @param[in] num_cols Number of columns in each feature map of the input tensor.
+ * @param[in] num_output_channels Number of feature maps in the output tensor.
+ *
+ * @return Stride expressed in bytes.
+ */
+ virtual int get_matrix_stride(int num_batches, int num_rows, int num_cols, int num_output_channels) const = 0;
+
+ /** Get the output shape of a convolution.
+ *
+ * @param[in] num_rows Number of rows in each feature map of the input tensor.
+ * @param[in] num_cols Number of columns in each feature map of the input tensor.
+ * @param[in] padding_same True if padding is SAME, false otherwise
+ *
+ * @return Shape of the output tensor
+ */
+ virtual std::pair<unsigned int, unsigned int> get_output_shape(
+ int num_rows, /* Number of rows in each feature map of the input tensor. */
+ int num_cols, /* Number of columns in each feature map of the input tensor. */
+ bool padding_same /* True if padding is SAME, false otherwise */
+ ) const = 0;
+
+ /** Configure the output transform kernel.
+ *
+ * @param[in] biases Pointer to the biases tensor.
+ * @param[in] transformed_output Pointer to working space for the output tensor in the Winograd domain.
+ * @param[in] matrix_stride Output matrix stride, can be computed with winograd::WinogradGEMM<2, 2, 3, 3>::Convolution<float, float>::get_output_matrix_stride()
+ * @param[out] output_nhwc Pointer to a tensor with NHWC data layout, in the spatial domain.
+ * @param[in] num_batches Number of batches in the input tensor.
+ * @param[in] num_rows Number of rows in output tensor.
+ * @param[in] num_cols Number of columns in output tensor.
+ * @param[in] num_channels Number of feature maps in the output tensor.
+ * @param[in] workspace Tensor to be used as the working space during the computation.
+ * @param[in] activation Activation to be used
+ */
+ virtual void configure(
+ const ITensor *biases,
+ const ITensor *transformed_output,
+ const int matrix_stride,
+ ITensor *output_nhwc,
+ const int num_batches,
+ const int num_rows,
+ const int num_cols,
+ const int num_channels,
+ ITensor *workspace,
+ const arm_gemm::Activation &activation) = 0;
+
+ virtual ~INEWinogradLayerTransformOutputKernel()
+ {
+ }
+};
+
+/** NEON kernel to perform Winograd output transform. */
+template <typename T, int OutputTileRows, int OutputTileCols, int KernelRows, int KernelCols>
+class NEWinogradLayerTransformOutputKernel : public INEWinogradLayerTransformOutputKernel
+{
+public:
+ const char *name() const override
+ {
+ return "NEWinogradLayerTransformOutputKernel";
+ }
+ /** Constructor */
+ NEWinogradLayerTransformOutputKernel();
+
+ /** Prevent instances of this class from being copied (As this class contains pointers) */
+ NEWinogradLayerTransformOutputKernel(const NEWinogradLayerTransformOutputKernel &) = delete;
+ /** Prevent instances of this class from being copied (As this class contains pointers) */
+ NEWinogradLayerTransformOutputKernel &operator=(const NEWinogradLayerTransformOutputKernel &) = delete;
+ /** Allow instances of this class to be moved */
+ NEWinogradLayerTransformOutputKernel(NEWinogradLayerTransformOutputKernel &&) = default;
+ /** Allow instances of this class to be moved */
+ NEWinogradLayerTransformOutputKernel &operator=(NEWinogradLayerTransformOutputKernel &&) = default;
+ /** Default destructor */
+ ~NEWinogradLayerTransformOutputKernel() = default;
+
+ // Inherited methods overridden:
+ /** Determine how much memory (in units of TOut) to allocate for the
+ * (Winograd domain) output.
+ *
+ * @param[in] num_batches Number of batches in the output tensor.
+ * @param[in] num_rows Number of rows in each feature map of the input tensor.
+ * @param[in] num_cols Number of columns in each feature map of the input tensor.
+ * @param[in] num_output_channels Number of feature maps in the output tensor.
+ *
+ * @return Storage size (in units of TOut) required.
+ */
+ unsigned int get_output_storage_size(int num_batches, int num_rows, int num_cols, int num_output_channels) const override;
+
+ /** Gets the stride between matrices in the output workspace
+ *
+ * @param[in] num_batches Number of batches in the output tensor.
+ * @param[in] num_rows Number of rows in each feature map of the input tensor.
+ * @param[in] num_cols Number of columns in each feature map of the input tensor.
+ * @param[in] num_output_channels Number of feature maps in the output tensor.
+ *
+ * @return Stride expressed in bytes.
+ */
+ int get_matrix_stride(int num_batches, int num_rows, int num_cols, int num_output_channels) const override;
+ /** Get the output shape of a convolution.
+ *
+ * @param[in] num_rows Number of rows in each feature map of the input tensor.
+ * @param[in] num_cols Number of columns in each feature map of the input tensor.
+ * @param[in] padding_same True if padding is SAME, false otherwise
+ *
+ * @return Shape of the output tensor
+ */
+ std::pair<unsigned int, unsigned int> get_output_shape(
+ int num_rows, /* Number of rows in each feature map of the input tensor. */
+ int num_cols, /* Number of columns in each feature map of the input tensor. */
+ bool padding_same) const override;
+
+ /** Get the working space required to perform the transformation.
+ *
+ * Note, the working space is only required when performing the
+ * transformation - hence it can be reused whenever the transformation is
+ * not running.
+ *
+ * @param[in] num_threads The greatest number of threads that will be used to execute the transform.
+ *
+ * @return Size of working space required in bytes.
+ */
+ unsigned int get_working_space_size(unsigned int num_threads) const override;
+
+ /** Configure the output transform kernel.
+ *
+ * @param[in] biases Pointer to the biases tensor.
+ * @param[in] transformed_output Pointer to working space for the output tensor in the Winograd domain.
+ * @param[in] matrix_stride Output matrix stride, can be computed with winograd::WinogradGEMM<2, 2, 3, 3>::Convolution<float, float>::get_output_matrix_stride()
+ * @param[out] output_nhwc Pointer to a tensor with NHWC data layout, in the spatial domain.
+ * @param[in] num_batches Number of batches in the input tensor.
+ * @param[in] num_rows Number of rows in output tensor.
+ * @param[in] num_cols Number of columns in output tensor.
+ * @param[in] num_channels Number of feature maps in the output tensor.
+ * @param[in] workspace Tensor to be used as the working space during the computation.
+ * @param[in] activation Activation to be used
+ */
+ void configure(
+ const ITensor *biases,
+ const ITensor *transformed_output,
+ const int matrix_stride,
+ ITensor *output_nhwc,
+ const int num_batches,
+ const int num_rows,
+ const int num_cols,
+ const int num_channels,
+ ITensor *workspace,
+ const arm_gemm::Activation &activation) override;
+
+ void run(const Window &window, const ThreadInfo &info) override;
+
+ /** Static function to check if given info will lead to a valid configuration of @ref NEWinogradLayerTransformOutputKernel
+ *
+ * @param[in] input Source tensor info with shape [C, N, 16, batches] or [C, N, 36, batches]. Data types supported: F16/F32.
+ * @param[in] bias Biases tensor info. Shared biases supported. Biases are 1D tensor with dimensions [OFM]. It can be a nullptr. Data type supported: as @p input
+ * @param[in] output Destination tensor info with shape [output_convolved_dims.width, output_convolved_dims.height, C, batches]. Data type supported: same as @p input
+ * @param[in] winograd_info Contains Winograd's information described in @ref WinogradInfo
+ *
+ * @return a status
+ */
+ static Status validate(const ITensorInfo *input, const ITensorInfo *bias, const ITensorInfo *output, const WinogradInfo &winograd_info);
+
+private:
+ using WinogradBase = winograd::WinogradGEMM<OutputTileRows, OutputTileCols, KernelRows, KernelCols, winograd::WinogradRoots::Integers>;
+ using WinogradConv = typename WinogradBase::template Convolution<T, T>;
+ using OutputTransform = typename WinogradBase::template OutputTransform<T, T>;
+
+ std::unique_ptr<OutputTransform> _transform{ nullptr };
+ const ITensor *_biases;
+ const ITensor *_transformed_output;
+ ITensor *_workspace;
+ int _matrix_stride;
+ int _matrix_row_stride;
+ ITensor *_output_nhwc;
+ int _num_batches;
+ int _num_rows;
+ int _num_cols;
+ int _num_channels;
+};
+
+/** Interface for the NEON kernel to perform Winograd weights transform. */
+class INEWinogradLayerTransformWeightsKernel : public INEKernel
+{
+public:
+ /** Allow instances of this class to be copied */
+ INEWinogradLayerTransformWeightsKernel(const INEWinogradLayerTransformWeightsKernel &) = default;
+ /** Allow instances of this class to be copied */
+ INEWinogradLayerTransformWeightsKernel &operator=(const INEWinogradLayerTransformWeightsKernel &) = default;
+ /** Allow instances of this class to be moved */
+ INEWinogradLayerTransformWeightsKernel(INEWinogradLayerTransformWeightsKernel &&) = default;
+ /** Allow instances of this class to be moved */
+ INEWinogradLayerTransformWeightsKernel &operator=(INEWinogradLayerTransformWeightsKernel &&) = default;
+
+ INEWinogradLayerTransformWeightsKernel()
+ {
+ }
+ virtual ~INEWinogradLayerTransformWeightsKernel()
+ {
+ }
+ /** Determine how much memory (in units of T) to allocate for the
+ * transformed weights.
+ *
+ * @param[in] num_output_channels Number of output feature maps.
+ * @param[in] num_input_channels Number of input feature maps.
+ *
+ * @return Storage size (in units of T) required.
+ */
+ virtual unsigned int get_weight_storage_size(int num_output_channels, int num_input_channels) const = 0;
+ /** Gets the stride between matrices in the kernel workspace
+ *
+ * @param[in] num_output_channels Number of output feature maps.
+ * @param[in] num_input_channels Number of input feature maps.
+ *
+ * @return Stride expressed in bytes.
+ */
+ virtual int get_matrix_stride(int num_output_channels, int num_input_channels) const = 0;
+
+ /** Configure the weights transform kernel.
+ *
+ * @param[in] weights_hwio Pointer to the weights tensor
+ * @param[out] output Pointer to working space for the output tensor in the Winograd domain.
+ * @param[in] matrix_stride Stride across matrices in the output workspace.
+ * @param[in] num_output_channels Number of filters.
+ * @param[in] num_input_channels Number of channels in each filter.
+ */
+
+ virtual void configure(const ITensor *weights_hwio, ITensor *output, const int matrix_stride, const int num_output_channels, const int num_input_channels) = 0;
+
+ /** Static function to check if given info will lead to a valid configuration of @ref NEWinogradLayerTransformWeightsKernel
+ *
+ * @param[in] input First tensor input info. Data types supported: F16/F32.
+ * @param[in] weights Weights tensor info. Data types supported: same as @p input.
+ *
+ * @return a status
+ */
+ static Status validate(const ITensorInfo *input, const ITensorInfo *weights);
+};
+
+/** NEON kernel to perform Winograd weights transform. */
+template <typename T, int OutputTileRows, int OutputTileCols, int KernelRows, int KernelCols>
+class NEWinogradLayerTransformWeightsKernel final : public INEWinogradLayerTransformWeightsKernel
+{
+public:
+ /** Prevent instances of this class from being copied (As this class contains pointers) */
+ NEWinogradLayerTransformWeightsKernel(const NEWinogradLayerTransformWeightsKernel &) = delete;
+ /** Prevent instances of this class from being copied (As this class contains pointers) */
+ NEWinogradLayerTransformWeightsKernel &operator=(const NEWinogradLayerTransformWeightsKernel &) = delete;
+ /** Allow instances of this class to be moved */
+ NEWinogradLayerTransformWeightsKernel(NEWinogradLayerTransformWeightsKernel &&) = default;
+ /** Allow instances of this class to be moved */
+ NEWinogradLayerTransformWeightsKernel &operator=(NEWinogradLayerTransformWeightsKernel &&) = default;
+ /** Default destructor */
+ ~NEWinogradLayerTransformWeightsKernel() = default;
+
+ /** Default constructor. */
+ NEWinogradLayerTransformWeightsKernel();
+ const char *name() const override
+ {
+ return "NEWinogradLayerTransformWeightsKernel";
+ }
+
+ /** Static function to check if given info will lead to a valid configuration of @ref NEWinogradLayerTransformWeightsKernel
+ *
+ * @param[in] input Source tensor info. The input is a 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM] (NCHW data layout).
+ * kernel_x must be 3 and equal to kernel_y. Data types supported: F16/F32.
+ * @param[in] output Destination tensor info. The output is a 3D tensor with dimensions [OFM, IFM, 16] or [OFM, IFM, 36]. Data type supported: same as @p input
+ * @param[in] winograd_info Contains Winograd's information described in @ref WinogradInfo
+ *
+ * @return a status
+ */
+ static Status validate(const ITensorInfo *input, const ITensorInfo *output, const WinogradInfo &winograd_info);
+
+ // Inherited methods overridden:
+
+#ifndef DOXYGEN_SKIP_THIS
+ /** Configure the weights transform kernel.
+ *
+ * @param[in] weights_hwio Pointer to the weights tensor
+ * @param[out] output Pointer to working space for the output tensor in the Winograd domain.
+ * @param[in] matrix_stride Stride across matrices in the output workspace.
+ * @param[in] num_output_channels Number of filters.
+ * @param[in] num_input_channels Number of channels in each filter.
+ */
+ void configure(const ITensor *weights_hwio, ITensor *output, const int matrix_stride, const int num_output_channels, const int num_input_channels) override;
+#endif /* DOXYGEN_SKIP_THIS */
+
+ /** Determine how much memory (in units of T) to allocate for the
+ * transformed weights.
+ *
+ * @param[in] num_output_channels Number of output feature maps.
+ * @param[in] num_input_channels Number of input feature maps.
+ *
+ * @return Storage size (in units of T) required.
+ */
+ unsigned int get_weight_storage_size(int num_output_channels, int num_input_channels) const override;
+
+ /** Gets the stride between matrices in the kernel workspace
+ *
+ * @param[in] num_output_channels Number of output feature maps.
+ * @param[in] num_input_channels Number of input feature maps.
+ *
+ * @return Stride expressed in bytes.
+ */
+ int get_matrix_stride(int num_output_channels, int num_input_channels) const override;
+ void run(const Window &window, const ThreadInfo &info) override;
+ bool is_parallelisable() const override;
+
+private:
+ using WinogradBase = winograd::WinogradGEMM<OutputTileRows, OutputTileCols, KernelRows, KernelCols, winograd::WinogradRoots::Integers>;
+ using WinogradConv = typename WinogradBase::template Convolution<T, T>;
+ using WeightsTransform = typename WinogradBase::template WeightsTransform<T, T>;
+
+ std::unique_ptr<WeightsTransform> _transform{ nullptr };
+ const ITensor *_weights_hwio;
+ ITensor *_output;
+ int _matrix_stride;
+ int _num_output_channels;
+ int _num_input_channels;
+};
+
+/** NEON kernel to perform Winograd. */
+template <typename TIn, typename TOut, int OutputTileRows, int OutputTileCols, int KernelRows, int KernelCols>
+class NEWinogradLayerConfiguration
+{
+public:
+ /** Winograd base kernel */
+ using WinogradBase = winograd::WinogradGEMM<OutputTileRows, OutputTileCols, KernelRows, KernelCols, winograd::WinogradRoots::Integers>;
+ /** Winograd convolution kernel */
+
+ using WinogradConv = typename WinogradBase::template Convolution<TIn, TOut>;
+
+ using TransformInputKernel = NEWinogradLayerTransformInputKernel<TIn, OutputTileRows, OutputTileCols, KernelRows, KernelCols>;
+ using TransformWeightsKernel = NEWinogradLayerTransformWeightsKernel<TIn, OutputTileRows, OutputTileCols, KernelRows, KernelCols>;
+ using TransformOutputKernel = NEWinogradLayerTransformOutputKernel<TOut, OutputTileRows, OutputTileCols, KernelRows, KernelCols>;
+};
+
+} // namespace arm_compute
+#endif /*ARM_COMPUTE_NEGEMMWINOGRADCONVOLUTIONLAYERKERNEL_H*/
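As an illustration of how the pieces in this header fit together, the following sketch (not part of this patch; the tile/kernel sizes, the float data type and the use of std::make_unique are assumptions) shows how the NEWinogradLayerConfiguration aliases could be used to instantiate the three transform kernels for a 3x3 kernel producing 2x2 output tiles:

    #include <memory>

    using WinogradConfig = arm_compute::NEWinogradLayerConfiguration<float, float, 2, 2, 3, 3>;

    void make_winograd_transform_kernels()
    {
        // The aliases resolve to the concrete kernel templates declared in the header above.
        auto input_transform   = std::make_unique<WinogradConfig::TransformInputKernel>();
        auto weights_transform = std::make_unique<WinogradConfig::TransformWeightsKernel>();
        auto output_transform  = std::make_unique<WinogradConfig::TransformOutputKernel>();

        // Each kernel would then be configured with its tensors/workspace and scheduled by the runtime.
        (void)input_transform;
        (void)weights_transform;
        (void)output_transform;
    }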
diff --git a/src/core/NEON/kernels/arm_gemm/gemm_hybrid.hpp b/src/core/NEON/kernels/arm_gemm/gemm_hybrid.hpp
index aeeed26702..0ce323e09d 100644
--- a/src/core/NEON/kernels/arm_gemm/gemm_hybrid.hpp
+++ b/src/core/NEON/kernels/arm_gemm/gemm_hybrid.hpp
@@ -31,7 +31,7 @@
#include "bias_adder.hpp"
#include "utils.hpp"
-#include "arm_compute/core/NEON/kernels/arm_gemm/ndrange.hpp"
+#include "ndrange.hpp"
#include "mergeresults.hpp"
#include "transform.hpp"
diff --git a/src/core/NEON/kernels/arm_gemm/gemm_hybrid_quantized.hpp b/src/core/NEON/kernels/arm_gemm/gemm_hybrid_quantized.hpp
index 6897e64d4b..d9b1a71ea8 100644
--- a/src/core/NEON/kernels/arm_gemm/gemm_hybrid_quantized.hpp
+++ b/src/core/NEON/kernels/arm_gemm/gemm_hybrid_quantized.hpp
@@ -30,7 +30,7 @@
#include "arm_gemm.hpp"
#include "utils.hpp"
-#include "arm_compute/core/NEON/kernels/arm_gemm/ndrange.hpp"
+#include "ndrange.hpp"
#include "mergeresults.hpp"
#include "transform.hpp"
diff --git a/src/core/NEON/kernels/arm_gemm/gemm_native.hpp b/src/core/NEON/kernels/arm_gemm/gemm_native.hpp
index fb01a731b8..c2f742b5cf 100644
--- a/src/core/NEON/kernels/arm_gemm/gemm_native.hpp
+++ b/src/core/NEON/kernels/arm_gemm/gemm_native.hpp
@@ -27,7 +27,7 @@
#include "arm_gemm.hpp"
-#include "arm_compute/core/NEON/kernels/arm_gemm/ndrange.hpp"
+#include "ndrange.hpp"
#ifdef CYCLE_PROFILING
#include "profiler.hpp"
diff --git a/src/core/NEON/kernels/assembly/Helpers.cpp b/src/core/NEON/kernels/assembly/Helpers.cpp
index 93ea6c8d5e..5990505a59 100644
--- a/src/core/NEON/kernels/assembly/Helpers.cpp
+++ b/src/core/NEON/kernels/assembly/Helpers.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2019 ARM Limited.
+ * Copyright (c) 2018-2020 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -22,7 +22,7 @@
* SOFTWARE.
*/
-#include "arm_compute/core/NEON/kernels/assembly/Helpers.h"
+#include "src/core/NEON/kernels/assembly/Helpers.h"
namespace arm_compute
{
diff --git a/src/core/NEON/kernels/assembly/Helpers.h b/src/core/NEON/kernels/assembly/Helpers.h
new file mode 100644
index 0000000000..09c0446ada
--- /dev/null
+++ b/src/core/NEON/kernels/assembly/Helpers.h
@@ -0,0 +1,122 @@
+/*
+ * Copyright (c) 2018-2020 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_ASSEMBLY_HELPERS_H
+#define ARM_COMPUTE_ASSEMBLY_HELPERS_H
+
+#include "arm_compute/core/CPP/CPPTypes.h"
+#include "arm_compute/core/Utils.h"
+
+#include "arm_compute/core/NEON/kernels/assembly/INEGEMMWrapperKernel.h"
+#include "arm_gemm.hpp"
+
+namespace arm_compute
+{
+/** Block sizes to use to break up the M, N, K dimensions */
+struct BlockSizes
+{
+ unsigned int k_block{ 0 }; /**< Block size along the K dimension */
+ unsigned int x_block{ 0 }; /**< Block size along the N (x) dimension */
+ unsigned int m_round{ 0 }; /**< Block size along the M dimension (Must be a multiple of strategy_out_height) */
+ unsigned int strategy_out_height{ 0 }; /**< Number of rows (M) processed by the selected strategy */
+};
+
+/** Extracts the description of the kernel selected by the GEMM backend heuristics
+ *
+ * @param[in] input_type Data type of the input tensor.
+ * @param[in] ci CPU information.
+ * @param[in] num_threads Maximum number of threads that might be used for the calculations.
+ * @param[in] p M, N, K sizes.
+ * @param[in] activation Activation struct
+ * @param[in] pretranspose_hint Is B also pretransposed?
+ *
+ * @return Kernel description that the assembly heuristics picked for the given configuration
+ */
+arm_gemm::KernelDescription get_gemm_info(DataType input_type,
+ const CPUInfo &ci,
+ const unsigned int num_threads,
+ const INEGEMMWrapperKernel::Params &p,
+ arm_gemm::Activation activation,
+ bool pretranspose_hint);
+
+/** Calculate the recommended block sizes to use based on the CPU cache sizes and the strategy which will be used
+ *
+ * @param[in] ci CPU information.
+ * @param[in] M M dimension.
+ * @param[in] N N dimension.
+ * @param[in] K K dimension.
+ *
+ * @return Recommended block sizes to use for the given M, N, K dimensions.
+ */
+template <typename strategy>
+BlockSizes calculate_block_sizes(const CPUInfo &ci, unsigned int M, unsigned int N, unsigned int K)
+{
+ BlockSizes bs;
+
+ using Toi = typename strategy::operand_type;
+
+ const unsigned int L1_size = ci.get_L1_cache_size();
+ const unsigned int L2_size = ci.get_L2_cache_size();
+
+ // Work out blocking parameters
+
+ // k_block: Find out how much of the larger array can be loaded into half the cache.
+ // This should account for associative caches.
+ bs.k_block = (L1_size / 2) / (sizeof(Toi) * (std::max(strategy::out_width(), strategy::out_height())));
+
+ // Needs to be (at least a single) multiple of the K unroll level.
+ bs.k_block /= strategy::k_unroll();
+ bs.k_block = std::max(bs.k_block, 1U) * strategy::k_unroll();
+
+ // Now tune to presented problem size; this is how many blocks we need.
+ int num_k_blocks = DIV_CEIL(K, bs.k_block);
+
+ // So divide the space equally into that many blocks.
+ bs.k_block = DIV_CEIL(K, num_k_blocks);
+
+ // And round UP to the K unroll level required.
+ bs.k_block = ceil_to_multiple(bs.k_block, strategy::k_unroll());
+
+ // x_block: Work out how many rows (of length k_block) will fit in the L2
+ // Don't allocate more than 90% of the L2 to allow for overheads, and subtract off the L1 contents.
+ bs.x_block = (((L2_size * 9) / 10) - (bs.k_block * sizeof(Toi) * (strategy::out_width() + strategy::out_height()))) / (sizeof(Toi) * bs.k_block);
+
+ // Needs to be (at least a single) multiple of the kernel output width.
+ bs.x_block /= strategy::out_width();
+ bs.x_block = std::max(bs.x_block, 1U) * strategy::out_width();
+
+ // And tune to the presented problem size.
+ int num_x_blocks = DIV_CEIL(N, bs.x_block);
+ bs.x_block = DIV_CEIL(N, num_x_blocks);
+
+ bs.x_block = ceil_to_multiple(bs.x_block, strategy::out_width());
+
+ // Work out the rounded size of M - needed for some buffers.
+ bs.m_round = ceil_to_multiple(M, strategy::out_height());
+ bs.strategy_out_height = strategy::out_height();
+
+ return bs;
+}
+
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_ASSEMBLY_HELPERS_H */
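A brief sketch of how calculate_block_sizes() might be called; the example_strategy type, its tile sizes and the default-constructed CPUInfo are hypothetical stand-ins (a real caller would pass an arm_gemm strategy and the CPU description provided by the runtime):

    // Hypothetical strategy exposing only the members calculate_block_sizes() relies on.
    struct example_strategy
    {
        using operand_type = float;
        static unsigned int out_width()  { return 12; }
        static unsigned int out_height() { return 8; }
        static unsigned int k_unroll()   { return 1; }
    };

    void blocking_example()
    {
        arm_compute::CPUInfo ci{}; // illustration only; normally obtained from the scheduler
        arm_compute::BlockSizes bs =
            arm_compute::calculate_block_sizes<example_strategy>(ci, /* M */ 1024, /* N */ 1024, /* K */ 512);

        // bs.k_block and bs.x_block come out as multiples of k_unroll() and out_width(),
        // and bs.m_round is M rounded up to a multiple of out_height().
        (void)bs;
    }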
diff --git a/src/core/NEON/kernels/assembly/NEGEMMAssemblyWrapperKernel.h b/src/core/NEON/kernels/assembly/NEGEMMAssemblyWrapperKernel.h
new file mode 100644
index 0000000000..2d3d805553
--- /dev/null
+++ b/src/core/NEON/kernels/assembly/NEGEMMAssemblyWrapperKernel.h
@@ -0,0 +1,120 @@
+/*
+ * Copyright (c) 2018-2020 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_ASSEMBLY_GEMM_KERNEL_WRAPPER_KERNEL_H
+#define ARM_COMPUTE_ASSEMBLY_GEMM_KERNEL_WRAPPER_KERNEL_H
+
+#include "arm_compute/core/NEON/INEKernel.h"
+#include "arm_compute/core/Utils.h"
+#include "arm_compute/core/Validate.h"
+#include "arm_gemm_compute_iface.hpp"
+
+#include "gemm_common.hpp"
+
+namespace arm_compute
+{
+class ITensor;
+
+/** This class is a wrapper for the assembly kernels.
+ *
+ * Some kernels were written in assembly and highly optimised for specific CPUs like A53 or A55.
+ * This class works as a wrapper for these assembly kernels. The Arm Compute Library creates an instance
+ * of NEGEMMAssemblyWrapperKernel and other auxiliary data structures to execute a single assembly kernel
+ * in the context of an NEFunction.
+ *
+ * The template types TypeInput and TypeOutput are the operand and return types of the actual kernel
+ * implemented in assembly, which is of type template<typename To, typename Tr> class GemmCommon
+ *
+ *
+ */
+template <typename TypeInput, typename TypeOutput>
+class NEGEMMAssemblyWrapperKernel final : public INEKernel
+{
+public:
+ /** Constructor
+ */
+ NEGEMMAssemblyWrapperKernel()
+ : _kernel(nullptr), _name("NEGEMMAssemblyWrapperKernel")
+ {
+ }
+
+ NEGEMMAssemblyWrapperKernel(NEGEMMAssemblyWrapperKernel &) = delete;
+ NEGEMMAssemblyWrapperKernel(NEGEMMAssemblyWrapperKernel &&) = default;
+ NEGEMMAssemblyWrapperKernel &operator=(NEGEMMAssemblyWrapperKernel &) = delete;
+
+ const char *name() const override
+ {
+ return _name.c_str();
+ }
+
+ void run(const Window &window, const ThreadInfo &info) override
+ {
+ ARM_COMPUTE_ERROR_ON_NULLPTR((reinterpret_cast<void *>(_kernel)));
+ ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
+
+ auto win = arm_gemm::to_ndcoord(window);
+
+ arm_gemm::ndcoord_t thread_locator{};
+
+ _kernel->execute(win, thread_locator, info.thread_id);
+ }
+
+ // Inherited methods overridden:
+ void run_nd(const Window &window, const ThreadInfo &info, const Window &thread_locator) override
+ {
+ ARM_COMPUTE_ERROR_ON_NULLPTR((reinterpret_cast<void *>(_kernel)));
+ ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
+
+ //convert between arm_compute and arm_gemm types
+ auto ndc_win = arm_gemm::to_ndcoord(window);
+ auto ndc_tlc = arm_gemm::to_ndcoord(thread_locator);
+
+ _kernel->execute(ndc_win, ndc_tlc, info.thread_id);
+ }
+
+ /** Initialise the kernel's input and output.
+ *
+ * @param[in] kernel Pointer to an assembly kernel implementation.
+ * @param[in] kernel_name_tag Tag to be appended to the kernel's name.
+ */
+ void configure(arm_gemm::GemmCommon<TypeInput, TypeOutput> *kernel, std::string kernel_name_tag)
+ {
+ ARM_COMPUTE_ERROR_ON_NULLPTR((reinterpret_cast<void *>(kernel)));
+ _kernel = kernel;
+
+ Window win = to_window(kernel->get_window_size());
+
+ INEKernel::configure(win);
+
+ if(!kernel_name_tag.empty())
+ {
+ _name += "/" + kernel_name_tag;
+ }
+ }
+
+private:
+ arm_gemm::GemmCommon<TypeInput, TypeOutput> *_kernel;
+ std::string _name;
+};
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_ASSEMBLY_GEMM_KERNEL_WRAPPER_KERNEL_H */
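For context, a rough usage sketch of the wrapper (assumptions: the GemmCommon object has already been created via the arm_gemm factory and had its arrays set, and "example_tag" is an arbitrary label):

    void run_wrapped_gemm(arm_gemm::GemmCommon<float, float> *assembly_kernel)
    {
        arm_compute::NEGEMMAssemblyWrapperKernel<float, float> wrapper;

        // configure() sets the kernel window from get_window_size() and appends the tag to the name.
        wrapper.configure(assembly_kernel, "example_tag");

        // Dispatch through the NEON scheduler like any other INEKernel.
        arm_compute::NEScheduler::get().schedule(&wrapper, arm_compute::Window::DimX);
    }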
diff --git a/src/core/NEON/kernels/assembly/arm_gemm.hpp b/src/core/NEON/kernels/assembly/arm_gemm.hpp
new file mode 100644
index 0000000000..7723224ec8
--- /dev/null
+++ b/src/core/NEON/kernels/assembly/arm_gemm.hpp
@@ -0,0 +1,176 @@
+/*
+ * Copyright (c) 2018-2020 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#pragma once
+
+#include <memory>
+#include <cstring>
+
+#include "arm_gemm_local.hpp"
+#include "gemm_common.hpp"
+
+namespace arm_gemm {
+
+enum class GemmMethod
+{
+ DEFAULT,
+ GEMV_BATCHED,
+ GEMV_PRETRANSPOSED,
+ GEMV_NATIVE_TRANSPOSED,
+ GEMM_NATIVE,
+ GEMM_HYBRID,
+ GEMM_INTERLEAVED,
+ GEMM_INTERLEAVED_2D,
+ QUANTIZE_WRAPPER,
+ GEMM_HYBRID_QUANTIZED
+};
+
+struct KernelDescription
+{
+ GemmMethod method = GemmMethod::DEFAULT;
+ std::string name = "";
+ bool is_default = false;
+
+ KernelDescription(GemmMethod m, std::string n, bool d=false) : method(m), name(n), is_default(d) { }
+ KernelDescription() noexcept { }
+};
+
+struct GemmConfig
+{
+ GemmMethod method = GemmMethod::DEFAULT;
+ std::string filter = "";
+ unsigned int inner_block_size = 0;
+ unsigned int outer_block_size = 0;
+
+ GemmConfig(GemmMethod method) : method(method) { }
+ GemmConfig() { }
+};
+
+struct Activation
+{
+ enum class Type {
+ None,
+ ReLU,
+ BoundedReLU
+ };
+
+ Type type;
+ float param1;
+ float param2;
+
+ Activation(Type type=Type::None, float p1=0.0f, float p2=0.0f) : type(type), param1(p1), param2(p2) { }
+};
+
+struct GemmArgs
+{
+public:
+ const CPUInfo *_ci;
+ unsigned int _Msize;
+ unsigned int _Nsize;
+ unsigned int _Ksize;
+ unsigned int _nbatches;
+ unsigned int _nmulti;
+ bool _trA;
+ bool _trB;
+ Activation _act;
+ int _maxthreads;
+ bool _pretransposed_hint;
+ const GemmConfig *_cfg;
+
+ GemmArgs(const CPUInfo *ci, const unsigned int M, const unsigned int N,
+ const unsigned int K, const unsigned int nbatches,
+ const unsigned int nmulti, const bool trA, const bool trB,
+ Activation act, const int maxthreads,
+ const bool pretransposed_hint, const GemmConfig *cfg=nullptr ) :
+ _ci(ci), _Msize(M), _Nsize(N), _Ksize(K), _nbatches(nbatches), _nmulti(nmulti),
+ _trA(trA), _trB(trB), _act(act), _maxthreads(maxthreads),
+ _pretransposed_hint(pretransposed_hint), _cfg(cfg)
+ {
+ }
+};
+
+struct Requantize32
+{
+public:
+ const int32_t *bias = nullptr;
+ size_t bias_multi_stride = 0;
+ int32_t a_offset = 0;
+ int32_t b_offset = 0;
+ int32_t c_offset = 0;
+ bool per_channel_requant = false;
+ int32_t per_layer_shift = 0;
+ int32_t per_layer_mul = 0;
+ const int32_t *per_channel_shifts = nullptr;
+ const int32_t *per_channel_muls = nullptr;
+ int32_t minval = 0;
+ int32_t maxval = 0;
+
+ Requantize32() = default;
+
+ // Constructor for per-tensor quantization
+ Requantize32(const int32_t *bias, size_t bias_multi_stride,
+ int32_t a_offset, int32_t b_offset, int32_t c_offset,
+ int32_t requant_shift, int32_t requant_mul,
+ int32_t minv, int32_t maxv) :
+ bias(bias), bias_multi_stride(bias_multi_stride),
+ a_offset(a_offset), b_offset(b_offset), c_offset(c_offset),
+ per_channel_requant(false), per_layer_shift(requant_shift), per_layer_mul(requant_mul),
+ minval(minv), maxval(maxv)
+ {
+ }
+
+ // Constructor for per-channel quantization
+ Requantize32(const int32_t *bias, size_t bias_multi_stride,
+ int32_t a_offset, int32_t b_offset, int32_t c_offset,
+ const int32_t *requant_shifts, const int32_t *requant_muls,
+ int32_t minv, int32_t maxv) :
+ bias(bias), bias_multi_stride(bias_multi_stride),
+ a_offset(a_offset), b_offset(b_offset), c_offset(c_offset),
+ per_channel_requant(true), per_channel_shifts(requant_shifts), per_channel_muls(requant_muls),
+ minval(minv), maxval(maxv)
+ {
+ }
+};
+
+struct Nothing
+{
+};
+
+template<typename Top, typename Tret>
+using UniqueGemmCommon = std::unique_ptr<GemmCommon<Top, Tret> >;
+
+/* Low level API calls.
+ * These are implemented as 'GemmArgs' versions, or with the arguments explicitly listed. */
+
+/* get_gemm_method(): Given the templated types and provided parameters,
+ * which is the preferred method to implement this GEMM? */
+template<typename Top, typename Tret, class OutputStage = Nothing>
+KernelDescription get_gemm_method(const GemmArgs &args, const OutputStage & ={});
+
+template<typename Top, typename Tret, class OutputStage = Nothing>
+UniqueGemmCommon<Top, Tret> gemm(const GemmArgs &args, const OutputStage & ={});
+
+template<typename Top, typename Tret, class OutputStage = Nothing>
+std::vector<KernelDescription> get_compatible_kernels(const GemmArgs &args, const OutputStage & ={});
+
+} // namespace arm_gemm
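A minimal sketch of driving the low-level API declared above; the problem sizes, thread count and data types are illustrative assumptions, and CPUInfo stands for the CPU descriptor type referenced by GemmArgs:

    arm_gemm::UniqueGemmCommon<float, float> make_example_sgemm(const CPUInfo *ci)
    {
        // Describe the problem: M=128, N=256, K=64, one batch, one multi, no transposes.
        arm_gemm::GemmArgs args(ci, 128, 256, 64, /* nbatches */ 1, /* nmulti */ 1,
                                /* trA */ false, /* trB */ false,
                                arm_gemm::Activation(), /* maxthreads */ 4,
                                /* pretransposed_hint */ false);

        // Optionally ask the heuristics which method they would pick for these arguments...
        arm_gemm::KernelDescription desc = arm_gemm::get_gemm_method<float, float>(args);
        (void)desc;

        // ...then create the GEMM object itself.
        return arm_gemm::gemm<float, float>(args);
    }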
diff --git a/src/core/NEON/kernels/assembly/arm_gemm_compute_iface.hpp b/src/core/NEON/kernels/assembly/arm_gemm_compute_iface.hpp
new file mode 100644
index 0000000000..ab3a67c37c
--- /dev/null
+++ b/src/core/NEON/kernels/assembly/arm_gemm_compute_iface.hpp
@@ -0,0 +1,122 @@
+/*
+ * Copyright (c) 2020 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#pragma once
+
+#include "arm_compute/core/Window.h"
+#include "arm_compute/core/Dimensions.h"
+
+#include "ndrange.hpp"
+
+#include <cassert>
+
+/* This file contains the mapping between the integral types used in arm_compute and arm_gemm.
+ * These two codebases both require a degree of separation for the sake of modularity,
+ * so they maintain their own types which represent similar information.
+ */
+
+namespace arm_gemm {
+
+//we want to unify the maximum number of dimensions used between arm_gemm and the Arm Compute Library
+constexpr std::size_t ndrange_max =
+ arm_compute::Dimensions<unsigned int>::num_max_dimensions;
+
+using ndrange_t=NDRange<ndrange_max>;
+using ndcoord_t=NDCoordinate<ndrange_max>;
+
+/* Converts an `arm_gemm::ndrange_t` to an `arm_compute::Window`
+ *
+ * As `NDRange<T>` does not encode start positions, we specify
+ * the start to be zero in the produced `arm_compute::Window`
+ *
+ * @param [ndr] the `arm_gemm::ndrange_t` we wish to convert into a `arm_compute::Window`
+ * @returns an `arm_compute::Window` representing the same dimensional ranges as `ndr`
+ */
+inline arm_compute::Window to_window(const ndrange_t& ndr) {
+ arm_compute::Window win;
+
+ for(unsigned int i = 0; i!=ndrange_max; ++i) {
+ //populate the window with the dimensions of the NDRange
+ win.set(i, arm_compute::Window::Dimension(0, ndr.get_size(i)));
+ }
+
+ return win;
+}
+
+/*
+ * Converts an `arm_gemm::ndcoord_t` to an `arm_compute::Window`
+ *
+ * @param [ndc] the `arm_gemm::ndcoord_t` we wish to convert into a `arm_compute::Window`
+ * @returns an `arm_compute::Window` representing the same dimensional ranges as `ndc`
+ */
+inline arm_compute::Window to_window(const ndcoord_t& ndc) {
+ arm_compute::Window win;
+
+ for(unsigned int i = 0; i!=ndrange_max; ++i) {
+ const auto start = ndc.get_position(i);
+ const auto size = ndc.get_size(i);
+ const auto stop = start + size;
+
+ //populate the window with the dimensions of the NDRange
+ win.set(i, arm_compute::Window::Dimension(start, stop));
+ }
+
+ return win;
+}
+
+/** Convert an `arm_compute::Window` to an `arm_gemm::NDRange` of the same max dimensions
+ *
+ * It should be noted that `arm_compute::Window` specifies a `start()` and an `end()`
+ * whereas `arm_gemm::ndrange_t` only has a size; as a result we store the extent (end - start) of each dimension
+ *
+ * @param [win] the `arm_compute::Window` we want to convert to `arm_gemm::ndrange_t`
+ * @return the resultant ndrange_t
+ */
+inline ndrange_t to_ndrange(const arm_compute::Window& win) {
+ return {
+ static_cast<unsigned int>(win[0].end() - win[0].start()),
+ static_cast<unsigned int>(win[1].end() - win[1].start()),
+ static_cast<unsigned int>(win[2].end() - win[2].start()),
+ static_cast<unsigned int>(win[3].end() - win[3].start()),
+ static_cast<unsigned int>(win[4].end() - win[4].start()),
+ static_cast<unsigned int>(win[5].end() - win[5].start())
+ };
+}
+
+/** Convert an `arm_compute::Window` to an `arm_gemm::NDCoord` of the same max dimensions
+ *
+ * @param [win] the `arm_compute::Window` we want to convert to `arm_gemm::ndcoord_t`
+ * @return the resultant ndcoord_t
+ */
+inline ndcoord_t to_ndcoord(const arm_compute::Window& win) {
+ return {
+ { static_cast<unsigned int>(win[0].start()), static_cast<unsigned int>(win[0].end() - win[0].start()) },
+ { static_cast<unsigned int>(win[1].start()), static_cast<unsigned int>(win[1].end() - win[1].start()) },
+ { static_cast<unsigned int>(win[2].start()), static_cast<unsigned int>(win[2].end() - win[2].start()) },
+ { static_cast<unsigned int>(win[3].start()), static_cast<unsigned int>(win[3].end() - win[3].start()) },
+ { static_cast<unsigned int>(win[4].start()), static_cast<unsigned int>(win[4].end() - win[4].start()) },
+ { static_cast<unsigned int>(win[5].start()), static_cast<unsigned int>(win[5].end() - win[5].start()) }
+ };
+}
+
+} //namespace arm_gemm
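A short sketch exercising the conversions above (the window extents are arbitrary):

    void conversion_example()
    {
        arm_compute::Window win;
        win.set(0, arm_compute::Window::Dimension(0, 128)); // range [0, 128)
        win.set(1, arm_compute::Window::Dimension(32, 96)); // range [32, 96)

        arm_gemm::ndcoord_t ndc = arm_gemm::to_ndcoord(win); // keeps both start and size per dimension
        arm_gemm::ndrange_t ndr = arm_gemm::to_ndrange(win); // keeps only the sizes; starts are dropped

        arm_compute::Window round_trip = arm_gemm::to_window(ndc); // same ranges as 'win' on dims 0..5
        (void)ndr;
        (void)round_trip;
    }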
diff --git a/src/core/NEON/kernels/assembly/gemm_common.hpp b/src/core/NEON/kernels/assembly/gemm_common.hpp
new file mode 100644
index 0000000000..a44b774b9d
--- /dev/null
+++ b/src/core/NEON/kernels/assembly/gemm_common.hpp
@@ -0,0 +1,201 @@
+/*
+ * Copyright (c) 2017-2020 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#pragma once
+
+#include "arm_gemm_compute_iface.hpp"
+
+#include <cstddef>
+#include <cassert>
+
+#define UNUSED(x) (void)(x)
+
+namespace arm_gemm {
+
+// Abstract class for the GEMM/GEMV functions.
+//
+// GEMM implementations may be "native" (never require any input
+// permutation), "pretransposed" (require permutation up-front) or require
+// working space (permute as they go along). This interface should support
+// all of them.
+
+// The real GemmCommon class is templated based on the operand and return
+// type. This is an interface class which is independent of those types.
+class IGemmCommon {
+public:
+ /* Pass in the pointers to the arrays to be operated on and their
+ * strides. This "generic" version uses void *s; the preferred version
+ * is the one provided by templated GemmCommon (below) which takes
+ * appropriately typed pointers. If B is pretransposed (see below) then
+ * the settings for B here are ignored.
+ */
+ virtual void set_arrays_generic(const void *A, const int lda, const int A_batch_stride, const int A_multi_stride,
+ const void *B, const int ldb, /* batches share B */ const int B_multi_stride,
+ void *C, const int ldc, const int C_batch_stride, const int C_multi_stride,
+ const void *bias, /* no row or batch stride needed */ const int bias_multi_stride) = 0;
+
+ /** @returns an ndrange containing ranges of the compute space which can be
+ * broken up and parallelised over
+ */
+ virtual ndrange_t get_window_size() const = 0;
+
+ /* The maximum thread count is specified when the GEMM is created. Some
+ * implementations need to know how many threads will actually run in
+ * order to work properly.
+ *
+ * In some cases, after creating the GEMM the number of threads needs to
+ * be reduced (e.g. not enough work to split across threads). This
+ * method allows the number of actual threads to be run to be set (must
+ * be equal or lower).
+ *
+ * This has an empty default implementation, as GEMMs which don't care
+ * about thread count can safely ignore this.
+ */
+ virtual void set_nthreads(int) { };
+
+ /* Whether this GEMM can be dynamically scheduled or not. */
+ virtual bool supports_dynamic_scheduling() const { return false; }
+
+ /** Main execute member function
+ * @param [in] work_range specifies the range of work we want to be computed, total range defined by get_window_size()
+ * @param [in] thread_locator where are we inside of the thread space
+ * @naram [in] threadid a unique threadid
+ */
+ virtual void execute(const ndcoord_t& work_range, const ndcoord_t& thread_locator, int threadid) = 0;
+
+ /*** Working space interface (optional) ***/
+ /* Total number of bytes of temporary working space needed. If zero, it's not necessary to call set_working_space(). */
+ virtual size_t get_working_size() const { return 0; }
+ /* Provide working space buffer - the void * passed in must remain allocated for the duration of any execute calls. */
+ virtual void set_working_space(void *) { };
+
+ /*** "Pretransposed" interface (optional) ***/
+ /* Is this object set up for pretranspose? If so, pretranspose_B_array() needs to be called before execute(). */
+ virtual bool B_is_pretransposed() const { return false; }
+ /* Does pretranspose still need to be done? */
+ virtual bool B_pretranspose_required() const { return false; }
+ /* Total number of bytes of space needed for pretransposed arrays. */
+ virtual size_t get_B_pretransposed_array_size() const { return 0; }
+ /* Perform pretranspose - arguments are output, input, input row stride and input multi stride. */
+ /* The "real" version of this depends on the templated operand type (see below). */
+ virtual void pretranspose_B_array_generic(void *, const void *, const int, const int) = 0;
+ /* Set pretransposed data - the void * passed in must previously have been passed to pretranspose_B_array() for the same or a similar GEMM. */
+ virtual void set_pretransposed_B_data(void *) { }
+
+ /*** "Quantized bias" interface (optional) ***/
+ /* Set the bias vector for quantized GEMMs */
+ virtual void set_quantized_bias(const int32_t *bias, size_t bias_multi_stride)
+ {
+ UNUSED(bias);
+ UNUSED(bias_multi_stride);
+ }
+
+ // Destructor
+ virtual ~IGemmCommon() { }
+};
+
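+// Illustrative usage sketch (not part of the interface): the call sequence
+// implied by the comments above, using the "generic" void * entry points.
+// The buffer names (A, B, C, bias, workspace, transposed_B) and the
+// placeholder work_range/thread_locator ndcoord_t values are assumptions
+// made for the example.
+//
+//   IGemmCommon *gemm = /* obtained from a GEMM factory */;
+//   gemm->set_arrays_generic(A, lda, A_batch_stride, A_multi_stride,
+//                            B, ldb, B_multi_stride,
+//                            C, ldc, C_batch_stride, C_multi_stride,
+//                            bias, bias_multi_stride);
+//
+//   if(gemm->get_working_size() != 0) {
+//       gemm->set_working_space(workspace);   // must stay allocated while executing
+//   }
+//
+//   if(gemm->B_pretranspose_required()) {
+//       // 'transposed_B' must be at least get_B_pretransposed_array_size() bytes.
+//       gemm->pretranspose_B_array_generic(transposed_B, B, ldb, B_multi_stride);
+//   }
+//
+//   // Build an ndcoord_t work range (see arm_gemm_compute_iface.hpp) covering
+//   // the whole of get_window_size(), plus a thread locator, then run on thread 0:
+//   gemm->execute(work_range, thread_locator, 0);
+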
+/* "Real" GemmCommon class which is templated on the operand and return types.
+ *
+ * In addition to correctly typed versions of the functions that operate on
+ * operand and return data, this class provides a default implementation of
+ * 'set_arrays' to capture the provided arguments in protected class
+ * members, as essentially any implementation will need these.
+ */
+template<typename To, typename Tr>
+class GemmCommon : public IGemmCommon {
+protected:
+ const To *_Aptr=nullptr;
+ int _lda=0;
+ int _A_batch_stride=0;
+ int _A_multi_stride=0;
+ const To *_Bptr=nullptr;
+ int _ldb=0;
+ int _B_multi_stride=0;
+ Tr *_Cptr=nullptr;
+ int _ldc=0;
+ int _C_batch_stride=0;
+ int _C_multi_stride=0;
+ const Tr *_bias=nullptr;
+ int _bias_multi_stride=0;
+
+public:
+ /* Pass in the pointers to the arrays to be operated on and their
+ * strides (templated version with appropriate types). */
+ virtual void set_arrays(const To *A, const int lda, const int A_batch_stride, const int A_multi_stride,
+ const To *B, const int ldb, /* batches share B */ const int B_multi_stride,
+ Tr *C, const int ldc, const int C_batch_stride, const int C_multi_stride,
+ const Tr *bias, /* no row or batch stride needed */ const int bias_multi_stride) {
+ _Aptr = A;
+ _lda = lda;
+ _A_batch_stride = A_batch_stride;
+ _A_multi_stride = A_multi_stride;
+ _Bptr = B;
+ _ldb = ldb;
+ _B_multi_stride = B_multi_stride;
+ _Cptr = C;
+ _ldc = ldc;
+ _C_batch_stride = C_batch_stride;
+ _C_multi_stride = C_multi_stride;
+ _bias = bias;
+ _bias_multi_stride = bias_multi_stride;
+ }
+
+ /* Implementation of the void * overload which casts its arguments to the appropriate type. */
+ void set_arrays_generic(const void *A, const int lda, const int A_batch_stride, const int A_multi_stride,
+ const void *B, const int ldb, /* batches share B */ const int B_multi_stride,
+ void *C, const int ldc, const int C_batch_stride, const int C_multi_stride,
+ const void *bias, /* no row or batch stride needed */ const int bias_multi_stride) override {
+ set_arrays(static_cast<const To *>(A), lda, A_batch_stride, A_multi_stride,
+ static_cast<const To *>(B), ldb, B_multi_stride,
+ static_cast<Tr *>(C), ldc, C_batch_stride, C_multi_stride,
+ static_cast<const Tr *>(bias), bias_multi_stride);
+ }
+
+ /*** "Pretransposed" interface ***/
+
+ /* Perform pretranspose - the void * passed in must remain allocated for the duration of any execute calls. */
+ /* Arguments are: output buffer pointer, source pointer, source row stride, source multi stride */
+ virtual void pretranspose_B_array(void *, const To *, const int, const int) { };
+
+ /* Implementation of the void * overload which casts its arguments to the appropriate type. */
+ void pretranspose_B_array_generic(void *out, const void *in, const int row_stride, const int multi_stride) override {
+ pretranspose_B_array(out, static_cast<const To *>(in), row_stride, multi_stride);
+ }
+};
+
+template<typename GemmKernel>
+inline
+unsigned int get_total_window_size(const GemmKernel& kernel)
+{
+    auto window = kernel.get_window_size();
+
+ unsigned int total = 1;
+ for(unsigned i = 0; i != arm_gemm::ndrange_max; ++i)
+ {
+ total *= window.get_size(i);
+ }
+
+ return total;
+}
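+
+// For example (illustrative only), an even static split of the flattened
+// window across 'nthreads' workers could be derived from this total:
+//
+//   unsigned int total = get_total_window_size(*gemm);
+//   unsigned int chunk = (total + nthreads - 1) / nthreads;
+//   // worker i then covers the range [i * chunk, std::min((i + 1) * chunk, total))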
+
+} // namespace arm_gemm
diff --git a/src/core/NEON/kernels/assembly/ndrange.hpp b/src/core/NEON/kernels/assembly/ndrange.hpp
new file mode 100644
index 0000000000..d082a3e9b8
--- /dev/null
+++ b/src/core/NEON/kernels/assembly/ndrange.hpp
@@ -0,0 +1,185 @@
+/*
+ * Copyright (c) 2019-2020 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#pragma once
+
+#include <array>
+#include <algorithm>
+#include <initializer_list>
+
+#include <cassert>
+
+namespace arm_gemm {
+
+template<unsigned int D>
+class NDRange {
+private:
+ std::array<unsigned int, D> m_sizes {};
+ std::array<unsigned int, D> m_totalsizes {};
+
+ class NDRangeIterator {
+ private:
+ const NDRange &m_parent;
+ unsigned int m_pos = 0;
+ unsigned int m_end = 0;
+
+ public:
+ NDRangeIterator(const NDRange &p, unsigned int s, unsigned int e) : m_parent(p), m_pos(s), m_end(e) { }
+
+ bool done() const {
+ return (m_pos >= m_end);
+ }
+
+ unsigned int dim(unsigned int d) const {
+ unsigned int r = m_pos;
+
+ if (d < (D - 1)) {
+ r %= m_parent.m_totalsizes[d];
+ }
+
+ if (d > 0) {
+ r /= m_parent.m_totalsizes[d-1];
+ }
+
+ return r;
+ }
+
+ bool next_dim0() {
+ m_pos++;
+
+ return !done();
+ }
+
+ bool next_dim1() {
+ m_pos += m_parent.m_sizes[0] - dim(0);
+
+ return !done();
+ }
+
+ unsigned int dim0_max() const {
+ unsigned int offset = std::min(m_end - m_pos, m_parent.m_sizes[0] - dim(0));
+
+ return dim(0) + offset;
+ }
+ };
+
+public:
+ NDRange& operator=(const NDRange& rhs)=default;
+ NDRange(const NDRange& rhs) =default;
+
+ template <typename... T>
+ NDRange(T... ts)
+ : m_sizes{ts...}
+ {
+ unsigned int t=1;
+
+ for (unsigned int i=0; i<D; i++) {
+ t *= m_sizes[i];
+
+ m_totalsizes[i] = t;
+ }
+ }
+
+ NDRange(const std::array<unsigned int, D>& n)
+ : m_sizes(n)
+ {
+ unsigned int t=1;
+
+ for (unsigned int i=0; i<D; i++) {
+ t *= m_sizes[i];
+
+ m_totalsizes[i] = t;
+ }
+ }
+
+ NDRangeIterator iterator(unsigned int start, unsigned int end) const {
+ return NDRangeIterator(*this, start, end);
+ }
+
+ unsigned int total_size() const {
+ return m_totalsizes[D - 1];
+ }
+
+ unsigned int get_size(unsigned int v) const {
+ return m_sizes[v];
+ }
+};
+
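+// Illustrative example (not part of the header): an NDRange flattens an
+// N-dimensional space into one linear window, and NDRangeIterator decomposes
+// linear positions back into per-dimension coordinates (dimension 0 varies
+// fastest).
+//
+//   NDRange<3> r(2, 3, 4);                 // sizes of the three dimensions
+//   unsigned int n = r.total_size();       // 2 * 3 * 4 == 24
+//
+//   for(auto it = r.iterator(0, n); !it.done(); it.next_dim0()) {
+//       // it.dim(0), it.dim(1), it.dim(2) are the coordinates of the
+//       // current position within the 2x3x4 space.
+//   }
+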
+/** NDCoordinate builds upon a range, but specifies a starting position
+ * in addition to a size which it inherits from NDRange
+ */
+template<unsigned int N>
+class NDCoordinate : public NDRange<N> {
+ using int_t =unsigned int;
+ using ndrange_t = NDRange<N>;
+
+ std::array<int_t, N> m_positions {};
+public:
+ NDCoordinate& operator=(const NDCoordinate& rhs)=default;
+ NDCoordinate(const NDCoordinate& rhs) =default;
+ NDCoordinate(const std::initializer_list<std::pair<int_t, int_t>>& list)
+ {
+ std::array<int_t, N> sizes{};
+
+ std::size_t i = 0;
+ for(auto& p : list) {
+ m_positions[i]= p.first;
+ sizes[i++] = p.second;
+ }
+
+ // update the parent's sizes
+ static_cast<ndrange_t&>(*this) = ndrange_t(sizes);
+ }
+
+ int_t get_position(int_t d) const {
+ assert(d < m_positions.size());
+ return m_positions[d];
+ }
+
+ void set_position(int_t d, int_t v) {
+ assert(d < m_positions.size());
+ assert(v < ndrange_t::get_size(d));
+
+ m_positions[d] = v;
+ }
+
+ int_t get_position_end(int_t d) const {
+ return get_position(d) + NDRange<N>::get_size(d);
+ }
+}; //class NDCoordinate
+
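+// Illustrative example (not part of the header): each dimension of an
+// NDCoordinate is given as a {position, size} pair.
+//
+//   NDCoordinate<2> c{ {4, 8}, {0, 16} };
+//   c.get_position(0);       // 4  - start of dimension 0
+//   c.get_size(0);           // 8  - extent, inherited from NDRange<2>
+//   c.get_position_end(0);   // 12 - one past the end of dimension 0 (4 + 8)
+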
+/** @returns the number of dimensions in the NDRange which have a size other than 1,
+ * i.e. the dimensions in which there is actual work that can be broken up
+ */
+template<unsigned int N>
+std::size_t ndrange_popcount(const NDRange<N>& ndr) {
+ std::size_t count = 0;
+
+ for(unsigned int d = 0; d != N; ++d) {
+ if(ndr.get_size(d) != 1)
+ ++count;
+ }
+ return count;
+}
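+
+// For example (illustrative only), ndrange_popcount(NDRange<3>(8, 1, 4))
+// returns 2: only dimensions 0 and 2 hold more than one element.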
+
+} // namespace arm_gemm
diff --git a/src/core/NEON/kernels/convolution/winograd/winograd.hpp b/src/core/NEON/kernels/convolution/winograd/winograd.hpp
new file mode 100644
index 0000000000..0207eedfa7
--- /dev/null
+++ b/src/core/NEON/kernels/convolution/winograd/winograd.hpp
@@ -0,0 +1,621 @@
+/*
+ * Copyright (c) 2017-2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#pragma once
+
+#include "arm_gemm.hpp"
+
+#include <cstddef>
+#include <utility>
+
+namespace winograd
+{
+
+class ITransform
+{
+ public:
+ virtual ~ITransform() = default;
+
+ /**
+ * Get the working space required to perform the transformation.
+ *
+ * Note, the working space is only required when performing the
+ * transformation - hence it can be reused whenever the transformation is
+ * not running.
+ *
+ * @param nthreads The greatest number of threads that will be used to execute the transform.
+ * @return Size of working space required in bytes.
+ */
+ virtual size_t get_working_space_size(unsigned int nthreads=1) const = 0;
+
+ /**
+ * Set the working space to be used by the transformation.
+ *
+ * Note, the working space is only required when performing the
+ * transformation - hence it can be reused whenever the transformation is
+ * not running.
+ *
+ * @param buffer Pointer to the working space.
+ */
+ virtual void set_working_space(void *buffer) = 0;
+
+ /**
+ * Get the window of work a given operator can perform.
+ */
+ virtual unsigned int get_window() const = 0;
+
+ /**
+ * Perform work upon a window of the transform.
+ */
+ virtual void run(unsigned int start, unsigned int stop, unsigned int threadid=0) = 0;
+};
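+
+// Illustrative only: a caller typically splits the window returned by
+// get_window() across threads, each calling run() on its own sub-range. The
+// even static split and the names 'buffer', 'nthreads' and 'tid' below are
+// assumptions made for the example.
+//
+//   transform.set_working_space(buffer);   // sized via get_working_space_size(nthreads)
+//   const unsigned int window = transform.get_window();
+//   const unsigned int chunk  = (window + nthreads - 1) / nthreads;
+//   // On thread 'tid':
+//   const unsigned int start = tid * chunk;
+//   const unsigned int stop  = std::min(start + chunk, window);
+//   if(start < stop) {
+//       transform.run(start, stop, tid);
+//   }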
+
+class IInputTransform : public ITransform
+{
+ public:
+ virtual ~IInputTransform() = default;
+
+ /**
+ * Set the pointer to the (NHWC-ordered) tensor to be transformed.
+ */
+ virtual void set_input_tensor(const void *input) = 0;
+
+ /**
+ * Set the pointer to the (NHWC-ordered) tensor to be transformed.
+ * @param col_stride Stride between columns of the tensor, measured in elements (not bytes).
+ */
+ virtual void set_input_tensor(const void *input, int col_stride) = 0;
+
+ /**
+ * Set the pointer to the (NHWC-ordered) tensor to be transformed.
+ * @param row_stride Stride between rows of the tensor, measured in elements (not bytes).
+ * @param col_stride Stride between columns of the tensor, measured in elements (not bytes).
+ */
+ virtual void set_input_tensor(const void *input, int row_stride, int col_stride) = 0;
+
+ /**
+ * Set the pointer to the (NHWC-ordered) tensor to be transformed.
+ * @param batch_stride Stride between batches of the tensor, measured in elements (not bytes).
+ * @param row_stride Stride between rows of the tensor, measured in elements (not bytes).
+ * @param col_stride Stride between columns of the tensor, measured in elements (not bytes).
+ */
+ virtual void set_input_tensor(const void *input, int batch_stride, int row_stride, int col_stride) = 0;
+
+ /**
+ * Set pointers to the matrices written by the transform.
+ * @param matrices Pointer to the start of the first matrix representing the transformed input.
+ * @param inter_matrix_stride Stride (in elements) between matrices.
+ * @param matrix_row_stride Stride (in elements) between the rows within a single matrix.
+ */
+ virtual void set_output_matrices(void *matrices, int inter_matrix_stride, int matrix_row_stride) = 0;
+};
+
+class IOutputTransform : public ITransform
+{
+ public:
+ virtual ~IOutputTransform() = default;
+
+ /**
+ * Set pointers to the matrices written by the transform.
+ * @param matrices Pointer to the start of the first matrix representing the input to the transform.
+ * @param inter_matrix_stride Stride (in elements) between matrices.
+ * @param matrix_row_stride Stride (in elements) between the rows within a single matrix.
+ */
+ virtual void set_input_matrices(const void *matrices, int inter_matrix_stride, int matrix_row_stride) = 0;
+
+ /**
+ * Set pointer to the bias tensor (can be ignored or called with nullptr for no bias.
+ */
+ virtual void set_bias(const void *bias=nullptr) = 0;
+
+ /**
+ * Set pointer to the output tensor produced by the transform.
+ */
+ virtual void set_output_tensor(void *output) = 0;
+
+ /**
+ * Set pointer to the output tensor produced by the transform.
+ * @param col_stride Stride between columns of the tensor, measured in elements (not bytes).
+ */
+ virtual void set_output_tensor(void *output, int col_stride) = 0;
+
+ /**
+ * Set pointer to the output tensor produced by the transform.
+ * @param row_stride Stride between rows of the tensor, measured in elements (not bytes).
+ * @param col_stride Stride between columns of the tensor, measured in elements (not bytes).
+ */
+ virtual void set_output_tensor(void *output, int row_stride, int col_stride) = 0;
+
+ /**
+ * Set pointer to the output tensor produced by the transform.
+ * @param batch_stride Stride between batches of the tensor, measured in elements (not bytes).
+ * @param row_stride Stride between rows of the tensor, measured in elements (not bytes).
+ * @param col_stride Stride between columns of the tensor, measured in elements (not bytes).
+ */
+ virtual void set_output_tensor(void *output, int batch_stride, int row_stride, int col_stride) = 0;
+};
+
+class IWeightTransform : public ITransform
+{
+ public:
+ virtual ~IWeightTransform() = default;
+
+ /** Set pointer to the weight tensor read by the transform. */
+ virtual void set_weight_tensor(const void *weights) = 0;
+
+ /**
+ * Set pointers to the matrices written by the transform.
+ * @param matrices Pointer to the start of the first matrix representing the transformed input.
+ * @param inter_matrix_stride Stride (in elements) between matrices.
+ * @param matrix_row_stride Stride (in elements) between the rows within a single matrix.
+ */
+ virtual void set_output_matrices(void *matrices, int inter_matrix_stride, int matrix_row_stride) = 0;
+};
+
+enum class WinogradRoots
+{
+ Integers,
+};
+
+template <int InnerTileRows, int InnerTileCols, typename TIn, typename TOut, WinogradRoots Roots>
+class InputTransform : public IInputTransform
+{
+ public:
+ /** Create an InputTransform operator fixed on a given problem and set of
+ * pointers.
+ */
+ InputTransform(
+ int kernel_rows, /**< Number of rows in the kernel */
+ int kernel_cols, /**< Number of columns in the kernel */
+ int n_batches, /**< Number of batches in input tensor. */
+ int n_rows, /**< Number of rows in input tensor. */
+ int n_cols, /**< Number of columns in input tensor. */
+ int n_channels, /**< Number of channels in input tensor. */
+ int padding_top, /**< Padding to apply to the top of the image. */
+ int padding_left, /**< Padding to apply to the left of the image. */
+ int padding_bottom, /**< Padding to apply to the bottom of the image. */
+ int padding_right /**< Padding to apply to the right of the image. */
+ );
+
+ InputTransform(InputTransform&) = delete;
+ InputTransform operator=(InputTransform&) = delete;
+
+ /** Set pointers to the input tensor read by the transform. */
+ void set_input_tensor(const void *input) override;
+ void set_input_tensor(const void *input, int col_stride) override;
+ void set_input_tensor(const void *input, int row_stride, int col_stride) override;
+ void set_input_tensor(const void *input, int batch_stride, int row_stride, int col_stride) override;
+
+ /** Set pointers to the matrices written by the transform. */
+ void set_output_matrices(void *matrices, int inter_matrix_stride, int matrix_row_stride) override;
+
+ /** Get the working space required to perform the transformation. */
+ size_t get_working_space_size(unsigned int nthreads=1) const override;
+ void set_working_space(void *buffer) override;
+
+ /** Get the window of work a given operator can perform. */
+ unsigned int get_window() const override;
+ static constexpr unsigned int WINDOW_BLOCK = 16; // Base size of window
+
+ /** Perform work upon a window of the input. */
+ void run(unsigned int start, unsigned int stop, unsigned int threadid=0) override;
+
+ protected:
+ const int _n_batches, _n_rows, _n_cols, _n_channels;
+
+ private:
+ void transform_unpadded_tile(
+ unsigned int threadid,
+ int n_channels,
+ TOut *outptr,
+ const TIn *inptr
+ );
+
+ void transform_padded_tile(
+ unsigned int threadid,
+ int n_channels,
+ TOut *outptr,
+ const TIn *inptr,
+ int padding_top,
+ int padding_left,
+ int padding_bottom,
+ int padding_right
+ );
+
+ /* Tile implementation */
+ static void transform_tile(
+ int n_channels, /** @param[in] Number of channels in the tensor. */
+ const TIn* inptr_base, /** @param[in] Pointer to the base of the input tile. */
+ int input_row_stride, /** @param[in] Stride between rows of the input tensor. */
+ int input_col_stride, /** @param[in] Stride between columns of the input tensor. */
+ TOut* mptr_base, /** @param[out] Base pointer to transformed input matrices. */
+ int matrix_stride /** @param[in] Stride between matrices in the input space. */
+ );
+
+ /** Get the working space for a thread. */
+ void * get_working_space(unsigned int threadid) const;
+
+ const TIn* _inptr;
+ TOut* _outptr;
+
+ const int _overlap_rows, _overlap_cols;
+ const int _padding_top, _padding_left, _padding_bottom, _padding_right;
+ const int _tiles_M, _tiles_N;
+ int _matrix_stride, _matrix_row_stride, _matrix_batch_stride;
+ int _in_col_stride, _in_row_stride, _in_batch_stride;
+
+ const int _working_space_col_stride, _working_space_row_stride;
+ TIn *_working_space;
+};
+
+template <int InnerTileRows, typename TIn, typename TOut, WinogradRoots Roots>
+class InputTransform<InnerTileRows, 1, TIn, TOut, Roots> :
+ public InputTransform<1, InnerTileRows, TIn, TOut, Roots>
+{
+ using Base = InputTransform<1, InnerTileRows, TIn, TOut, Roots>;
+
+ public:
+ InputTransform(
+ int kernel_rows, /**< Number of rows in the kernel. */
+ int kernel_cols, /**< Number of columns in the kernel. */
+ int n_batches, /**< Number of batches in input tensor. */
+ int n_rows, /**< Number of rows in input tensor. */
+ int n_cols, /**< Number of columns in input tensor. */
+ int n_channels, /**< Number of channels in input tensor. */
+ int padding_top, /**< Padding to apply to the top of the image. */
+ int padding_left, /**< Padding to apply to the left of the image. */
+ int padding_bottom, /**< Padding to apply to the bottom of the image. */
+ int padding_right /**< Padding to apply to the right of the image. */
+ );
+
+ /** Set pointers to the input tensor read by the transform. */
+ void set_input_tensor(const void *input) override;
+ void set_input_tensor(const void *input, int col_stride) override;
+ void set_input_tensor(const void *input, int row_stride, int col_stride) override;
+ void set_input_tensor(const void *input, int batch_stride, int row_stride, int col_stride) override;
+};
+
+template <
+ int KernelRows, int KernelCols,
+ int InnerTileRows, int InnerTileCols,
+ typename TIn, typename TOut,
+ WinogradRoots Roots
+>
+class OutputTransform : public IOutputTransform
+{
+ public:
+ OutputTransform(
+ int n_batches, /**< Number of batches in output tensor. */
+ int n_rows, /**< Number of rows in output tensor. */
+ int n_cols, /**< Number of columns in output tensor. */
+ int n_channels, /**< Number of channels in output tensor. */
+ const arm_gemm::Activation &activation
+ );
+
+ OutputTransform(OutputTransform&) = delete;
+ OutputTransform operator=(OutputTransform&) = delete;
+
+ /** Set pointers to the matrices read by the transform. */
+ void set_input_matrices(const void *matrices, int inter_matrix_stride, int matrix_row_stride) override;
+
+ /** Set pointer to the bias tensor (can be ignored or called with nullptr for no bias). */
+ void set_bias(const void *bias=nullptr) override;
+
+ /** Set pointers to the output tensor written by the transform. */
+ void set_output_tensor(void *output) override;
+ void set_output_tensor(void *output, int col_stride) override;
+ void set_output_tensor(void *output, int row_stride, int col_stride) override;
+ void set_output_tensor(void *output, int batch_stride, int row_stride, int col_stride) override;
+
+ /** Get the working space required to perform the transformation. */
+ size_t get_working_space_size(unsigned int nthreads=1) const override;
+ void set_working_space(void *buffer) override;
+
+ /** Get the window of work a given operator can perform. */
+ unsigned int get_window() const override;
+ static constexpr unsigned int WINDOW_BLOCK = 16; // Base size of window
+
+ /** Perform work upon a window of the input. */
+ void run(unsigned int start, unsigned int stop, unsigned int threadid=0) override;
+
+ protected:
+ static constexpr int inner_tile_rows = InnerTileRows;
+ static constexpr int inner_tile_cols = InnerTileCols;
+ static constexpr int output_tile_rows = InnerTileRows - KernelRows + 1;
+ static constexpr int output_tile_cols = InnerTileCols - KernelCols + 1;
+
+ const int _n_batches, _n_rows, _n_cols, _n_channels;
+ const TOut _output_min, _output_max;
+
+ private:
+ void transform_uncropped_tile(
+ unsigned int threadid,
+ int n_channels,
+ TOut *outptr,
+ const TIn *inptr,
+ const TOut *biases
+ );
+
+ void transform_cropped_tile(
+ unsigned int threadid,
+ int n_channels,
+ TOut *outptr,
+ const TIn *inptr,
+ const TOut *biases,
+ int pad_bottom,
+ int pad_right
+ );
+
+ /** Implementation of the tile transformation method. */
+ static void transform_tile(
+ int n_channels,
+ const TIn* matrix_base,
+ int matrix_stride,
+ const TOut* biases,
+ TOut* output,
+ int output_row_stride,
+ int output_col_stride,
+ TOut output_min,
+ TOut output_max
+ );
+
+ /** Get the working space for a thread. */
+ void * get_working_space(unsigned int threadid) const;
+
+ const TIn* _matrix_base;
+ const TOut* _biases;
+ int _matrix_stride, _matrix_row_stride, _matrix_batch_stride;
+ TOut* _outptr;
+ const int _tiles_M, _tiles_N;
+ int _out_col_stride, _out_row_stride, _out_batch_stride;
+
+ const int _working_space_col_stride, _working_space_row_stride;
+ TOut *_working_space;
+};
+
+template <
+ int KernelRows,
+ int InnerTileRows,
+ typename TIn, typename TOut,
+ WinogradRoots Roots
+>
+class OutputTransform<KernelRows, 1, InnerTileRows, 1, TIn, TOut, Roots> :
+ public OutputTransform<1, KernelRows, 1, InnerTileRows, TIn, TOut, Roots>
+{
+ using Base = OutputTransform<1, KernelRows, 1, InnerTileRows, TIn, TOut, Roots>;
+
+ public:
+ OutputTransform(
+ int n_batches, /**< Number of batches in output tensor. */
+ int n_rows, /**< Number of rows in output tensor. */
+ int n_cols, /**< Number of columns in output tensor. */
+ int n_channels, /**< Number of channels in output tensor. */
+ const arm_gemm::Activation &activation
+ );
+
+ /** Set pointers to the output tensor written by the transform. */
+ void set_output_tensor(void *output) override;
+ void set_output_tensor(void *output, int col_stride) override;
+ void set_output_tensor(void *output, int row_stride, int col_stride) override;
+ void set_output_tensor(void *output, int batch_stride, int row_stride, int col_stride) override;
+};
+
+template <
+ int KernelRows, int KernelCols,
+ int InnerTileRows, int InnerTileCols,
+ typename TIn, typename TOut,
+ WinogradRoots Roots
+>
+class WeightTransform : public IWeightTransform
+{
+ public:
+ WeightTransform(
+ int n_output_channels, /**< Number of output channels in the kernel. */
+ int n_input_channels /**< Number of input channels in the kernel. */
+ );
+
+ WeightTransform(WeightTransform&) = delete;
+ WeightTransform operator=(WeightTransform&) = delete;
+
+ /** Set pointer to the weight tensor read by the transform. */
+ void set_weight_tensor(const void *weights) override;
+
+ /** Set pointer to the matrices written by the transform. */
+ void set_output_matrices(void *matrices, int inter_matrix_stride, int matrix_row_stride) override;
+
+ /** Get the working space required to perform the transformation. */
+ size_t get_working_space_size(unsigned int nthreads=1) const override;
+ void set_working_space(void *buffer) override;
+
+ /** Get the window of work a given operator can perform. */
+ unsigned int get_window() const override;
+ static constexpr unsigned int WINDOW_BLOCK = 16; // Base size of window
+
+ /** Perform work upon a window of the input. */
+ void run(unsigned int start, unsigned int stop, unsigned int threadid=0) override;
+
+ protected:
+ static const int kernel_rows = KernelRows;
+ static const int kernel_cols = KernelCols;
+ static const int inner_tile_rows = InnerTileRows;
+ static const int inner_tile_cols = InnerTileCols;
+
+ private:
+ /** Apply the transform to a tensor. */
+ static void execute(
+ int n_output_channels,
+ int n_input_channels,
+ const TIn* input,
+ TOut* output,
+ int matrix_stride,
+ int matrix_row_stride
+ );
+
+ const int _n_output_channels, _n_input_channels;
+ TOut *_matrices;
+ int _matrix_stride, _matrix_row_stride;
+ const TIn *_weights;
+};
+
+template <int KernelRows, int InnerTileRows, typename TIn, typename TOut, WinogradRoots Roots>
+class WeightTransform<KernelRows, 1, InnerTileRows, 1, TIn, TOut, Roots> :
+ public WeightTransform<1, KernelRows, 1, InnerTileRows, TIn, TOut, Roots>
+{
+ public:
+ using WeightTransform<1, KernelRows, 1, InnerTileRows, TIn, TOut, Roots>::WeightTransform;
+};
+
+template <int OutputTileRows, int OutputTileCols, int KernelRows, int KernelCols, WinogradRoots Roots>
+class WinogradGEMM
+{
+ public:
+ // Information about the specific Winograd instance
+ static constexpr int output_tile_rows = OutputTileRows;
+ static constexpr int output_tile_cols = OutputTileCols;
+ static constexpr int kernel_rows = KernelRows;
+ static constexpr int kernel_cols = KernelCols;
+ static constexpr int inner_tile_rows = output_tile_rows + kernel_rows - 1;
+ static constexpr int inner_tile_cols = output_tile_cols + kernel_cols - 1;
+ static constexpr int N_GEMMS = inner_tile_rows * inner_tile_cols;
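+ // For example, an F(2x2, 3x3) instantiation (output tile 2x2, kernel 3x3)
+ // has 4x4 inner tiles (2 + 3 - 1 = 4) and therefore N_GEMMS = 16.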
+
+ /** Transform weights from the spatial to the Winograd domain. */
+ template <typename TIn, typename TOut>
+ using WeightsTransform = WeightTransform<
+ KernelRows, KernelCols, inner_tile_rows, inner_tile_cols,
+ TIn, TOut, Roots
+ >;
+
+ /** Transform input feature maps from the spatial to the Winograd domain.
+ */
+ template <typename TIn, typename TOut>
+ using InputTransform = InputTransform<
+ inner_tile_rows, inner_tile_cols, TIn, TOut, Roots
+ >;
+
+ /** Transform output feature maps from the Winograd to the spatial domain.
+ */
+ template <typename TIn, typename TOut>
+ using OutputTransform = OutputTransform<
+ KernelRows, KernelCols, inner_tile_rows, inner_tile_cols,
+ TIn, TOut, Roots
+ >;
+
+ /** Perform a convolution.
+ */
+ template <typename TOut, typename TIn, typename TInGEMM=TIn, typename TOutGEMM=TOut>
+ class Convolution
+ {
+ public:
+ // Information about the typed Winograd instance
+ typedef TOut OutputType;
+ typedef TOutGEMM GemmOutputType;
+ typedef TInGEMM GemmInputType;
+ typedef TIn InputType;
+
+ /** Get the output shape of a convolution. */
+ static std::pair<unsigned int, unsigned int> get_output_shape(
+ const std::pair<unsigned int, unsigned int> input_shape,
+ bool padding_same);
+
+ /** Get the memory required to store the kernel transformed into the
+ * Winograd domain.
+ */
+ static size_t get_kernel_storage_size(unsigned int n_input_channels,
+ unsigned int n_output_channels);
+
+ /** Get the memory required to store the input tensor transformed into
+ * the Winograd domain.
+ */
+ static size_t get_input_storage_size(
+ unsigned int n_batches, // Number of batches
+ unsigned int n_rows, // Number of input rows
+ unsigned int n_cols, // Number of input columns
+ unsigned int n_channels, // Number of input channels
+ bool padding_same);
+
+ /** Get the memory required to store the output tensor in the Winograd
+ * domain.
+ */
+ static size_t get_output_storage_size(
+ unsigned int n_batches, // Number of batches
+ unsigned int n_rows, // Number of output rows
+ unsigned int n_cols, // Number of output columns
+ unsigned int n_channels // Number of output channels
+ );
+
+ /** Get the memory required to apply a Winograd operator to some input.
+ */
+ static size_t get_working_space_size(
+ unsigned int n_batches,
+ unsigned int n_rows, // Number of input rows
+ unsigned int n_cols, // Number of input columns
+ unsigned int n_input_channels, // Number of input channels
+ unsigned int n_output_channels, // Number of output channels
+ bool padding_same);
+
+ /* Get the memory required by a single "input" matrix.
+ */
+ static size_t get_input_matrix_size(
+ unsigned int n_batches, // Number of batches
+ unsigned int n_rows, // Number of input rows
+ unsigned int n_cols, // Number of input columns
+ unsigned int n_channels, // Number of input channels
+ bool padding_same);
+
+ static int get_input_matrix_stride(
+ unsigned int n_batches, // Number of batches
+ unsigned int n_rows, // Number of input rows
+ unsigned int n_cols, // Number of input columns
+ unsigned int n_channels, // Number of input channels
+ bool padding_same);
+
+ /* Get the memory required by a single "output" matrix.
+ */
+ static size_t get_output_matrix_size(
+ unsigned int n_batches, // Number of batches
+ unsigned int n_rows, // Number of output rows
+ unsigned int n_cols, // Number of output columns
+ unsigned int n_channels // Number of output channels
+ );
+
+ static int get_output_matrix_stride(
+ unsigned int n_batches, // Number of batches
+ unsigned int n_rows, // Number of output rows
+ unsigned int n_cols, // Number of output columns
+ unsigned int n_channels // Number of output channels
+ );
+
+ /* Get the memory required by a single "kernel" matrix.
+ */
+ static size_t get_kernel_matrix_size(unsigned int n_input_channels,
+ unsigned int n_output_channels);
+ static int get_kernel_matrix_stride(unsigned int n_input_channels,
+ unsigned int n_output_channels);
+
+ static constexpr int M_BLOCK = 4; /** Size of block used by GEMM. */
+ static constexpr int N_BLOCK = 16; /** Size of block used by GEMM. */
+ };
+};
+
+} // namespace winograd
diff --git a/src/core/NEON/kernels/convolution/winograd/winograd_layer.hpp b/src/core/NEON/kernels/convolution/winograd/winograd_layer.hpp
new file mode 100644
index 0000000000..ed8fede385
--- /dev/null
+++ b/src/core/NEON/kernels/convolution/winograd/winograd_layer.hpp
@@ -0,0 +1,207 @@
+/*
+ * Copyright (c) 2017-2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#pragma once
+#include "arm_gemm_local.hpp"
+#include "arm_gemm.hpp"
+#include "winograd.hpp"
+
+namespace winograd
+{
+
+
+class IWinogradConvolutionLayer
+{
+ public:
+ virtual ~IWinogradConvolutionLayer() = default;
+
+ virtual unsigned int weight_transform_get_window(void) const = 0;
+ virtual void weight_transform_run(unsigned int start, unsigned int stop) = 0;
+
+ virtual IInputTransform& input_transform(void) = 0; // Expose the input transform
+ virtual IOutputTransform& output_transform(void) = 0; // Expose the output transform
+ virtual arm_gemm::IGemmCommon *gemm(void) = 0; // Expose the underlying GEMM
+};
+
+/** Example of how to construct an ACL-like interface.
+ *
+ * Use `get_weight_storage_size`, `get_input_storage_size` and
+ * `get_output_storage_size` to allocate memory for the convolution engine.
+ * Then create a `WinogradConvolutionLayer`.
+ *
+ * Initialise the weights using `weights_transform.run(...)`.
+ *
+ * For each inference:
+ * 1. Transform the inputs to the Winograd domain using `input_transform.run(...)`
+ * 2. Perform a number of GEMMs using `gemms.run(...)`
+ * 3. Transform the output to the spatial domain using `output_transform.run(...)`
+ */
+template <int OutputTileRows, int OutputTileCols, int KernelRows, int KernelCols,
+ typename TIn, typename TInGEMM, typename TOutGEMM, typename TOut,
+ WinogradRoots Roots>
+class WinogradConvolutionLayer : public IWinogradConvolutionLayer
+{
+ public:
+ using WinogradBase = winograd::WinogradGEMM<OutputTileRows, OutputTileCols, KernelRows, KernelCols, Roots>;
+ using WeightsTransform = typename WinogradBase::template WeightsTransform<TIn, TInGEMM>;
+ using InputTransform = typename WinogradBase::template InputTransform<TIn, TInGEMM>;
+ using WinogradConv = typename WinogradBase::template Convolution<TOut, TIn, TInGEMM, TOutGEMM>;
+ using OutputTransform = typename WinogradBase::template OutputTransform<TOutGEMM, TOut>;
+
+ private:
+ static constexpr int InnerTileRows = OutputTileRows + KernelRows - 1;
+ static constexpr int InnerTileCols = OutputTileCols + KernelCols - 1;
+ static constexpr int N_GEMMS = InnerTileRows * InnerTileCols;
+
+ const int _n_output_rows, _n_output_cols;
+ const int _kernel_matrix_stride, _kernel_matrix_row_stride;
+ const int _input_matrix_stride, _input_matrix_row_stride;
+ const int _output_matrix_stride, _output_matrix_row_stride;
+ const int _tile_rows, _tile_cols;
+ const int _m, _k, _n;
+
+ WeightsTransform weights_transform; /** Operator to transform weights to Winograd domain. */
+ InputTransform _input_transform; /** Operator to transform input to Winograd domain. */
+ const arm_gemm::GemmArgs gemm_args;
+ arm_gemm::UniqueGemmCommon<TInGEMM, TOutGEMM> gemms; /** Operator to perform multiple GEMMs. */
+ OutputTransform _output_transform; /** Operator to transform output from Winograd domain. */
+
+ public:
+
+ /** Determine how much memory (in units of TIn) to allocate for the
+ * transformed weights.
+ */
+ static unsigned int get_weight_storage_size(
+ const int n_output_channels, /** Number of output feature maps. */
+ const int n_input_channels /** Number of input feature maps. */
+ );
+
+ static unsigned int get_weight_stride(
+ const int n_output_channels, /** Number of output feature maps. */
+ const int n_input_channels /** Number of input feature maps. */
+ );
+
+ static unsigned int get_weight_multi_stride(
+ const int n_output_channels, /** Number of output feature maps. */
+ const int n_input_channels /** Number of input feature maps. */
+ );
+
+ /** Determine how much memory (in units of TIn) to allocate for the
+ * transformed input.
+ */
+ static unsigned int get_input_storage_size(
+ const int n_batches, /** Number of batches in the input tensor. */
+ const int n_channels, /** Number of feature maps in the input tensor. */
+ const int n_rows, /** Number of rows in each feature map. */
+ const int n_cols, /** Number of columns in each feature map. */
+ const bool same_padding /** Use "SAME" padding, otherwise use "VALID". */
+ );
+
+ /** Get the row stride for the A matrix in the Winograd domain. */
+ static unsigned int get_input_stride(
+ const int n_batches, /** Number of batches in the input tensor. */
+ const int n_channels, /** Number of feature maps in the input tensor. */
+ const int n_rows, /** Number of rows in each feature map. */
+ const int n_cols, /** Number of columns in each feature map. */
+ const bool same_padding /** Use "SAME" padding, otherwise use "VALID". */
+ );
+
+ /** Get the stride between A matrices in the Winograd domain. */
+ static unsigned int get_input_multi_stride(
+ const int n_batches, /** Number of batches in the input tensor. */
+ const int n_channels, /** Number of feature maps in the input tensor. */
+ const int n_rows, /** Number of rows in each feature map. */
+ const int n_cols, /** Number of columns in each feature map. */
+ const bool same_padding /** Use "SAME" padding, otherwise use "VALID". */
+ );
+
+ /** Determine how much memory (in units of TOut) to allocate for the
+ * (Winograd domain) output.
+ */
+ static unsigned int get_output_storage_size(
+ const int n_batches, /** Number of batches in the output tensor. */
+ const int n_rows, /** Number of rows in each feature map of the input tensor. */
+ const int n_cols, /** Number of columns in each feature map of the input tensor. */
+ const int n_output_channels, /** Number of feature maps in the output tensor. */
+ const bool same_padding /** Use "SAME" padding, otherwise use "VALID". */
+ );
+
+ static unsigned int get_output_stride(
+ const int n_batches, /** Number of batches in the output tensor. */
+ const int n_rows, /** Number of rows in each feature map of the input tensor. */
+ const int n_cols, /** Number of columns in each feature map of the input tensor. */
+ const int n_output_channels, /** Number of feature maps in the output tensor. */
+ const bool same_padding /** Use "SAME" padding, otherwise use "VALID". */
+ );
+
+ static unsigned int get_output_multi_stride(
+ const int n_batches, /** Number of batches in the output tensor. */
+ const int n_rows, /** Number of rows in each feature map of the input tensor. */
+ const int n_cols, /** Number of columns in each feature map of the input tensor. */
+ const int n_output_channels, /** Number of feature maps in the output tensor. */
+ const bool same_padding /** Use "SAME" padding, otherwise use "VALID". */
+ );
+
+ /** Get the shape (rows, cols) of a feature map of the output tensor. */
+ static std::pair<int, int> get_output_feature_map_shape(
+ const int n_input_rows, /** Number of rows in the input feature map. */
+ const int n_input_cols, /** Number of columns in the input feature map. */
+ const bool same_padding /** Use "SAME" padding, otherwise use "VALID". */
+ );
+
+ /** Create a new Winograd convolution layer.
+ */
+ WinogradConvolutionLayer(
+ const arm_gemm::CPUInfo &cpuinfo, /** Describes CPU properties. */
+ const int n_threads, /** Maximum number of threads used to execute the convolution. */
+ const int n_batches, /** Number of batches in the input and output tensors. */
+ const int n_input_channels, /** Number of feature maps in a batch of the input tensor. */
+ const int n_input_rows, /** Number of rows in a feature map of the input tensor. */
+ const int n_input_cols, /** Number of columns in a feature map of the input tensor. */
+ const int n_output_channels, /** Number of feature maps in the output tensor. */
+ const bool same_padding, /** Use "SAME" padding, otherwise use "VALID". */
+ const arm_gemm::Activation &activation,
+ const TIn* const weights, /** Pointer to weight tensor in spatial domain. Must be ordered as "Height x Rows x Input Feature Maps x Output Feature Maps". */
+ TInGEMM* const weights_storage, /** Pointer to storage for weight tensor in the Winograd domain. Must be at least the size returned by `get_weight_storage_size`. */
+ const TIn* const input, /** Pointer to NHWC ordered input tensor, in the spatial domain. */
+ TInGEMM* const winograd_input, /** Pointer to working space for the input tensor in the Winograd domain. Must be at least the size returned by `get_input_storage_size`. */
+ const TOut* const biases, /** Pointer to biases vector. Pass nullptr if no bias is provided. */
+ TOut* const output, /** Pointer to NHWC ordered output tensor, in the spatial domain. */
+ TOutGEMM* const winograd_output, /** Pointer to working space for the output tensor in the Winograd domain. Must be at least the size returned by `get_output_storage_size`. */
+ const bool pretranspose_B=true, /** Hint that the B matrix can be pretransposed. */
+ arm_gemm::GemmConfig *gemm_cfg=nullptr /** Pointer to GEMM configuration. */
+ );
+
+ /* Utility methods for interacting with the layer. */
+ unsigned int weight_transform_get_window(void) const;
+ void weight_transform_run(const unsigned int start, const unsigned int stop);
+
+ IInputTransform& input_transform(void);
+ IOutputTransform& output_transform(void);
+
+ /* Get a pointer to the GEMM underlying the Winograd transform. */
+ arm_gemm::IGemmCommon *gemm(void);
+};
+
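+// Illustrative usage sketch for the workflow documented above the class. The
+// F(2x2, 3x3) float instantiation, the buffer names and the single-threaded
+// calls to run() over each transform's full window are assumptions made for
+// the example.
+//
+//   using Layer = WinogradConvolutionLayer<2, 2, 3, 3, float, float, float, float,
+//                                          WinogradRoots::Integers>;
+//
+//   // Allocate weights_storage, winograd_input and winograd_output using
+//   // Layer::get_weight_storage_size(...), get_input_storage_size(...) and
+//   // get_output_storage_size(...), then construct the layer:
+//   Layer layer(cpuinfo, n_threads, n_batches, n_input_channels, n_input_rows,
+//               n_input_cols, n_output_channels, same_padding, activation,
+//               weights, weights_storage, input, winograd_input, biases,
+//               output, winograd_output);
+//
+//   // Once: transform the weights into the Winograd domain.
+//   layer.weight_transform_run(0, layer.weight_transform_get_window());
+//
+//   // Per inference: input transform -> GEMMs -> output transform.
+//   layer.input_transform().run(0, layer.input_transform().get_window());
+//   // ... execute the GEMM exposed by layer.gemm() ...
+//   layer.output_transform().run(0, layer.output_transform().get_window());
+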
+} // namespace winograd
diff --git a/src/runtime/NEON/functions/NEGEMMAssemblyDispatch.cpp b/src/runtime/NEON/functions/NEGEMMAssemblyDispatch.cpp
index 24bd7d7a8c..7a1f0850b2 100644
--- a/src/runtime/NEON/functions/NEGEMMAssemblyDispatch.cpp
+++ b/src/runtime/NEON/functions/NEGEMMAssemblyDispatch.cpp
@@ -23,10 +23,14 @@
*/
#include "arm_compute/runtime/NEON/functions/NEGEMMAssemblyDispatch.h"
+#include "src/core/NEON/kernels/assembly/arm_gemm.hpp"
+
#include "arm_compute/core/CPP/Validate.h"
#include "arm_compute/runtime/NEON/NEScheduler.h"
#include "arm_compute/runtime/NEON/functions/NESimpleAssemblyFunction.h"
+#include "src/core/NEON/kernels/assembly/NEGEMMAssemblyWrapperKernel.h"
+
#include <arm_neon.h>
namespace arm_compute
@@ -433,7 +437,6 @@ void Fallback<TypeInput, TypeOutput, OutputStage>::run()
{
const int granule_threshold = 200;
scheduling_hint = IScheduler::Hints(Window::DimX, IScheduler::StrategyHint::DYNAMIC, granule_threshold);
-
}
else if(_kernel_info.method == arm_gemm::GemmMethod::GEMM_INTERLEAVED_2D && _d->info()->data_type() == DataType::F32)
{
@@ -467,6 +470,7 @@ void create_arm_gemm_quant(std::unique_ptr<NEGEMMAssemblyDispatch::IFallback> &a
const ITensor *a, const ITensor *b, const ITensor *c, ITensor *d, arm_gemm::Activation activation, const GEMMInfo &gemm_info,
IWeightsManager *weights_manager)
{
+ ARM_COMPUTE_UNUSED(activation);
INEGEMMWrapperKernel::Params p = INEGEMMWrapperKernel::extract_parameters(a, b, d, gemm_info);
const CPUInfo &ci = NEScheduler::get().cpu_info();
unsigned int num_threads = NEScheduler::get().num_threads();
diff --git a/src/runtime/NEON/functions/NEWinogradConvolutionLayer.cpp b/src/runtime/NEON/functions/NEWinogradConvolutionLayer.cpp
index d567a18709..a74e710c62 100644
--- a/src/runtime/NEON/functions/NEWinogradConvolutionLayer.cpp
+++ b/src/runtime/NEON/functions/NEWinogradConvolutionLayer.cpp
@@ -25,16 +25,16 @@
#include "arm_compute/core/CPP/Validate.h"
#include "arm_compute/core/Error.h"
-#include "arm_compute/core/NEON/kernels/NEWinogradConvolutionLayerKernel.h"
#include "arm_compute/core/Utils.h"
#include "arm_compute/core/Validate.h"
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
#include "arm_compute/runtime/NEON/NEScheduler.h"
#include "arm_compute/runtime/NEON/functions/NEGEMMAssemblyDispatch.h"
+#include "src/core/NEON/kernels/NEWinogradConvolutionLayerKernel.h"
#include "support/MemorySupport.h"
#include "arm_compute/core/NEON/kernels/convolution/common/utils.hpp"
-#include "arm_compute/core/NEON/kernels/convolution/winograd/winograd.hpp"
+#include "src/core/NEON/kernels/convolution/winograd/winograd.hpp"
namespace arm_compute
{