path: root/arm_compute/core/NEON/kernels/convolution/winograd/winograd_gemm.hpp
/*
 * Copyright (c) 2017 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#pragma once

#include "arm_compute/core/NEON/kernels/convolution/common/alloc.hpp"
#include "arm_compute/core/NEON/kernels/convolution/common/convolution.hpp"
#include "gemm.hpp"
#include "arm_compute/core/NEON/kernels/convolution/common/profiler.hpp"
#include "arm_compute/core/NEON/kernels/convolution/common/shims.hpp"
#include "arm_compute/core/NEON/kernels/convolution/common/tensor.hpp"
#include "arm_compute/core/NEON/kernels/convolution/common/utils.hpp"

#include <thread>
#include <utility>
#include <vector>

// Generic Winograd implementation using GEMM
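// A Winograd convolution proceeds in three stages: the weights and the input
// tiles are transformed into the Winograd domain, N_GEMMS independent matrix
// multiplications (one per element of the inner tile) are performed, and the
// results are transformed back to the spatial domain with the per-channel
// bias added. WinogradGEMM groups the transform operators and the Convolution
// driver for one choice of output-tile and kernel size.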
namespace winograd
{

template <int OutputTileRows, int OutputTileCols, int KernelRows, int KernelCols>
class WinogradGEMM
{
  public:
    // Information about the specific Winograd instance
    static constexpr int output_tile_rows = OutputTileRows;
    static constexpr int output_tile_cols = OutputTileCols;
    static constexpr int kernel_rows = KernelRows;
    static constexpr int kernel_cols = KernelCols;
    static constexpr int inner_tile_rows = output_tile_rows + kernel_rows - 1;  // Input tile rows consumed per output tile
    static constexpr int inner_tile_cols = output_tile_cols + kernel_cols - 1;  // Input tile cols consumed per output tile
    static constexpr int N_GEMMS = inner_tile_rows * inner_tile_cols;
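    // For example, WinogradGEMM<2, 2, 3, 3> (the F(2x2, 3x3) algorithm) uses
    // inner tiles of (2 + 3 - 1) x (2 + 3 - 1) = 4 x 4 elements and therefore
    // performs N_GEMMS = 16 independent matrix multiplications.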

    /** Transform weights from the spatial to the Winograd domain. */
    template <typename T>
    struct WeightsTransform
    {
      /** Get the bytes read during the transform. */
      static inline size_t bytes_read(const KernelShape &shape)
      {
        return shape.size() * sizeof(T);
      }

      /** Get the bytes written during the transform. */
      static inline size_t bytes_written(const KernelShape &shape)
      {
        const int inner_tile_size = inner_tile_rows * inner_tile_cols;
        return (inner_tile_size * shape.n_input_channels *
                shape.n_output_channels * sizeof(T));
      }

      /** Get the count of operations performed by the transform. */
      static int ops_performed(const KernelShape &shape);

      /** Apply the transform to a tensor. */
      static void execute(
        const int n_output_channels,
        const int n_input_channels,
        const T* const input,
        T* const output,
        const int matrix_stride,
        const int matrix_row_stride
      );

      /** Create a WeightsTransform operator fixed on a given problem and set
       * of pointers.
       */
      WeightsTransform(
        const T* const input,
        T* const output,
        const int matrix_stride,       /** Stride across matrices in the output. */
        const int matrix_row_stride,   /** Stride across rows of the matrix. */
        const int n_output_channels,   /** Number of filters. */
        const int n_input_channels     /** Number of channels in each filter. */
      );

      /** Get the window of work a given operator can perform. */
      unsigned int get_window() const;

      /** Perform work upon a window of the input. */
      void run(const unsigned int start, const unsigned int stop);

      private:
        const T* const inptr;         /** Fixed pointer to input data. */
        T* const outptr;              /** Fixed pointer to output memory. */
        const int matrix_stride;      /** Stride between output matrices. */
        const int matrix_row_stride;  /** Stride within output matrices. */
        const int n_output_channels;  /** Number of filters. */
        const int n_input_channels;   /** Number of channels in each filter. */
    };
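
    // Illustrative usage sketch (variable names here are assumptions, not part
    // of the library): construct a WeightsTransform over fixed pointers, then
    // execute its whole window; get_window()/run() allow the work to be split
    // across threads.
    //
    //   using WinogradConv = winograd::WinogradGEMM<2, 2, 3, 3>;
    //   WinogradConv::WeightsTransform<float> wt(
    //       weights_spatial,       // spatial-domain weights
    //       weights_winograd,      // Winograd-domain storage
    //       matrix_stride,         // elements between successive matrices
    //       matrix_row_stride,     // elements between rows within one matrix
    //       n_output_channels,
    //       n_input_channels);
    //   wt.run(0, wt.get_window());  // single-threaded: process the full window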

    /** Transform input feature maps from the spatial to the Winograd domain.
     */
    template <typename T>
    struct InputTransform
    {
      /** Get the bytes read during the transform. */
      static size_t bytes_read(const Tensor4DShape &shape)
      {
        return shape.size() * sizeof(T);
      }

      /** Get the bytes written during the transform. */
      static size_t bytes_written(const Tensor4DShape &shape)
      {
        const int M = iceildiv(shape.n_rows, inner_tile_rows) *
                      iceildiv(shape.n_cols, inner_tile_cols);
        const int K = shape.n_channels;
        return inner_tile_rows * inner_tile_cols * M * K * sizeof(T);
      }

      /** Get the count of operations performed by the transform. */
      static int ops_performed(const Tensor4DShape &shape);

      /** Apply the transform to a tensor. */
      static void execute(
          const T *inptr,
          const Tensor4DShape& input_shape,
          const PaddingType padding_type,
          const int tile_M,
          const int tile_N,
          T *outptr_base,
          const int matrix_stride,
          const int matrix_batch_stride,
          const int matrix_row_stride
      );

      /***********************************************************************/
      /** Create an InputTransform operator fixed on a given problem and set of
       * pointers.
       */
      InputTransform(
          const T* const input,        /** Input tensor data */
          const int n_batches,         /** Number of batches in input tensor. */
          const int n_rows,            /** Number of rows in input tensor. */
          const int n_cols,            /** Number of columns in input tensor. */
          const int n_channels,        /** Number of channels in input tensor. */
          const PaddingType padding,   /** Padding type. */
          T* const output,             /** Base of output matrices. */
          const int matrix_stride,     /** Stride between output matrices. */
          const int matrix_row_stride  /** Stride within matrices. */
      );

      /** Get the window of work a given operator can perform. */
      unsigned int get_window() const;

      /** Perform work upon a window of the input. */
      void run(const unsigned int start, const unsigned int stop);
      /***********************************************************************/

      private:
        static void process_tile_row(
          const int tile_N,
          int n_channels,
          const T* const input_base,
          const int input_row_stride,
          const int input_col_stride,
          T* const matrix_base,
          const int matrix_stride,
          const int matrix_row_stride,
          const int row_pad_top,
          const int row_pad_left,
          const int row_pad_bottom,
          const int n_cols
        );

        static constexpr int max_pad_bottom = inner_tile_rows - 1;
        static constexpr int max_pad_right = inner_tile_cols - 1;

        /** Process a single tile of the input tensor. */
        template <int pad_top, int pad_left, int pad_bottom, int pad_right>
        static void process_tile(int, const T*, int, int, T*, int);

        // Array of methods to transform tiles of the input tensor.
        typedef void (*TileFn)(int, const T*, int, int, T*, int);
        static const TileFn tile_fns[2][2][max_pad_bottom][max_pad_right];

        /* Member values for instance-based API. */
        const T* const _inptr;
        T* const _outptr;
        const int _n_batches, _n_rows, _n_cols, _n_channels, _matrix_stride,
                  _matrix_row_stride, _tiles_M, _tiles_N;
        const PaddingType _padding_type;
    };
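
    // Illustrative usage sketch (assumed variable names, with WinogradConv
    // aliased as in the WeightsTransform sketch above): transform the input
    // tensor into the Winograd-domain matrices consumed by the GEMMs.
    //
    //   WinogradConv::InputTransform<float> it(
    //       input_data,            // input tensor data
    //       n_batches, n_rows, n_cols, n_channels,
    //       padding,               // PaddingType chosen for the convolution
    //       input_matrices,        // base of the output matrices
    //       matrix_stride,
    //       matrix_row_stride);
    //   it.run(0, it.get_window());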

    /** Transform output feature maps from the Winograd to the spatial domain.
     */
    template <typename T>
    struct OutputTransform
    {
      /** Get the bytes read during the transform. */
      static size_t bytes_read(const Tensor4DShape &shape);

      /** Get the bytes written during the transform. */
      static size_t bytes_written(const Tensor4DShape &shape);

      /** Get the count of operations performed by the transform. */
      static int ops_performed(const Tensor4DShape &shape);

      /** Apply the transform to create a tensor. */
      static void execute(
        const Tensor4DShape &output_shape,
        const T* const matrix_base,
        const int matrix_stride,
        const int matrix_row_stride,
        const T* const biases,
        T* const output
      );

      /***********************************************************************/
      /** Create an OutputTransform operator fixed on a given problem and set
       * of pointers.
       */
      OutputTransform(
        const T* const matrix_base,   /** Pointer to base of matrices. */
        const int matrix_stride,      /** Stride between matrices. */
        const int matrix_row_stride,  /** Stride within a matrix. */
        const T* const biases,        /** Pointer to biases vector. */
        T* const output,              /** Pointer to output tensor. */
        const int n_batches,          /** Number of batches in output tensor. */
        const int n_rows,             /** Number of rows in output tensor. */
        const int n_cols,             /** Number of columns in output tensor. */
        const int n_channels          /** Number of channels in output tensor. */
      );

      /** Get the window of work a given operator can perform. */
      unsigned int get_window() const;

      /** Perform work upon a window of the input. */
      void run(const unsigned int start, const unsigned int stop);
      /***********************************************************************/

      private:
        static void process_tile_row(
          const int tile_N,
          const int n_channels,
          const T* const matrix_base,
          const int matrix_stride,
          const int matrix_row_stride,
          const T* const biases,
          T* const output,
          const int output_row_stride,
          const int output_col_stride,
          const int row_pad_bottom,
          const int row_pad_right
        );

        // Limits on the amount of anti-padding to be applied
        static constexpr int max_pad_bottom = output_tile_rows;
        static constexpr int max_pad_right = output_tile_cols;

        /** Prepare a single tile of the output tensor. */
        template <int pad_bottom, int pad_right>
        static void process_tile(int, const T*, int, const T*, T*, int, int);

        // Array of methods to produce tiles of output tensor.
        typedef void (*TileFn)(int, const T*, int, const T*, T*, int, int);
        static const TileFn tile_fns[max_pad_bottom][max_pad_right];

        /** Member constants for instances of the transform. */
        const T* const _matrix_base;
        const T* const _biases;
        const int _matrix_stride, _matrix_row_stride;
        T* const _outptr;
        const int _n_batches, _n_rows, _n_cols, _n_channels, _tile_M, _tile_N;
    };
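
    // Illustrative usage sketch (assumed variable names): once the GEMMs have
    // filled the output matrices, an OutputTransform writes the spatial-domain
    // output tensor and adds the per-channel biases.
    //
    //   WinogradConv::OutputTransform<float> ot(
    //       output_matrices,       // base of the Winograd-domain matrices
    //       matrix_stride,
    //       matrix_row_stride,
    //       biases,                // per-channel biases
    //       output_data,           // output tensor
    //       n_batches, output_rows, output_cols, n_output_channels);
    //   ot.run(0, ot.get_window());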

    /** Perform a convolution.
     */
    template <typename TOut, typename TIn>
    class Convolution
    {
      public:
        // Information about the typed Winograd instance
        typedef TOut OutputType;
        typedef TIn InputType;

        /** Create a new Winograd operator. */
        Convolution(
          const KernelShape &kernel_shape,
          const Tensor4DShape &input_shape,
          const PaddingType padding,
          void *kernel_storage=NULL
        );

        Convolution(const Convolution&) = delete;
        Convolution &operator=(const Convolution&) = delete;

        /** Create a new Winograd operator and initialise the weights. */
        Convolution(
          const KernelShape &kernel_shape,
          const Tensor4DShape &input_shape,
          const PaddingType padding,
          const TIn* const kernel,
          void *kernel_storage=NULL,
          void *transform_working_space=NULL
        );

        /** Clean up a convolution engine. */
        ~Convolution();

        /** Transform the weights into the Winograd domain. */
        template <typename WeightsTransform=WeightsTransform<TIn>>
        void transform_weights(
          const TIn* const kernel,
          void *transform_working_space=NULL
        );

        /** Apply the Winograd operator to some input. */
        void execute(
          TOut* const output,
          const TIn* const input,
          const TOut* const biases,
          void* working_space=NULL,
          const int n_threads=1
        );

        /** Apply the Winograd operator to some input. */
        void execute(
          TOut* const output,
          const TIn* const input,
          const TOut* const biases,
          const int n_threads
        );

        /** Get the output shape of a convolution. */
        static Tensor4DShape get_output_shape(
          const KernelShape &kernel_shape,
          const Tensor4DShape &in_shape,
          const PaddingType padding
        );

        /** Get the memory required to transform the kernel. */
        static size_t get_kernel_transform_working_size(const KernelShape &shape);

        /** Get the memory required to store the kernel transformed into the
         * Winograd domain.
         */
        static size_t get_kernel_storage_size(const KernelShape &shape);

        /** Get the memory required to store the input tensor transformed into
         * the Winograd domain.
         */
        static size_t get_input_storage_size(
          const KernelShape &kernel_shape,
          const Tensor4DShape &input_shape,
          const PaddingType padding_type
        );

        /** Get the memory required to store the output tensor in the Winograd
         * domain.
         */
        static size_t get_output_storage_size(
          const KernelShape &kernel_shape,
          const Tensor4DShape &input_shape,
          const PaddingType padding_type
        );

        /** Get the memory required to apply a Winograd operator to some input.
         */
        static size_t get_working_space_size(
          const KernelShape &kernel_shape,
          const Tensor4DShape &input_shape,
          const PaddingType padding_type
        );

        /** Get the memory required by a single "input" matrix. */
        static size_t get_input_matrix_size(
          const KernelShape &kernel_shape,
          const Tensor4DShape &input_shape,
          const PaddingType padding_type
        );

        static int get_input_matrix_stride(
          const KernelShape &kernel_shape,
          const Tensor4DShape &input_shape,
          const PaddingType padding_type
        );

        /** Get the memory required by a single "output" matrix. */
        static size_t get_output_matrix_size(
          const KernelShape &kernel_shape,
          const Tensor4DShape &input_shape,
          const PaddingType padding_type
        );

        static int get_output_matrix_stride(
          const KernelShape &kernel_shape,
          const Tensor4DShape &input_shape,
          const PaddingType padding_type
        );

        /** Get the memory required by a single "kernel" matrix. */
        static size_t get_kernel_matrix_size(const KernelShape &shape);
        static int get_kernel_matrix_stride(const KernelShape &shape);

        static constexpr int M_BLOCK = 4;   /** Size of block used by GEMM. */
        static constexpr int N_BLOCK = 16;  /** Size of block used by GEMM. */

      private:
        const KernelShape kernel_shape;  /** Shape of the kernel to be applied. */
        TIn *kernel_matrices[N_GEMMS];   /** Pointers into the kernel matrices. */
        const int kernel_matrix_row_stride;  /** Stride within the kernel matrices. */

        const bool manage_kernel_storage;  /** Kernel storage is managed by the instance. */
        void* const _kernel_storage;       /** Base pointer for kernel storage. */

        const Tensor4DShape input_shape;  /** Shape of the input tensor. */
        const PaddingType padding;        /** Padding applied by the operator. */

        const Tensor4DShape output_shape;  /** Output shape produced by the operator. */

        const int tile_rows;  /** Number of rows of tiles. */
        const int tile_cols;  /** Number of columns of tiles. */
        const int M, K, N;    /** Sizes of underlying fundamental matrix multiplications. */

        profiler prof;
    };
};
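
// Illustrative end-to-end sketch (assumed variable names): Convolution wraps
// the three transforms and the batched GEMMs behind a single driver. The
// static helpers size the kernel storage and the per-call working space.
//
//   using WinogradConv = winograd::WinogradGEMM<2, 2, 3, 3>;
//   using Conv = WinogradConv::Convolution<float, float>;
//
//   const size_t kernel_bytes = Conv::get_kernel_storage_size(kernel_shape);
//   const size_t work_bytes   = Conv::get_working_space_size(kernel_shape, input_shape, padding);
//
//   Conv conv(kernel_shape, input_shape, padding, kernel_data);  // weights are transformed here
//   conv.execute(output_data, input_data, bias_data, working_space, n_threads);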

}  // namespace winograd