path: root/src/core/NEON/kernels/convolution/winograd/transforms/weights_6_3_fp32.cpp
/*
 * Copyright (c) 2017 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "arm_compute/core/NEON/kernels/convolution/common/arm.hpp"
#include "arm_compute/core/NEON/kernels/convolution/winograd/winograd_gemm.hpp"
#include "arm_compute/core/NEON/kernels/convolution/winograd/transforms/kernel.hpp"
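
// Winograd weight (kernel) transform, fp32, for the F(6, 3) one-dimensional
// variants: each 3-tap kernel is expanded into the 8 coefficients of the
// inner tile (inner_tile_cols = 6 + 3 - 1) used by the subsequent Winograd
// GEMMs.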


namespace winograd
{
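  // Weight transform for the 1x6 output tile / 1x3 kernel specialisation
  // (reading the template arguments as <output tile rows, output tile cols,
  // kernel rows, kernel cols>). The parameter roles below are inferred from
  // the loop structure of the implementation:
  //  - input:  kernel weights in HWIO order.
  //  - output: base of the inner_tile_cols (8) transformed matrices; the
  //            j-th matrix starts at output + j * matrix_stride.
  //  - Within each matrix, rows are indexed by input channel (stride
  //    matrix_row_stride) and columns by output channel (stride 1).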
  template <>
  template <>
  void WinogradGEMM<1, 6, 1, 3>::WeightsTransform<float>::execute(
    const int n_output_channels,
    const int n_input_channels,
    const float* const input,  // NOTE: Data in HWIO order
    float* const output,
    const int matrix_stride,
    const int matrix_row_stride
  )
  {
    // Get pointers to each cell of the weight tensor
    const auto weight_col_stride = n_input_channels * n_output_channels;
    const float *inptrs[3];
    for (int j = 0; j < 3; j++)
    {
      inptrs[j] = input + j*weight_col_stride;
    }

    // For each input channel
    for (int ic = 0; ic < n_input_channels; ic++)
    {
      float *outptr = output + ic * matrix_row_stride;

      // For each output channel
      int channels_remaining = n_output_channels;
      for (; channels_remaining; channels_remaining--)
      {
        // Matrices used and computed in this kernel
        float w[3], V[inner_tile_cols];

        // Read weights
        for (int j = 0; j < 3; j++)
        {
          w[j] = *(inptrs[j]++);
        }

        // Compute V = w W^T, where W is the weight-transform matrix
        V[0] = (w[0]*-1) / 36.0f;
        V[1] = (w[1]*-1 + w[0]*1 + w[2]*1) / 48.0f;
        V[2] = (w[0]*1 + w[1]*1 + w[2]*1) / 48.0f;
        V[3] = (w[0]*-1 + w[2]*-4 + w[1]*2) / 120.0f;
        V[4] = (w[0]*-1 + w[2]*-4 + w[1]*-2) / 120.0f;
        V[5] = (w[1]*-3 + w[2]*9 + w[0]*1) / 720.0f;
        V[6] = (w[1]*3 + w[2]*9 + w[0]*1) / 720.0f;
        V[7] = (w[2]*1) / 1;
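
        // Equivalently, the arithmetic above applies the 8x3 matrix W
        // (rows written with the unreduced denominators used above):
        //   [ -1/36      0        0     ]
        //   [  1/48   -1/48     1/48    ]
        //   [  1/48    1/48     1/48    ]
        //   [ -1/120   2/120   -4/120   ]
        //   [ -1/120  -2/120   -4/120   ]
        //   [  1/720  -3/720    9/720   ]
        //   [  1/720   3/720    9/720   ]
        //   [  0       0        1       ]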

        // Store the transformed weights
        for (int j = 0; j < inner_tile_cols; j++)
        {
          *(outptr + j*matrix_stride) = V[j];
        }
        outptr++;
      }
    }
  }

  template <>
  template <>
  int WinogradGEMM<1, 6, 1, 3>::WeightsTransform<float>::ops_performed(const KernelShape &shape)
  {
    (void) shape;
    return 0;  // TODO
  }

  template <>
  template <>
  void WinogradGEMM<6, 1, 3, 1>::WeightsTransform<float>::execute(
    const int n_output_channels,
    const int n_input_channels,
    const float* const input,  // NOTE: Data in HWIO order
    float* const output,
    const int matrix_stride,
    const int matrix_row_stride
  )
  {
    // Redirect to the 1xN implementation
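    // (for a one-dimensional kernel the HWIO storage layout and the transform
    // arithmetic are identical in the row and column cases, so the Nx1
    // specialisation can reuse the 1xN code unchanged)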
    WinogradGEMM<1, 6, 1, 3>::template WeightsTransform<float>::execute(
      n_output_channels, n_input_channels, input, output, matrix_stride,
      matrix_row_stride
    );
  }

  template <>
  template <>
  int WinogradGEMM<6, 1, 3, 1>::WeightsTransform<float>::ops_performed(const KernelShape &shape)
  {
    (void) shape;
    return 0;  // TODO
  }

  template struct WinogradGEMM<1, 6, 1, 3>::WeightsTransform<float>;
  template struct WinogradGEMM<6, 1, 3, 1>::WeightsTransform<float>;
}