/*
 * Copyright (c) 2021 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#pragma once

#include "pool_common.hpp"
#include "utils.hpp"

namespace arm_conv {
namespace pooling {

template <class strategy>
class PoolingDepthfirstGeneric : public PoolingCommon<typename strategy::operand_type, typename strategy::return_type>
{
  using TInput = typename strategy::operand_type;
  using TOutput = typename strategy::return_type;

  const PoolingArgs m_args;  // Copy of arguments

  unsigned int input_rows(void) const
  {
    return m_args.pool_window.rows;
  }

  unsigned int input_cols(void) const
  {
    return m_args.pool_window.cols;
  }

  public:
  PoolingDepthfirstGeneric(const PoolingArgs &args) : m_args(args)
  {
  }

  PoolingDepthfirstGeneric(PoolingDepthfirstGeneric &) = delete;
  PoolingDepthfirstGeneric &operator=(PoolingDepthfirstGeneric &) = delete;

  size_t sizeof_input_pointer_array(void) const
  {
    return sizeof(TInput *) * input_rows() * input_cols();
  }

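  // The working space holds one input-pointer array per thread. Worked
  // example (illustrative): for a 3x3 pooling window on a 64-bit target,
  // each array is 3 * 3 * sizeof(TInput *) = 72 bytes, so four threads
  // require 288 bytes in total.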
  size_t get_working_size(unsigned int num_threads) const override
  {
    return num_threads * sizeof_input_pointer_array();
  }

  void execute(
    const void *const input,
    void *const output,
    void *const working_space,
    unsigned int thread_id,
    unsigned int num_threads
  ) const override
  {
    const size_t ld_input_col = m_args.n_channels;
    const size_t ld_input_row = ld_input_col * m_args.input_cols;
    const size_t ld_input_batch = ld_input_row * m_args.input_rows;
    const size_t ld_output_col = ld_input_col;
    const size_t ld_output_row = ld_output_col * m_args.output_cols;
    const size_t ld_output_batch = ld_output_row * m_args.output_rows;

    execute(
      input, ld_input_col, ld_input_row, ld_input_batch,
      output, ld_output_col, ld_output_row, ld_output_batch,
      working_space,
      thread_id, num_threads
    );
  }

  void execute(
    const void *const input,
    size_t ld_input_col,
    size_t ld_input_row,
    size_t ld_input_batch,
    void *const output,
    size_t ld_output_col,
    size_t ld_output_row,
    size_t ld_output_batch,
    void *const working_space,
    unsigned int thread_id,
    unsigned int num_threads
  ) const override
  {
    execute(
      m_args.n_batches, m_args.input_rows, m_args.input_cols,
      m_args.n_channels,
      input, ld_input_col, ld_input_row, ld_input_batch,
      m_args.padding,
      m_args.output_rows, m_args.output_cols,
      output, ld_output_col, ld_output_row, ld_output_batch,
      working_space,
      thread_id, num_threads
    );
  }

  void execute(
    unsigned int batches,
    unsigned int height,
    unsigned int width,
    unsigned int channels,
    const void *const _input,
    size_t ld_input_col,
    size_t ld_input_row,
    size_t ld_input_batch,
    const PaddingValues &padding,
    unsigned int output_height,
    unsigned int output_width,
    void *const _output,
    size_t ld_output_col,
    size_t ld_output_row,
    size_t ld_output_batch,
    void *const _working_space,
    unsigned int thread_id,
    unsigned int num_threads
  ) const override
  {
    strategy strat(m_args.cpu_info);
#ifdef CYCLE_PROFILING
    arm_gemm::profiler prof;
#endif // CYCLE_PROFILING

    const unsigned int roundup_output_rows = roundup(output_height, num_threads);
    const unsigned int rows_per_thread = roundup_output_rows / num_threads;
    int start_out_height = static_cast<int>(thread_id * rows_per_thread);
    int end_out_height = std::min<int>(output_height, static_cast<int>((thread_id + 1) * rows_per_thread));
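    // Illustrative example: with output_height = 10 and num_threads = 4,
    // roundup gives 12, i.e. 3 rows per thread; thread 0 covers rows 0-2
    // while thread 3 covers only row 9, as end_out_height is clamped to
    // output_height.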

    unsigned int start_channel = 0;
    unsigned int end_channel = channels;
    if(output_height == 1)
    {
      const unsigned int channels_per_thread = roundup(channels, num_threads) / num_threads;
      start_channel = thread_id * channels_per_thread;
      end_channel = std::min(start_channel + channels_per_thread, channels);

      // Reset start and end rows
      start_out_height = 0;
      end_out_height = output_height;
    }
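    // Illustrative example: with 2 channels and 4 threads,
    // channels_per_thread = 1, so threads 2 and 3 compute
    // start_channel >= end_channel and take the early exit below.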

    if(start_channel >= end_channel)
    {
      // Early exit in case of multiple threads parallelising on channels
      return;
    }

    // Cast input and output pointers into the right types
    const TInput *const inptr = static_cast<const TInput *>(_input) + start_channel;
    TOutput *const outptr = static_cast<TOutput *>(_output) + start_channel;

    // Grab the input pointer array
    uint8_t *const working_space = static_cast<uint8_t *>(_working_space);
    const TInput **const inptr_array = reinterpret_cast<const TInput **>(working_space + thread_id * sizeof_input_pointer_array());

    // For each output tile, construct the requisite set of pointers and call
    // into the kernel.
    for (unsigned int batch = 0; batch < batches; batch++)
    {
      // Get batch pointers
      const auto inptr_batch = inptr + batch * ld_input_batch;
      auto outptr_row = outptr + batch * ld_output_batch + start_out_height * ld_output_row;

      for (int out_i = start_out_height; out_i < end_out_height; out_i++)
      {
        const int start_in_i = out_i * m_args.pool_stride.rows - padding.top;
        const int end_in_i = start_in_i + m_args.pool_window.rows;

        // Compute top/bottom padding
        const auto pad_top = static_cast<unsigned int>(std::max(0 - start_in_i, 0));
        const auto pad_bottom = static_cast<unsigned int>(std::max<int>(end_in_i - height, 0));
        const auto valid_rows = input_rows() - pad_top - pad_bottom;
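        // Illustrative example: a 3-row window with stride 1 and
        // padding.top = 1 gives start_in_i = -1 for the first output row,
        // so pad_top = 1 and only two of the three window rows are valid.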

        auto outptr_col = outptr_row;
        auto inptr_row = inptr_batch + (start_in_i + pad_top) * ld_input_row;

        for (int out_j = 0, start_in_j = -padding.left;
             out_j < static_cast<int>(output_width);
             out_j++, start_in_j += m_args.pool_stride.cols)
        {
          const int end_in_j = start_in_j + m_args.pool_window.cols;

          // Compute left/right padding
          const auto pad_left = static_cast<unsigned int>(std::max(0 - start_in_j, 0));
          const auto pad_right = static_cast<unsigned int>(std::max<int>(end_in_j - width, 0));
          const auto valid_cols = input_cols() - pad_left - pad_right;

          // Construct the input pointer array - fill in all valid points
          // contiguously.
          const TInput **ptrs = inptr_array;
          const TInput *rowptr = inptr_row + (start_in_j + pad_left) * ld_input_col;
          for (auto i = 0u; i < valid_rows; i++)
          {
            const TInput *colptr = rowptr;
            for (auto j = 0u; j < valid_cols; j++)
            {
              *(ptrs++) = colptr;
              colptr += ld_input_col;
            }
            rowptr += ld_input_row;
          }
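          // Illustrative example: with pad_left = 1 on a 3x3 window
          // (valid_rows = 3, valid_cols = 2), only six pointers are
          // written, packed at the start of inptr_array; the kernel never
          // sees the padded cells.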

          // Compute the number of valid cells
          const auto valid_cells = valid_rows * valid_cols;
          const auto window_cells = m_args.exclude_padding ? valid_cells : input_rows() * input_cols();
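          // For average pooling this choice sets the divisor: with
          // exclude_padding, only the valid (unpadded) cells are counted;
          // otherwise the full window size is used and padded cells
          // effectively contribute zero.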

          // Get the output pointer for this call
          TOutput *outptr = outptr_col;
          outptr_col += ld_output_col;

#ifdef CYCLE_PROFILING
          // TODO Work number
          auto p = prof.ScopedProfiler(PROFILE_KERNEL, (unsigned long)(strategy::out_rows() * strategy::out_cols() * strategy::pool_rows() * strategy::pool_cols()));
#endif // CYCLE_PROFILING
          strat.kernel(window_cells, valid_cells, end_channel - start_channel, inptr_array, outptr);
        }

        outptr_row += ld_output_row;
      }
    }
  }
};

}  // namespace pooling
}  // namespace arm_conv
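
// Usage sketch (illustrative only; "some_generic_strategy" is a
// placeholder for a concrete strategy type and the PoolingArgs fields are
// assumed to be filled in by the caller). All threads share one working
// space; each passes its own thread_id and the class offsets into the
// buffer internally:
//
//   PoolingArgs args = /* describe the pooling problem */;
//   PoolingDepthfirstGeneric<some_generic_strategy> pool(args);
//   std::vector<uint8_t> working_space(pool.get_working_size(n_threads));
//   pool.execute(input, output, working_space.data(), thread_id, n_threads);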