path: root/arm_compute/graph2/frontend/Layers.h
/*
 * Copyright (c) 2018 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef __ARM_COMPUTE_GRAPH2_LAYERS_H__
#define __ARM_COMPUTE_GRAPH2_LAYERS_H__

#include "arm_compute/graph2/GraphBuilder.h"
#include "arm_compute/graph2/Types.h"
#include "arm_compute/graph2/frontend/ILayer.h"
#include "arm_compute/graph2/frontend/IStream.h"
#include "arm_compute/graph2/frontend/SubStream.h"

#include "arm_compute/core/utils/misc/Utility.h"

#include <memory>
#include <string>

namespace arm_compute
{
namespace graph2
{
namespace frontend
{
/** Input Layer */
class InputLayer final : public ILayer
{
public:
    /** Construct an input layer.
     *
     * @param[in] desc     Description of input tensor.
     * @param[in] accessor Accessor to get input tensor data from.
     */
    InputLayer(TensorDescriptor desc, ITensorAccessorUPtr accessor)
        : _desc(desc), _accessor(std::move(accessor))
    {
    }

    NodeID create_layer(IStream &s) override
    {
        NodeParams common_params = { "", s.hints().target_hint };
        return GraphBuilder::add_input_node(s.graph(), common_params, _desc, std::move(_accessor));
    }

private:
    TensorDescriptor    _desc;
    ITensorAccessorUPtr _accessor;
};
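
// Usage sketches in this file are illustrative only and not part of the API.
// In them, `graph` is assumed to be a frontend Stream (an IStream),
// `make_unique` abbreviates arm_compute::support::cpp14::make_unique, and
// NumpyAccessor is a hypothetical ITensorAccessor implementation; substitute
// your own accessors.
//
//   graph << InputLayer(TensorDescriptor(TensorShape(224U, 224U, 3U, 1U), DataType::F32),
//                       make_unique<NumpyAccessor>("input.npy"));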

/** Output Layer */
class OutputLayer final : public ILayer
{
public:
    /** Construct an output layer.
     *
     * @param[in] accessor Accessor to give output tensor data to.
     */
    OutputLayer(ITensorAccessorUPtr accessor)
        : _accessor(std::move(accessor))
    {
    }

    NodeID create_layer(IStream &s) override
    {
        NodeParams  common_params = { "", s.hints().target_hint };
        NodeIdxPair input         = { s.tail_node(), 0 };
        return GraphBuilder::add_output_node(s.graph(), common_params, input, std::move(_accessor));
    }

private:
    ITensorAccessorUPtr _accessor;
};
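
// Sketch of a minimal end-to-end chain (same assumptions as above, with `desc`
// a TensorDescriptor as in the InputLayer sketch): OutputLayer consumes the
// stream's tail node, so it normally comes last.
//
//   graph << InputLayer(desc, make_unique<NumpyAccessor>("in.npy"))
//         << SoftmaxLayer()
//         << OutputLayer(make_unique<NumpyAccessor>("out.npy"));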

/** Activation Layer */
class ActivationLayer final : public ILayer
{
public:
    /** Construct an activation layer.
     *
     * @param[in] act_info Activation information
     */
    ActivationLayer(ActivationLayerInfo act_info)
        : _act_info(act_info)
    {
    }

    NodeID create_layer(IStream &s) override
    {
        NodeParams  common_params = { "", s.hints().target_hint };
        NodeIdxPair input         = { s.tail_node(), 0 };
        return GraphBuilder::add_activation_node(s.graph(), common_params, input, _act_info);
    }

private:
    ActivationLayerInfo _act_info;
};
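
// Example (sketch): ActivationLayerInfo is the core-library descriptor; e.g. a
// bounded ReLU capped at 6:
//
//   graph << ActivationLayer(ActivationLayerInfo(
//              ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 6.f));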

/** Batch Normalization Layer */
class BatchNormalizationLayer final : public ILayer
{
public:
    /** Construct a batch normalization layer.
     *
     * @param[in] mean    Accessor to get mean tensor data from.
     * @param[in] var     Accessor to get variance tensor data from.
     * @param[in] gamma   (Optional) Accessor to get gamma tensor data from. Default: nullptr.
     * @param[in] beta    (Optional) Accessor to get beta tensor data from. Default: nullptr.
     * @param[in] epsilon (Optional) Epsilon value. Default: 0.001.
     */
    BatchNormalizationLayer(ITensorAccessorUPtr mean,
                            ITensorAccessorUPtr var,
                            ITensorAccessorUPtr gamma   = nullptr,
                            ITensorAccessorUPtr beta    = nullptr,
                            float               epsilon = 0.001f)
        : _mean(std::move(mean)), _var(std::move(var)), _gamma(std::move(gamma)), _beta(std::move(beta)), _epsilon(epsilon)
    {
    }

    NodeID create_layer(IStream &s) override
    {
        ARM_COMPUTE_ERROR_ON(_mean == nullptr);
        ARM_COMPUTE_ERROR_ON(_var == nullptr);

        NodeParams  common_params = { "", s.hints().target_hint };
        NodeIdxPair input         = { s.tail_node(), 0 };
        return GraphBuilder::add_batch_normalization_node(s.graph(), common_params, input, _epsilon,
                                                          std::move(_mean), std::move(_var), std::move(_beta), std::move(_gamma));
    }

private:
    ITensorAccessorUPtr _mean;
    ITensorAccessorUPtr _var;
    ITensorAccessorUPtr _gamma;
    ITensorAccessorUPtr _beta;
    float               _epsilon;
};
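
// Example (sketch, hypothetical accessors): gamma and beta may be left as
// nullptr; conceptually that corresponds to no extra scale (gamma = 1) and no
// extra shift (beta = 0).
//
//   graph << BatchNormalizationLayer(make_unique<NumpyAccessor>("bn_mean.npy"),
//                                    make_unique<NumpyAccessor>("bn_var.npy"),
//                                    nullptr /* gamma */,
//                                    nullptr /* beta */,
//                                    0.001f);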

/** Convolution Layer */
class ConvolutionLayer final : public ILayer
{
public:
    /** Construct a convolution layer.
     *
     * @param[in] conv_width  Convolution width.
     * @param[in] conv_height Convolution height.
     * @param[in] ofm         Output feature map.
     * @param[in] weights     Accessor to get kernel weights from.
     * @param[in] bias        Accessor to get kernel bias from.
     * @param[in] conv_info   Padding and stride information.
     * @param[in] num_groups  (Optional) Number of groups. Default: 1. Currently stored but not forwarded to the graph builder.
     */
    ConvolutionLayer(unsigned int        conv_width,
                     unsigned int        conv_height,
                     unsigned int        ofm,
                     ITensorAccessorUPtr weights,
                     ITensorAccessorUPtr bias,
                     PadStrideInfo       conv_info,
                     unsigned int        num_groups = 1)
        : _conv_width(conv_width),
          _conv_height(conv_height),
          _ofm(ofm),
          _conv_info(std::move(conv_info)),
          _num_groups(num_groups),
          _weights(std::move(weights)),
          _bias(std::move(bias))
    {
    }

    NodeID create_layer(IStream &s) override
    {
        ARM_COMPUTE_UNUSED(_num_groups);
        NodeIdxPair input         = { s.tail_node(), 0 };
        NodeParams  common_params = { "", s.hints().target_hint };
        return GraphBuilder::add_convolution_node(s.graph(), common_params, input,
                                                  Size2D(_conv_width, _conv_height), _ofm, _conv_info,
                                                  s.hints().convolution_method_hint,
                                                  std::move(_weights), std::move(_bias));
    }

private:
    unsigned int        _conv_width;
    unsigned int        _conv_height;
    unsigned int        _ofm;
    const PadStrideInfo _conv_info;
    unsigned int        _num_groups;
    ITensorAccessorUPtr _weights;
    ITensorAccessorUPtr _bias;
};
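
// Example (sketch, hypothetical accessors): a 3x3 convolution producing 64
// output feature maps with stride 1 and 1-pixel padding on every border
// (PadStrideInfo takes stride_x, stride_y, pad_x, pad_y):
//
//   graph << ConvolutionLayer(3U, 3U, 64U,
//                             make_unique<NumpyAccessor>("conv1_w.npy"),
//                             make_unique<NumpyAccessor>("conv1_b.npy"),
//                             PadStrideInfo(1, 1, 1, 1));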

/** Depthwise Convolution Layer */
class DepthwiseConvolutionLayer final : public ILayer
{
public:
    /** Construct a depthwise convolution layer.
     *
     * @param[in] conv_width  Convolution width.
     * @param[in] conv_height Convolution height.
     * @param[in] weights     Accessor to get kernel weights from.
     * @param[in] bias        Accessor to get kernel bias from.
     * @param[in] conv_info   Padding and stride information.
     */
    DepthwiseConvolutionLayer(unsigned int        conv_width,
                              unsigned int        conv_height,
                              ITensorAccessorUPtr weights,
                              ITensorAccessorUPtr bias,
                              PadStrideInfo       conv_info)
        : _conv_width(conv_width),
          _conv_height(conv_height),
          _conv_info(std::move(conv_info)),
          _weights(std::move(weights)),
          _bias(std::move(bias))
    {
    }

    NodeID create_layer(IStream &s) override
    {
        NodeIdxPair input         = { s.tail_node(), 0 };
        NodeParams  common_params = { "", s.hints().target_hint };
        return GraphBuilder::add_depthwise_convolution_node(s.graph(), common_params,
                                                            input, Size2D(_conv_width, _conv_height), _conv_info,
                                                            s.hints().depthwise_convolution_method_hint,
                                                            std::move(_weights), std::move(_bias));
    }

private:
    unsigned int        _conv_width;
    unsigned int        _conv_height;
    const PadStrideInfo _conv_info;
    ITensorAccessorUPtr _weights;
    ITensorAccessorUPtr _bias;
};
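
// Example (sketch, hypothetical accessors): unlike ConvolutionLayer there is no
// output feature map count; each input channel is filtered with its own kernel,
// as in the MobileNet-style 3x3 depthwise step below.
//
//   graph << DepthwiseConvolutionLayer(3U, 3U,
//                                      make_unique<NumpyAccessor>("dw_w.npy"),
//                                      make_unique<NumpyAccessor>("dw_b.npy"),
//                                      PadStrideInfo(1, 1, 1, 1));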

/** Flatten Layer */
class FlattenLayer final : public ILayer
{
public:
    /** Construct a flatten layer. */
    FlattenLayer()
    {
    }

    NodeID create_layer(IStream &s) override
    {
        NodeParams  common_params = { "", s.hints().target_hint };
        NodeIdxPair input         = { s.tail_node(), 0 };
        return GraphBuilder::add_flatten_node(s.graph(), common_params, input);
    }
};

/** Fully Connected Layer */
class FullyConnectedLayer final : public ILayer
{
public:
    /** Construct a fully connected layer.
     *
     * @param[in] num_outputs Number of outputs.
     * @param[in] weights     Accessor to get weights from.
     * @param[in] bias        Accessor to get bias from.
     */
    FullyConnectedLayer(unsigned int        num_outputs,
                        ITensorAccessorUPtr weights,
                        ITensorAccessorUPtr bias)
        : _num_outputs(num_outputs), _weights(std::move(weights)), _bias(std::move(bias))
    {
    }

    NodeID create_layer(IStream &s) override
    {
        NodeParams  common_params = { "", s.hints().target_hint };
        NodeIdxPair input         = { s.tail_node(), 0 };
        return GraphBuilder::add_fully_connected_layer(s.graph(), common_params, input, _num_outputs,
                                                       std::move(_weights), std::move(_bias));
    }

private:
    unsigned int        _num_outputs;
    ITensorAccessorUPtr _weights;
    ITensorAccessorUPtr _bias;
};
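
// Example (sketch, hypothetical accessors): a 1000-way fully connected
// classifier head.
//
//   graph << FullyConnectedLayer(1000U,
//                                make_unique<NumpyAccessor>("fc_w.npy"),
//                                make_unique<NumpyAccessor>("fc_b.npy"));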

/** Normalization Layer */
class NormalizationLayer final : public ILayer
{
public:
    /** Construct a normalization layer.
     *
     * @param[in] norm_info Normalization information.
     */
    NormalizationLayer(NormalizationLayerInfo norm_info)
        : _norm_info(norm_info)
    {
    }

    NodeID create_layer(IStream &s) override
    {
        NodeParams  common_params = { "", s.hints().target_hint };
        NodeIdxPair input         = { s.tail_node(), 0 };
        return GraphBuilder::add_normalization_node(s.graph(), common_params, input, _norm_info);
    }

private:
    NormalizationLayerInfo _norm_info;
};
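
// Example (sketch): NormalizationLayerInfo is the core-library descriptor; e.g.
// AlexNet-style local response normalization across feature maps:
//
//   graph << NormalizationLayer(NormalizationLayerInfo(NormType::CROSS_MAP, 5, 0.0001f, 0.75f));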

/** Pooling Layer */
class PoolingLayer final : public ILayer
{
public:
    /** Construct a pooling layer.
     *
     * @param[in] pool_info Pooling information.
     */
    PoolingLayer(PoolingLayerInfo pool_info)
        : _pool_info(pool_info)
    {
    }

    NodeID create_layer(IStream &s) override
    {
        NodeParams  common_params = { "", s.hints().target_hint };
        NodeIdxPair input         = { s.tail_node(), 0 };
        return GraphBuilder::add_pooling_node(s.graph(), common_params, input, _pool_info);
    }

private:
    PoolingLayerInfo _pool_info;
};
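
// Example (sketch): a 3x3 max pooling with stride 2 and no padding, using the
// core-library PoolingLayerInfo descriptor:
//
//   graph << PoolingLayer(PoolingLayerInfo(PoolingType::MAX, 3, PadStrideInfo(2, 2, 0, 0)));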

/** Reshape Layer */
class ReshapeLayer final : public ILayer
{
public:
    /** Construct a reshape layer.
     *
     * @param[in] shape Target shape.
     */
    ReshapeLayer(TensorShape shape)
        : _shape(shape)
    {
    }

    NodeID create_layer(IStream &s) override
    {
        NodeParams  common_params = { "", s.hints().target_hint };
        NodeIdxPair input         = { s.tail_node(), 0 };
        return GraphBuilder::add_reshape_node(s.graph(), common_params, input, _shape);
    }

private:
    TensorShape _shape;
};
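
// Example (sketch): collapsing a feature map into a vector shape:
//
//   graph << ReshapeLayer(TensorShape(2048U));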

/** Softmax Layer */
class SoftmaxLayer final : public ILayer
{
public:
    /** Construct a softmax layer.
     *
     * @param[in] beta (Optional) Beta value. Default: 1.0.
     */
    SoftmaxLayer(float beta = 1.0f)
        : _beta(beta)
    {
    }

    NodeID create_layer(IStream &s) override
    {
        NodeParams  common_params = { "", s.hints().target_hint };
        NodeIdxPair input         = { s.tail_node(), 0 };
        return GraphBuilder::add_softmax_node(s.graph(), common_params, input, _beta);
    }

private:
    float _beta;
};

/** Branch Layer */
class BranchLayer final : public ILayer
{
public:
    /** Construct a branch layer.
     *
     * @param[in] merge_method     Branch merging method.
     * @param[in] sub_stream1      First graph branch.
     * @param[in] sub_stream2      Second graph branch.
     * @param[in] rest_sub_streams (Optional) Remaining sub-graph branches.
     */
    template <typename... Ts>
    BranchLayer(BranchMergeMethod merge_method, SubStream &&sub_stream1, SubStream &&sub_stream2, Ts &&... rest_sub_streams)
        : _branch_merge_method(merge_method), _sub_streams()
    {
        _sub_streams.push_back(arm_compute::support::cpp14::make_unique<SubStream>(std::move(sub_stream1)));
        _sub_streams.push_back(arm_compute::support::cpp14::make_unique<SubStream>(std::move(sub_stream2)));

        utility::for_each([&](SubStream && sub_stream)
        {
            _sub_streams.push_back(arm_compute::support::cpp14::make_unique<SubStream>(std::move(sub_stream)));
        },
        std::move(rest_sub_streams)...);
    }
    /** Construct a branch layer consisting of a single sub-stream.
     *
     * @param[in] sub_stream Sub-stream to forward.
     */
    BranchLayer(SubStream &&sub_stream)
        : _branch_merge_method(BranchMergeMethod::DEPTH_CONCATENATE), _sub_streams()
    {
        _sub_streams.push_back(arm_compute::support::cpp14::make_unique<SubStream>(std::move(sub_stream)));
    }

    NodeID create_layer(IStream &s) override
    {
        NodeID     nid           = EmptyNodeID;
        NodeParams common_params = { "", s.hints().target_hint };
        if(_sub_streams.size() == 1 && _sub_streams.at(0) != nullptr)
        {
            nid = _sub_streams[0]->tail_node();
        }
        else if(_branch_merge_method == BranchMergeMethod::DEPTH_CONCATENATE)
        {
            // Collect tail nodes and perform DepthConcatenate
            std::vector<NodeIdxPair> nodes;
            for(auto &ss : _sub_streams)
            {
                if(ss && (ss->tail_node() != EmptyNodeID))
                {
                    const auto tail_node = s.graph().node(ss->tail_node());
                    if(tail_node != nullptr && tail_node->type() != NodeType::Output)
                    {
                        nodes.push_back({ ss->tail_node(), 0 });
                    }
                }
            }
            nid = GraphBuilder::add_depth_concatenate_node(s.graph(), common_params, nodes);
        }
        else
        {
            ARM_COMPUTE_ERROR_ON(_sub_streams.size() != 2);
            NodeIdxPair input0 = { _sub_streams[0]->tail_node(), 0 };
            NodeIdxPair input1 = { _sub_streams[1]->tail_node(), 0 };
            nid                = GraphBuilder::add_elementwise_node(s.graph(), common_params, input0, input1, EltwiseOperation::ADD);
        }
        return nid;
    }

private:
    BranchMergeMethod                       _branch_merge_method;
    std::vector<std::unique_ptr<SubStream>> _sub_streams;
};
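
// Example (sketch): an Inception-style split. SubStream is assumed to fork from
// the parent stream's current tail node; the two branches are then merged back
// by depth concatenation (accessors hypothetical, as above).
//
//   SubStream branch_1x1(graph);
//   branch_1x1 << ConvolutionLayer(1U, 1U, 64U,
//                                  make_unique<NumpyAccessor>("b1_w.npy"),
//                                  make_unique<NumpyAccessor>("b1_b.npy"),
//                                  PadStrideInfo(1, 1, 0, 0));
//
//   SubStream branch_3x3(graph);
//   branch_3x3 << ConvolutionLayer(3U, 3U, 128U,
//                                  make_unique<NumpyAccessor>("b3_w.npy"),
//                                  make_unique<NumpyAccessor>("b3_b.npy"),
//                                  PadStrideInfo(1, 1, 1, 1));
//
//   graph << BranchLayer(BranchMergeMethod::DEPTH_CONCATENATE,
//                        std::move(branch_1x1), std::move(branch_3x3));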
} // namespace frontend
} // namespace graph2
} // namespace arm_compute
#endif /* __ARM_COMPUTE_GRAPH2_LAYERS_H__ */