//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once

#include <array>
#include <functional>
#include <memory>
#include <stdint.h>
#include "BackendId.hpp"
#include "Exceptions.hpp"
#include "Deprecated.hpp"

namespace armnn
{

constexpr unsigned int MaxNumOfTensorDimensions = 5U;

// The lowest performance data capture interval we support is 10 milliseconds,
// expressed here in microseconds.
constexpr unsigned int LOWEST_CAPTURE_PERIOD = 10000u;

/// @enum Status enumeration
/// @var Status::Success
/// @var Status::Failure
enum class Status
{
    Success = 0,
    Failure = 1
};

enum class DataType
{
    Float16 = 0,
    Float32 = 1,
    QAsymmU8 = 2,
    Signed32 = 3,
    Boolean = 4,
    QSymmS16 = 5,
    QuantizedSymm8PerAxis = 6,
    QSymmS8 = 7,

    QuantisedAsymm8 ARMNN_DEPRECATED_ENUM_MSG("Use DataType::QAsymmU8 instead.") = QAsymmU8,
    QuantisedSymm16 ARMNN_DEPRECATED_ENUM_MSG("Use DataType::QSymmS16 instead.") = QSymmS16
};

enum class DataLayout
{
    NCHW = 1,
    NHWC = 2
};
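
// Illustrative sketch (not part of the ArmNN API): how the two layouts place
// element (n, c, h, w) of a contiguous 4-d tensor at different flat offsets.
//
//     // NCHW: within a batch element, whole channel planes are stored in turn.
//     size_t OffsetNchw(size_t n, size_t c, size_t h, size_t w,
//                       size_t C, size_t H, size_t W)
//     {
//         return ((n * C + c) * H + h) * W + w;
//     }
//
//     // NHWC: the channel index varies fastest.
//     size_t OffsetNhwc(size_t n, size_t c, size_t h, size_t w,
//                       size_t C, size_t H, size_t W)
//     {
//         return ((n * H + h) * W + w) * C + c;
//     }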

enum class ActivationFunction
{
    Sigmoid     = 0,
    TanH        = 1,
    Linear      = 2,
    ReLu        = 3,
    BoundedReLu = 4, ///< min(a, max(b, input))
    SoftReLu    = 5,
    LeakyReLu   = 6,
    Abs         = 7,
    Sqrt        = 8,
    Square      = 9
};
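
// Reference semantics (illustrative sketch, not part of this header) for a few
// of the activations above; BoundedReLu follows the min(a, max(b, input))
// comment, the others are the standard definitions, with hypothetical names:
//
//     float ReLu(float x)                   { return std::max(0.0f, x); }
//     float BoundedReLu(float x, float a, float b)
//     {
//         return std::min(a, std::max(b, x));
//     }
//     float LeakyReLu(float x, float slope) { return x >= 0.0f ? x : slope * x; }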

enum class ArgMinMaxFunction
{
    Min = 0,
    Max = 1
};

enum class ComparisonOperation
{
    Equal          = 0,
    Greater        = 1,
    GreaterOrEqual = 2,
    Less           = 3,
    LessOrEqual    = 4,
    NotEqual       = 5
};

enum class PoolingAlgorithm
{
    Max     = 0,
    Average = 1,
    L2      = 2
};

enum class ResizeMethod
{
    Bilinear        = 0,
    NearestNeighbor = 1
};

///
/// The padding method modifies the output of pooling layers.
/// In both supported methods the padded values themselves are ignored
/// (they are not even treated as zeroes, which would make a difference
/// when max pooling a tensor containing negative values). The difference
/// between IgnoreValue and Exclude is that IgnoreValue counts the padding
/// fields in the divisor of Average and L2 pooling, while Exclude does not.
/// A worked example follows the enum below.
///
enum class PaddingMethod
{
    /// The padding fields count, but are ignored
    IgnoreValue = 0,
    /// The padding fields don't count and are ignored
    Exclude     = 1
};
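
// Worked example (illustrative): average pooling a 3-wide window at the left
// edge of the 1-d tensor [2, 4, ...] with one padding field on the left.
// The window sums the real values only (2 + 4 = 6) under both methods, but:
//
//     IgnoreValue: divisor includes the padding field -> 6 / 3 = 2
//     Exclude:     divisor counts real fields only    -> 6 / 2 = 3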

enum class NormalizationAlgorithmChannel
{
    Across = 0,
    Within = 1
};

enum class NormalizationAlgorithmMethod
{
    /// Krichevsky 2012: Local Brightness Normalization
    LocalBrightness = 0,
    /// Jarret 2009: Local Contrast Normalization
    LocalContrast = 1
};

enum class OutputShapeRounding
{
    Floor       = 0,
    Ceiling     = 1
};

/// Each backend should implement an IBackend.
class IBackend
{
protected:
    IBackend() {}
    virtual ~IBackend() {}

public:
    virtual const BackendId& GetId() const = 0;
};

using IBackendSharedPtr = std::shared_ptr<IBackend>;
using IBackendUniquePtr = std::unique_ptr<IBackend, void(*)(IBackend* backend)>;
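
// Minimal sketch (an assumption, not a real ArmNN backend) of satisfying the
// IBackend interface; the protected constructor and destructor mean instances
// are managed through the smart-pointer aliases above:
//
//     class ExampleBackend final : public armnn::IBackend
//     {
//     public:
//         const armnn::BackendId& GetId() const override { return m_Id; }
//     private:
//         armnn::BackendId m_Id{"ExampleBackend"};
//     };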

/// Device specific knowledge to be passed to the optimizer.
class IDeviceSpec
{
protected:
    IDeviceSpec() {}
    virtual ~IDeviceSpec() {}
public:
    virtual const BackendIdSet& GetSupportedBackends() const = 0;
};

/// Type of identifiers for bindable layers (inputs, outputs).
using LayerBindingId = int;

class PermutationVector
{
public:
    using ValueType = unsigned int;
    using SizeType = unsigned int;
    using ArrayType = std::array<ValueType, MaxNumOfTensorDimensions>;
    using ConstIterator = typename ArrayType::const_iterator;

    /// @param dimMappings - Indicates how to translate tensor elements from a given source into the target destination,
    /// when source and target potentially have different memory layouts.
    ///
    /// E.g. For a 4-d tensor laid out in memory with the format (Batch Element, Height, Width, Channels),
    /// which is to be passed as an input to ArmNN, each source dimension is mapped to the corresponding
    /// ArmNN dimension. The Batch dimension remains the same (0 -> 0). The source Height dimension is mapped
    /// to the location of the ArmNN Height dimension (1 -> 2). Similar arguments are made for the Width and
    /// Channels (2 -> 3 and 3 -> 1). This will lead to @ref m_DimMappings pointing to the following array:
    /// [ 0, 2, 3, 1 ].
    ///
    /// Note that the mapping should be reversed if considering the case of ArmNN 4-d outputs (Batch Element,
    /// Channels, Height, Width) being written to a destination with the format mentioned above. We now have
    /// 0 -> 0, 2 -> 1, 3 -> 2, 1 -> 3, which, when reordered, lead to the following @ref m_DimMappings contents:
    /// [ 0, 3, 1, 2 ].
    ///
    PermutationVector(const ValueType *dimMappings, SizeType numDimMappings);

    PermutationVector(std::initializer_list<ValueType> dimMappings);

    ValueType operator[](SizeType i) const { return m_DimMappings.at(i); }

    SizeType GetSize() const { return m_NumDimMappings; }

    ConstIterator begin() const { return m_DimMappings.begin(); }
    ConstIterator end() const { return m_DimMappings.end(); }

    bool IsEqual(const PermutationVector& other) const
    {
        if (m_NumDimMappings != other.m_NumDimMappings) return false;
        for (unsigned int i = 0; i < m_NumDimMappings; ++i)
        {
            if (m_DimMappings[i] != other.m_DimMappings[i]) return false;
        }
        return true;
    }

    bool IsInverse(const PermutationVector& other) const
    {
        bool isInverse = (GetSize() == other.GetSize());
        for (SizeType i = 0; isInverse && (i < GetSize()); ++i)
        {
            isInverse = (m_DimMappings[other.m_DimMappings[i]] == i);
        }
        return isInverse;
    }

private:
    ArrayType m_DimMappings;
    /// Number of valid entries in @ref m_DimMappings
    SizeType m_NumDimMappings;
};
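
// Usage sketch (illustrative) matching the constructor documentation above:
// the NHWC-to-ArmNN mapping and its inverse recognise each other.
//
//     armnn::PermutationVector nhwcToArmNN({ 0, 2, 3, 1 });
//     armnn::PermutationVector armnnToNhwc({ 0, 3, 1, 2 });
//     assert(nhwcToArmNN.IsInverse(armnnToNhwc));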

namespace profiling { class ProfilingGuid; }

/// Define LayerGuid type.
using LayerGuid = profiling::ProfilingGuid;

class ITensorHandle;

/// Define the type of callback for the Debug layer to call
/// @param guid - guid of layer connected to the input of the Debug layer
/// @param slotIndex - index of the output slot connected to the input of the Debug layer
/// @param tensorHandle - TensorHandle for the input tensor to the Debug layer
using DebugCallbackFunction = std::function<void(LayerGuid guid, unsigned int slotIndex, ITensorHandle* tensorHandle)>;
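
// Illustrative sketch: any callable with this signature can act as the debug
// callback, e.g. a lambda that logs which layer and output slot produced a
// tensor (assumes <iostream>; registering the callback with the runtime is
// out of scope for this header):
//
//     armnn::DebugCallbackFunction logTensor =
//         [](armnn::LayerGuid guid, unsigned int slotIndex, armnn::ITensorHandle* /*tensorHandle*/)
//     {
//         std::cout << "layer guid " << uint64_t(guid)
//                   << ", output slot " << slotIndex << "\n";
//     };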


namespace profiling
{

static constexpr uint64_t MIN_STATIC_GUID = 1llu << 63;

class ProfilingGuid
{
public:
    ProfilingGuid(uint64_t guid) : m_Guid(guid) {}

    operator uint64_t() const { return m_Guid; }

    bool operator==(const ProfilingGuid& other) const
    {
        return m_Guid == other.m_Guid;
    }

    bool operator!=(const ProfilingGuid& other) const
    {
        return m_Guid != other.m_Guid;
    }

    bool operator<(const ProfilingGuid& other) const
    {
        return m_Guid < other.m_Guid;
    }

    bool operator<=(const ProfilingGuid& other) const
    {
        return m_Guid <= other.m_Guid;
    }

    bool operator>(const ProfilingGuid& other) const
    {
        return m_Guid > other.m_Guid;
    }

    bool operator>=(const ProfilingGuid& other) const
    {
        return m_Guid >= other.m_Guid;
    }

protected:
    uint64_t m_Guid;
};

/// Strongly typed guids to distinguish between those generated at runtime and those that are statically defined.
struct ProfilingDynamicGuid : public ProfilingGuid
{
    using ProfilingGuid::ProfilingGuid;
};

struct ProfilingStaticGuid : public ProfilingGuid
{
    using ProfilingGuid::ProfilingGuid;
};
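
// Illustrative sketch: MIN_STATIC_GUID sets the top bit of the 64-bit value,
// so a guid can be classified by comparing against it (an inference from the
// constant above, not a documented API guarantee):
//
//     bool IsStaticGuid(const armnn::profiling::ProfilingGuid& guid)
//     {
//         return uint64_t(guid) >= armnn::profiling::MIN_STATIC_GUID;
//     }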

} // namespace profiling

} // namespace armnn


namespace std
{
// make ProfilingGuid hashable
template<>
struct hash<armnn::profiling::ProfilingGuid>
{
    std::size_t operator()(armnn::profiling::ProfilingGuid const& guid) const noexcept
    {
        return hash<uint64_t>()(uint64_t(guid));
    }
};

// make ProfilingDynamicGuid hashable
template<>
struct hash<armnn::profiling::ProfilingDynamicGuid>
{
    std::size_t operator()(armnn::profiling::ProfilingDynamicGuid const& guid) const noexcept
    {
        return hash<uint64_t>()(uint64_t(guid));
    }
};

// make ProfilingStaticGuid hashable
template<>
struct hash<armnn::profiling::ProfilingStaticGuid>
{
    std::size_t operator()(armnn::profiling::ProfilingStaticGuid const& guid) const noexcept
    {
        return hash<uint64_t>()(uint64_t(guid));
    }
};
} // namespace std
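
// With the specializations above, guids can key the standard unordered
// containers directly; an illustrative use (assumes <unordered_map> and
// <string>):
//
//     std::unordered_map<armnn::profiling::ProfilingGuid, std::string> layerNames;
//     layerNames[armnn::profiling::ProfilingGuid(42)] = "conv1";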