ArmNN 21.02
Types.hpp
//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once

#include <array>
#include <functional>
#include <memory>
#include <stdint.h>
#include "BackendId.hpp"
#include "Exceptions.hpp"
#include "Deprecated.hpp"

namespace armnn
{

constexpr unsigned int MaxNumOfTensorDimensions = 5U;
/// The lowest performance data capture interval we support is 10 milliseconds (the constant is expressed in microseconds).
constexpr unsigned int LOWEST_CAPTURE_PERIOD = 10000u;
22 
23 /// @enum Status enumeration
24 /// @var Status::Successful
25 /// @var Status::Failure
26 enum class Status
27 {
28  Success = 0,
29  Failure = 1
30 };

enum class DataType
{
    Float16  = 0,
    Float32  = 1,
    QAsymmU8 = 2,
    Signed32 = 3,
    Boolean  = 4,
    QSymmS16 = 5,
    QuantizedSymm8PerAxis ARMNN_DEPRECATED_ENUM_MSG("Per Axis property inferred by number of scales in TensorInfo") = 6,
    QSymmS8  = 7,
    QAsymmS8 = 8,
    BFloat16 = 9,
    Signed64 = 10,

    QuantisedAsymm8 ARMNN_DEPRECATED_ENUM_MSG("Use DataType::QAsymmU8 instead.") = QAsymmU8,
    QuantisedSymm16 ARMNN_DEPRECATED_ENUM_MSG("Use DataType::QSymmS16 instead.") = QSymmS16
};

enum class DataLayout
{
    NCHW = 1,
    NHWC = 2
};

enum class ActivationFunction
{
    Sigmoid     = 0,
    TanH        = 1,
    Linear      = 2,
    ReLu        = 3,
    BoundedReLu = 4, ///< min(a, max(b, input)) ReLu1 & ReLu6.
    SoftReLu    = 5,
    LeakyReLu   = 6,
    Abs         = 7,
    Sqrt        = 8,
    Square      = 9,
    Elu         = 10,
    HardSwish   = 11
};

enum class ArgMinMaxFunction
{
    Min = 0,
    Max = 1
};

enum class ComparisonOperation
{
    Equal          = 0,
    Greater        = 1,
    GreaterOrEqual = 2,
    Less           = 3,
    LessOrEqual    = 4,
    NotEqual       = 5
};

enum class LogicalBinaryOperation
{
    LogicalAnd = 0,
    LogicalOr  = 1
};

enum class UnaryOperation
{
    Abs        = 0,
    Exp        = 1,
    Sqrt       = 2,
    Rsqrt      = 3,
    Neg        = 4,
    LogicalNot = 5
};

enum class PoolingAlgorithm
{
    Max     = 0,
    Average = 1,
    L2      = 2
};

enum class ReduceOperation
{
    Sum  = 0,
    Max  = 1,
    Mean = 2,
    Min  = 3
};

enum class ResizeMethod
{
    Bilinear        = 0,
    NearestNeighbor = 1
};

enum class Dimensionality
{
    NotSpecified = 0,
    Specified    = 1,
    Scalar       = 2
};

///
/// The padding method modifies the output of pooling layers.
/// In both supported methods, the padding values are ignored (they are
/// not even zeroes, which would make a difference for max pooling
/// a tensor with negative values). The difference between
/// IgnoreValue and Exclude is that the former counts the padding
/// fields in the divisor of Average and L2 pooling, while
/// Exclude does not.
///
enum class PaddingMethod
{
    /// The padding fields count, but are ignored
    IgnoreValue = 0,
    /// The padding fields don't count and are ignored
    Exclude     = 1
};
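
// Worked example (illustrative, not part of the original header): a 2x2
// average pool placed at the top-left corner of a tensor padded by 1 covers
// one real element, say 6.0f, and three padding fields.
//   PaddingMethod::IgnoreValue -> padding counts in the divisor: 6.0f / 4 = 1.5f
//   PaddingMethod::Exclude     -> only real elements count:      6.0f / 1 = 6.0f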

enum class NormalizationAlgorithmChannel
{
    Across = 0,
    Within = 1
};

enum class NormalizationAlgorithmMethod
{
    /// Krichevsky 2012: Local Brightness Normalization
    LocalBrightness = 0,
    /// Jarret 2009: Local Contrast Normalization
    LocalContrast = 1
};

enum class OutputShapeRounding
{
    Floor   = 0,
    Ceiling = 1
};

///
/// The ShapeInferenceMethod modifies how the output shapes are treated.
/// When ValidateOnly is selected, the output shapes are inferred from the input parameters of the layer
/// and any mismatch is reported.
/// When InferAndValidate is selected, two actions are performed: (1) infer output shapes from inputs and
/// (2) validate the shapes as in ValidateOnly. This option has been added to work with tensors whose rank or
/// dimension sizes are not specified explicitly but can be calculated from the inputs.
///
enum class ShapeInferenceMethod
{
    /// Validate all output shapes
    ValidateOnly     = 0,
    /// Infer missing output shapes and validate all output shapes
    InferAndValidate = 1
};
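
// A minimal sketch of how a layer could act on the two modes (illustrative,
// not part of the original header; ApplyShapeInference and ShapeT are
// hypothetical names, while real layers work on armnn::TensorShape):
template <typename ShapeT>
void ApplyShapeInference(ShapeT& outputShape,
                         const ShapeT& inferredShape,
                         ShapeInferenceMethod method)
{
    if (method == ShapeInferenceMethod::InferAndValidate)
    {
        outputShape = inferredShape;      // (1) adopt the inferred shape
    }
    if (!(outputShape == inferredShape))  // (2) validate, in both modes
    {
        throw Exception("Output shape does not match inferred shape");
    }
}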

/// Each backend should implement an IBackend.
class IBackend
{
protected:
    IBackend() {}
    virtual ~IBackend() {}

public:
    virtual const BackendId& GetId() const = 0;
};

using IBackendSharedPtr = std::shared_ptr<IBackend>;
using IBackendUniquePtr = std::unique_ptr<IBackend, void(*)(IBackend* backend)>;

/// Device specific knowledge to be passed to the optimizer.
class IDeviceSpec
{
protected:
    IDeviceSpec() {}
    virtual ~IDeviceSpec() {}
public:
    virtual const BackendIdSet& GetSupportedBackends() const = 0;
};

/// Type of identifiers for bindable layers (inputs, outputs).
using LayerBindingId = int;

class PermutationVector
{
public:
    using ValueType = unsigned int;
    using SizeType = unsigned int;
    using ArrayType = std::array<ValueType, MaxNumOfTensorDimensions>;
    using ConstIterator = typename ArrayType::const_iterator;

    /// @param dimMappings - Indicates how to translate tensor elements from a given source into the target destination,
    /// when source and target potentially have different memory layouts.
    ///
    /// E.g. For a 4-d tensor laid out in a memory with the format (Batch Element, Height, Width, Channels),
    /// which is to be passed as an input to ArmNN, each source dimension is mapped to the corresponding
    /// ArmNN dimension. The Batch dimension remains the same (0 -> 0). The source Height dimension is mapped
    /// to the location of the ArmNN Height dimension (1 -> 2). Similar arguments are made for the Width and
    /// Channels (2 -> 3 and 3 -> 1). This will lead to @ref m_DimMappings pointing to the following array:
    /// [ 0, 2, 3, 1 ].
    ///
    /// Note that the mapping should be reversed if considering the case of ArmNN 4-d outputs (Batch Element,
    /// Channels, Height, Width) being written to a destination with the format mentioned above. We now have
    /// 0 -> 0, 2 -> 1, 3 -> 2, 1 -> 3, which, when reordered, leads to the following @ref m_DimMappings contents:
    /// [ 0, 3, 1, 2 ].
    ///
    PermutationVector(const ValueType *dimMappings, SizeType numDimMappings);

    PermutationVector(std::initializer_list<ValueType> dimMappings);

    ValueType operator[](SizeType i) const { return m_DimMappings.at(i); }

    SizeType GetSize() const { return m_NumDimMappings; }

    ConstIterator begin() const { return m_DimMappings.begin(); }
    ConstIterator end() const { return m_DimMappings.end(); }

    bool IsEqual(const PermutationVector& other) const
    {
        if (m_NumDimMappings != other.m_NumDimMappings) return false;
        for (unsigned int i = 0; i < m_NumDimMappings; ++i)
        {
            if (m_DimMappings[i] != other.m_DimMappings[i]) return false;
        }
        return true;
    }

    bool IsInverse(const PermutationVector& other) const
    {
        bool isInverse = (GetSize() == other.GetSize());
        for (SizeType i = 0; isInverse && (i < GetSize()); ++i)
        {
            isInverse = (m_DimMappings[other.m_DimMappings[i]] == i);
        }
        return isInverse;
    }

private:
    ArrayType m_DimMappings;
    /// Number of valid entries in @ref m_DimMappings
    SizeType m_NumDimMappings;
};
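
// Usage sketch (illustrative, not part of the original header; the wrapper
// function name is hypothetical): the two mappings described in the comment
// above are inverses of one another.
inline void PermutationVectorExample()
{
    PermutationVector nhwcToArmnn({ 0u, 2u, 3u, 1u });  // source dim -> ArmNN dim
    PermutationVector armnnToNhwc({ 0u, 3u, 1u, 2u });  // the reversed mapping
    // IsInverse checks m_DimMappings[other[i]] == i for every i.
    bool ok = nhwcToArmnn.IsInverse(armnnToNhwc);       // true
    (void)ok;
}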

namespace profiling { class ProfilingGuid; }

/// Define LayerGuid type.
using LayerGuid = profiling::ProfilingGuid;

class ITensorHandle;

/// Define the type of callback for the Debug layer to call
/// @param guid - guid of layer connected to the input of the Debug layer
/// @param slotIndex - index of the output slot connected to the input of the Debug layer
/// @param tensorHandle - TensorHandle for the input tensor to the Debug layer
using DebugCallbackFunction = std::function<void(LayerGuid guid, unsigned int slotIndex, ITensorHandle* tensorHandle)>;
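
// A minimal sketch of a matching callback (illustrative, not part of the
// original header; MakeLoggingDebugCallback is a hypothetical name):
inline DebugCallbackFunction MakeLoggingDebugCallback()
{
    return [](LayerGuid guid, unsigned int slotIndex, ITensorHandle* tensorHandle)
    {
        // Inspect or log the intermediate tensor here; the handle is owned
        // by the runtime, so the callback must not free it.
        (void)guid; (void)slotIndex; (void)tensorHandle;
    };
}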


namespace profiling
{

static constexpr uint64_t MIN_STATIC_GUID = 1llu << 63;

class ProfilingGuid
{
public:
    ProfilingGuid() : m_Guid(0) {}

    ProfilingGuid(uint64_t guid) : m_Guid(guid) {}

    operator uint64_t() const { return m_Guid; }

    bool operator==(const ProfilingGuid& other) const
    {
        return m_Guid == other.m_Guid;
    }

    bool operator!=(const ProfilingGuid& other) const
    {
        return m_Guid != other.m_Guid;
    }

    bool operator<(const ProfilingGuid& other) const
    {
        return m_Guid < other.m_Guid;
    }

    bool operator<=(const ProfilingGuid& other) const
    {
        return m_Guid <= other.m_Guid;
    }

    bool operator>(const ProfilingGuid& other) const
    {
        return m_Guid > other.m_Guid;
    }

    bool operator>=(const ProfilingGuid& other) const
    {
        return m_Guid >= other.m_Guid;
    }

protected:
    uint64_t m_Guid;
};

/// Strongly typed guids to distinguish between those generated at runtime, and those that are statically defined.
struct ProfilingDynamicGuid : public ProfilingGuid
{
    using ProfilingGuid::ProfilingGuid;
};

struct ProfilingStaticGuid : public ProfilingGuid
{
    using ProfilingGuid::ProfilingGuid;
};

} // namespace profiling
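
// Illustrative sketch (not part of the original header): MIN_STATIC_GUID sets
// the top bit of the 64-bit value, so statically defined guids occupy the
// upper half of the range. A hypothetical check could use the implicit
// uint64_t conversion:
inline bool IsStaticGuid(profiling::ProfilingGuid guid)
{
    return uint64_t(guid) >= profiling::MIN_STATIC_GUID;
}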

/// This list uses X macro technique.
/// See https://en.wikipedia.org/wiki/X_Macro for more info
#define LIST_OF_LAYER_TYPE \
    X(Activation) \
    X(Addition) \
    X(ArgMinMax) \
    X(BatchNormalization) \
    X(BatchToSpaceNd) \
    X(Comparison) \
    X(Concat) \
    X(Constant) \
    X(ConvertBf16ToFp32) \
    X(ConvertFp16ToFp32) \
    X(ConvertFp32ToBf16) \
    X(ConvertFp32ToFp16) \
    X(Convolution2d) \
    X(Debug) \
    X(DepthToSpace) \
    X(DepthwiseConvolution2d) \
    X(Dequantize) \
    X(DetectionPostProcess) \
    X(Division) \
    X(ElementwiseUnary) \
    X(FakeQuantization) \
    X(Fill) \
    X(Floor) \
    X(FullyConnected) \
    X(Gather) \
    X(Input) \
    X(InstanceNormalization) \
    X(L2Normalization) \
    X(LogicalBinary) \
    X(LogSoftmax) \
    X(Lstm) \
    X(QLstm) \
    X(Map) \
    X(Maximum) \
    X(Mean) \
    X(MemCopy) \
    X(MemImport) \
    X(Merge) \
    X(Minimum) \
    X(Multiplication) \
    X(Normalization) \
    X(Output) \
    X(Pad) \
    X(Permute) \
    X(Pooling2d) \
    X(PreCompiled) \
    X(Prelu) \
    X(Quantize) \
    X(QuantizedLstm) \
    X(Reshape) \
    X(Rank) \
    X(Resize) \
    X(Reduce) \
    X(Slice) \
    X(Softmax) \
    X(SpaceToBatchNd) \
    X(SpaceToDepth) \
    X(Splitter) \
    X(Stack) \
    X(StandIn) \
    X(StridedSlice) \
    X(Subtraction) \
    X(Switch) \
    X(Transpose) \
    X(TransposeConvolution2d) \
    X(Unmap)

/// When adding a new layer, adapt also the LastLayer enum value in the
/// enum class LayerType below
enum class LayerType
{
#define X(name) name,
    LIST_OF_LAYER_TYPE
#undef X
    FirstLayer = Activation,
    LastLayer = Unmap
};

const char* GetLayerTypeAsCString(LayerType type);
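
// A minimal sketch of what the X macro buys us (illustrative; the function
// name GetLayerTypeAsCStringExample is hypothetical, and the real
// GetLayerTypeAsCString is defined in a source file, not here):
inline const char* GetLayerTypeAsCStringExample(LayerType type)
{
    switch (type)
    {
// Expands to one case per entry: case LayerType::Activation: return "Activation"; ...
#define X(name) case LayerType::name: return #name;
        LIST_OF_LAYER_TYPE
#undef X
        default: return "Unknown";
    }
}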

} // namespace armnn


namespace std
{
/// make ProfilingGuid hashable
template<>
struct hash<armnn::profiling::ProfilingGuid>
{
    std::size_t operator()(armnn::profiling::ProfilingGuid const& guid) const noexcept
    {
        return hash<uint64_t>()(uint64_t(guid));
    }
};

/// make ProfilingDynamicGuid hashable
template<>
struct hash<armnn::profiling::ProfilingDynamicGuid>
{
    std::size_t operator()(armnn::profiling::ProfilingDynamicGuid const& guid) const noexcept
    {
        return hash<uint64_t>()(uint64_t(guid));
    }
};

/// make ProfilingStaticGuid hashable
template<>
struct hash<armnn::profiling::ProfilingStaticGuid>
{
    std::size_t operator()(armnn::profiling::ProfilingStaticGuid const& guid) const noexcept
    {
        return hash<uint64_t>()(uint64_t(guid));
    }
};
} // namespace std
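
// With the std::hash specializations above, the guid types can key standard
// unordered containers directly. A minimal sketch (illustrative; the GuidSeen
// helper is hypothetical and this header does not itself pull in the
// container header):
#include <unordered_set>

inline bool GuidSeen(const std::unordered_set<armnn::profiling::ProfilingGuid>& seen,
                     armnn::profiling::ProfilingGuid guid)
{
    return seen.find(guid) != seen.end();  // uses std::hash<ProfilingGuid>
}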