ArmNN 22.08
Types.hpp
//
// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once

#include <array>
#include <functional>
#include <stdint.h>
#include <chrono>
#include "BackendId.hpp"
#include "Exceptions.hpp"
#include "Deprecated.hpp"

namespace arm
{
namespace pipe
{

class ProfilingGuid;

} // namespace pipe
} // namespace arm

/// Define LayerGuid type.
using LayerGuid = arm::pipe::ProfilingGuid;

namespace armnn
{

constexpr unsigned int MaxNumOfTensorDimensions = 5U;

/// The lowest performance data capture interval we support is 10 milliseconds (the constant is expressed in microseconds).
constexpr unsigned int LOWEST_CAPTURE_PERIOD = 10000u;

/// Constant controlling the expiry rate of the priority queue.
constexpr unsigned int EXPIRE_RATE = 3U;

/// @enum Status enumeration
/// @var Status::Success
/// @var Status::Failure
enum class Status
{
    Success = 0,
    Failure = 1
};

enum class DataType
{
    Float16 = 0,
    Float32 = 1,
    QAsymmU8 = 2,
    Signed32 = 3,
    Boolean = 4,
    QSymmS16 = 5,
    QSymmS8 = 6,
    QAsymmS8 = 7,
    BFloat16 = 8,
    Signed64 = 9,
};

enum class DataLayout
{
    NCHW = 1,
    NHWC = 2,
    NDHWC = 3,
    NCDHW = 4
};

/// Defines the behaviour of the internal profiler when outputting network details.
enum class ProfilingDetailsMethod
{
    Undefined = 0,
    DetailsWithEvents = 1,
    DetailsOnly = 2
};

enum class QosExecPriority
{
    Low = 0,
    Medium = 1,
    High = 2
};

enum class ActivationFunction
{
    Sigmoid = 0,
    TanH = 1,
    Linear = 2,
    ReLu = 3,
    BoundedReLu = 4, ///< min(a, max(b, input)) ReLu1 & ReLu6.
    SoftReLu = 5,
    LeakyReLu = 6,
    Abs = 7,
    Sqrt = 8,
    Square = 9,
    Elu = 10,
    HardSwish = 11
};
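
// Note: reading the min(a, max(b, input)) form documented above, ReLu6 corresponds
// to a = 6, b = 0 and ReLu1 to a = 1, b = -1 (an illustrative interpretation of the
// BoundedReLu comment, not additional API).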

enum class ArgMinMaxFunction
{
    Min = 0,
    Max = 1
};

enum class ComparisonOperation
{
    Equal = 0,
    Greater = 1,
    GreaterOrEqual = 2,
    Less = 3,
    LessOrEqual = 4,
    NotEqual = 5
};

enum class LogicalBinaryOperation
{
    LogicalAnd = 0,
    LogicalOr = 1
};

enum class UnaryOperation
{
    Abs = 0,
    Exp = 1,
    Sqrt = 2,
    Rsqrt = 3,
    Neg = 4,
    LogicalNot = 5,
    Log = 6,
    Sin = 7
};

enum class PoolingAlgorithm
{
    Max = 0,
    Average = 1,
    L2 = 2
};

enum class ReduceOperation
{
    Sum = 0,
    Max = 1,
    Mean = 2,
    Min = 3,
    Prod = 4
};

enum class ResizeMethod
{
    Bilinear = 0,
    NearestNeighbor = 1
};

enum class Dimensionality
{
    NotSpecified = 0,
    Specified = 1,
    Scalar = 2
};

///
/// The padding method modifies the output of pooling layers.
/// In both supported methods, the padded values are ignored (they are
/// not even zeroes, which would make a difference for max pooling
/// a tensor with negative values). The difference between
/// IgnoreValue and Exclude is that the former counts the padding
/// fields in the divisor of Average and L2 pooling, while
/// Exclude does not.
///
enum class PaddingMethod
{
    /// The padding fields count, but are ignored
    IgnoreValue = 0,
    /// The padding fields don't count and are ignored
    Exclude = 1
};
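
// Worked example of the divisor difference (illustrative only, not part of the
// original header): average-pooling a window of size 3 that covers the real
// values {4, 6} plus one padding field gives
//   IgnoreValue: (4 + 6) / 3   (the padding field is ignored but still counted)
//   Exclude:     (4 + 6) / 2   (the padding field is neither used nor counted)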

///
/// The padding mode controls whether the padding should be filled with constant values (Constant), or
/// reflect the input, either including the border values (Symmetric) or not (Reflect).
///
enum class PaddingMode
{
    Constant = 0,
    Reflect = 1,
    Symmetric = 2
};
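
// Illustrative example (not part of the original header): padding the 1-D
// sequence [1, 2, 3] by two elements on each side gives
//   Constant (value 0): [0, 0, 1, 2, 3, 0, 0]
//   Reflect:            [3, 2, 1, 2, 3, 2, 1]   // border values not repeated
//   Symmetric:          [2, 1, 1, 2, 3, 3, 2]   // border values repeated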

enum class NormalizationAlgorithmChannel
{
    Across = 0,
    Within = 1
};

enum class NormalizationAlgorithmMethod
{
    /// Krichevsky 2012: Local Brightness Normalization
    LocalBrightness = 0,
    /// Jarret 2009: Local Contrast Normalization
    LocalContrast = 1
};

enum class OutputShapeRounding
{
    Floor = 0,
    Ceiling = 1
};

///
/// The ShapeInferenceMethod modifies how the output shapes are treated.
/// When ValidateOnly is selected, the output shapes are inferred from the input parameters of the layer
/// and any mismatch is reported.
/// When InferAndValidate is selected, two actions are performed: (1) infer the output shape from the inputs
/// and (2) validate the shapes as in ValidateOnly. This option has been added to work with tensors whose rank
/// or dimension sizes are not specified explicitly but can be calculated from the inputs.
///
enum class ShapeInferenceMethod
{
    /// Validate all output shapes
    ValidateOnly = 0,
    /// Infer missing output shapes and validate all output shapes
    InferAndValidate = 1
};
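
// A minimal selection sketch (illustrative; assumes the OptimizerOptions struct
// from INetwork.hpp and its m_shapeInferenceMethod member, which live outside
// this header):
//
//   armnn::OptimizerOptions options;
//   options.m_shapeInferenceMethod = armnn::ShapeInferenceMethod::InferAndValidate;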

/// Define the Memory Source to reduce copies
enum class MemorySource : uint32_t
{
    Undefined = 0,
    Malloc = 1,
    DmaBuf = 2,
    DmaBufProtected = 4,
    Gralloc = 5
};

enum class MemBlockStrategyType
{
    // MemBlocks can be packed on the Y axis only; overlap is allowed on the X axis.
    // In other words, MemBlocks with overlapping lifetimes cannot use the same MemBin;
    // equivalent to blob or pooling memory management.
    SingleAxisPacking = 0,

    // MemBlocks can be packed on either the Y or X axis, but cannot overlap on both.
    // In other words, MemBlocks with overlapping lifetimes can use the same MemBin;
    // equivalent to offset or slab memory management.
    MultiAxisPacking = 1
};
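
// Illustrative example (not from the original header): if block A is live over
// time steps [0, 3] and block B over [2, 5], their lifetimes overlap, so with
// SingleAxisPacking they must be placed in different MemBins, while with
// MultiAxisPacking they may share a MemBin at non-overlapping offsets.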

/// Each backend should implement an IBackend.
class IBackend
{
protected:
    IBackend() {}
    virtual ~IBackend() {}

public:
    virtual const BackendId& GetId() const = 0;
};

using IBackendSharedPtr = std::shared_ptr<IBackend>;
using IBackendUniquePtr = std::unique_ptr<IBackend, void(*)(IBackend* backend)>;
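
// A minimal sketch of satisfying this interface (illustrative only; real ArmNN
// backends derive from the richer IBackendInternal in the backends framework):
//
//   class MyBackend : public armnn::IBackend
//   {
//   public:
//       const armnn::BackendId& GetId() const override
//       {
//           static const armnn::BackendId id("MyBackend");
//           return id;
//       }
//   };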

/// BackendCapability class
enum class BackendCapability : uint32_t
{
    /// Constant weights can be accessed through the descriptors;
    /// non-const weights, on the other hand, can be accessed through inputs.
    NonConstWeights,

    /// Asynchronous Execution.
    AsyncExecution,

    // add new enum values here
};

/// Device specific knowledge to be passed to the optimizer.
class IDeviceSpec
{
protected:
    IDeviceSpec() {}
    virtual ~IDeviceSpec() {}
public:
    virtual const BackendIdSet& GetSupportedBackends() const = 0;
};

/// Type of identifiers for bindable layers (inputs, outputs).
using LayerBindingId = int;
using ImportedInputId = unsigned int;
using ImportedOutputId = unsigned int;

class PermutationVector
{
public:
    using ValueType = unsigned int;
    using SizeType = unsigned int;
    using ArrayType = std::array<ValueType, MaxNumOfTensorDimensions>;
    using ConstIterator = typename ArrayType::const_iterator;

    /// @param dimMappings - Indicates how to translate tensor elements from a given source into the target destination,
    /// when source and target potentially have different memory layouts.
    ///
    /// E.g. For a 4-d tensor laid out in a memory with the format (Batch Element, Height, Width, Channels),
    /// which is to be passed as an input to ArmNN, each source dimension is mapped to the corresponding
    /// ArmNN dimension. The Batch dimension remains the same (0 -> 0). The source Height dimension is mapped
    /// to the location of the ArmNN Height dimension (1 -> 2). Similar arguments are made for the Width and
    /// Channels (2 -> 3 and 3 -> 1). This will lead to @ref m_DimMappings pointing to the following array:
    /// [ 0, 2, 3, 1 ].
    ///
    /// Note that the mapping should be reversed if considering the case of ArmNN 4-d outputs (Batch Element,
    /// Channels, Height, Width) being written to a destination with the format mentioned above. We now have
    /// 0 -> 0, 2 -> 1, 3 -> 2, 1 -> 3, which, when reordered, lead to the following @ref m_DimMappings contents:
    /// [ 0, 3, 1, 2 ].
    ///
    PermutationVector(const ValueType *dimMappings, SizeType numDimMappings);

    PermutationVector(std::initializer_list<ValueType> dimMappings);

    ///
    /// Indexing method with out-of-bounds error checking for the m_DimMappings array.
    /// @param i - index of the m_DimMappings array element to retrieve.
    /// @return element at index i of the m_DimMappings array.
    /// @throws InvalidArgumentException when an out-of-bounds index of the m_DimMappings array is used.
    ///
    ValueType operator[](SizeType i) const
    {
        if (i >= GetSize())
        {
            throw InvalidArgumentException("Invalid indexing of PermutationVector of size " + std::to_string(GetSize())
                                           + " at location [" + std::to_string(i) + "].");
        }
        return m_DimMappings.at(i);
    }

    SizeType GetSize() const { return m_NumDimMappings; }

    ConstIterator begin() const { return m_DimMappings.begin(); }
    /**
     * @return an iterator pointing one past the last valid mapping, not past the end of m_DimMappings.
     */
    ConstIterator end() const { return m_DimMappings.begin() + m_NumDimMappings; }

    bool IsEqual(const PermutationVector& other) const
    {
        if (m_NumDimMappings != other.m_NumDimMappings) return false;
        for (unsigned int i = 0; i < m_NumDimMappings; ++i)
        {
            if (m_DimMappings[i] != other.m_DimMappings[i]) return false;
        }
        return true;
    }

    bool IsInverse(const PermutationVector& other) const
    {
        bool isInverse = (GetSize() == other.GetSize());
        for (SizeType i = 0; isInverse && (i < GetSize()); ++i)
        {
            isInverse = (m_DimMappings[other.m_DimMappings[i]] == i);
        }
        return isInverse;
    }

private:
    ArrayType m_DimMappings;
    /// Number of valid entries in @ref m_DimMappings
    SizeType m_NumDimMappings;
};
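
// A usage sketch based on the mapping documented above (illustrative only):
//
//   armnn::PermutationVector toArmNN({ 0, 2, 3, 1 });  // NHWC source -> ArmNN NCHW
//   armnn::PermutationVector toNHWC({ 0, 3, 1, 2 });   // ArmNN NCHW -> NHWC destination
//   bool areInverse = toArmNN.IsInverse(toNHWC);       // expected to be true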

class ITensorHandle;

/// Define the type of callback for the Debug layer to call
/// @param guid - guid of the layer connected to the input of the Debug layer
/// @param slotIndex - index of the output slot connected to the input of the Debug layer
/// @param tensorHandle - TensorHandle for the input tensor to the Debug layer
using DebugCallbackFunction = std::function<void(LayerGuid guid, unsigned int slotIndex, ITensorHandle* tensorHandle)>;
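
// A minimal sketch of a matching callback (illustrative; such a function is
// typically registered against a loaded network through the runtime's
// debug-callback registration, which lives outside this header):
//
//   armnn::DebugCallbackFunction printShape =
//       [](LayerGuid guid, unsigned int slotIndex, armnn::ITensorHandle* tensorHandle)
//       {
//           // e.g. inspect tensorHandle->GetShape() for the tensor feeding the Debug layer
//       };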

/// Define a timer and associated inference ID for recording execution times
using HighResolutionClock = std::chrono::high_resolution_clock::time_point;
using InferenceTimingPair = std::pair<HighResolutionClock, HighResolutionClock>;

/// This list uses the X macro technique.
/// See https://en.wikipedia.org/wiki/X_Macro for more info
#define LIST_OF_LAYER_TYPE \
    X(Activation) \
    X(Addition) \
    X(ArgMinMax) \
    X(BatchNormalization) \
    X(BatchToSpaceNd) \
    X(Comparison) \
    X(Concat) \
    X(Constant) \
    X(ConvertBf16ToFp32) \
    X(ConvertFp16ToFp32) \
    X(ConvertFp32ToBf16) \
    X(ConvertFp32ToFp16) \
    X(Convolution2d) \
    X(Debug) \
    X(DepthToSpace) \
    X(DepthwiseConvolution2d) \
    X(Dequantize) \
    X(DetectionPostProcess) \
    X(Division) \
    X(ElementwiseUnary) \
    X(FakeQuantization) \
    X(Fill) \
    X(Floor) \
    X(FullyConnected) \
    X(Gather) \
    X(Input) \
    X(InstanceNormalization) \
    X(L2Normalization) \
    X(LogicalBinary) \
    X(LogSoftmax) \
    X(Lstm) \
    X(QLstm) \
    X(Map) \
    X(Maximum) \
    X(Mean) \
    X(MemCopy) \
    X(MemImport) \
    X(Merge) \
    X(Minimum) \
    X(Multiplication) \
    X(Normalization) \
    X(Output) \
    X(Pad) \
    X(Permute) \
    X(Pooling2d) \
    X(PreCompiled) \
    X(Prelu) \
    X(Quantize) \
    X(QuantizedLstm) \
    X(Reshape) \
    X(Rank) \
    X(Resize) \
    X(Reduce) \
    X(Slice) \
    X(Softmax) \
    X(SpaceToBatchNd) \
    X(SpaceToDepth) \
    X(Splitter) \
    X(Stack) \
    X(StandIn) \
    X(StridedSlice) \
    X(Subtraction) \
    X(Switch) \
    X(Transpose) \
    X(TransposeConvolution2d) \
    X(Unmap) \
    X(Cast) \
    X(Shape) \
    X(UnidirectionalSequenceLstm) \
    X(ChannelShuffle) \
    X(Convolution3d) \
    X(Pooling3d) \
    X(GatherNd) \
    X(BatchMatMul) \

// New layers should be added at the end of the list to minimize instability.

/// When adding a new layer, also adapt the LastLayer enum value in the
/// enum class LayerType below.
enum class LayerType
{
#define X(name) name,
    LIST_OF_LAYER_TYPE
#undef X
    FirstLayer = Activation,
    LastLayer = UnidirectionalSequenceLstm
};

const char* GetLayerTypeAsCString(LayerType type);
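
// One possible way GetLayerTypeAsCString could be implemented with the same
// X macro (a sketch under that assumption, not necessarily the actual
// implementation shipped in ArmNN):
//
//   const char* GetLayerTypeAsCString(LayerType type)
//   {
//       switch (type)
//       {
//   #define X(name) case LayerType::name: return #name;
//           LIST_OF_LAYER_TYPE
//   #undef X
//           default: return "Unknown";
//       }
//   }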

} // namespace armnn