ArmNN 22.02 - Descriptors.hpp
1 //
2 // Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 #pragma once
6 
7 #include "Deprecated.hpp"
8 #include "DescriptorsFwd.hpp"
9 
10 #include <cstdint>
11 #include <initializer_list>
12 #include <iostream>
13 #include <sstream>
14 
15 #include "Tensor.hpp"
16 #include "Types.hpp"
17 
18 namespace armnn
19 {
20 
21 /// Base class for all descriptors.
22 struct BaseDescriptor
23 {
24  virtual bool IsNull() const { return false; }
25  virtual ~BaseDescriptor() = default;
26 };
27 
28 /// Null Descriptor used as a return value from the IConnectableLayer GetParameters method
29 /// by layers which do not have a descriptor
30 struct NullDescriptor : BaseDescriptor
31 {
32  bool IsNull() const override { return true; }
33 };
34 
35 /// An ActivationDescriptor for the ActivationLayer.
36 struct ActivationDescriptor : BaseDescriptor
37 {
38  ActivationDescriptor()
39  : m_Function(ActivationFunction::Sigmoid)
40  , m_A(0)
41  , m_B(0)
42  {}
43 
44  ActivationDescriptor(armnn::ActivationFunction activation,
45  float a = 0,
46  float b = 0)
47  : m_Function(activation)
48  , m_A(a)
49  , m_B(b)
50  {}
51 
52  bool operator ==(const ActivationDescriptor &rhs) const
53  {
54  return m_Function == rhs.m_Function && m_A == rhs.m_A && m_B == rhs.m_B;
55  }
56 
57  /// @brief The activation function to use
58  /// (Sigmoid, TanH, Linear, ReLu, BoundedReLu, SoftReLu, LeakyReLu, Abs, Sqrt, Square, Elu).
59  ActivationFunction m_Function;
60  /// Alpha upper bound value used by the activation functions. (BoundedReLu, Linear, TanH, Elu).
61  float m_A;
62  /// Beta lower bound value used by the activation functions. (BoundedReLu, Linear, TanH).
63  float m_B;
64 };
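As a usage sketch (not part of the header; values are illustrative), a bounded ReLU can be configured either through the convenience constructor or member by member:

    // Sketch: BoundedReLu (ReLU6-style) activation; m_A is the upper bound.
    armnn::ActivationDescriptor boundedRelu(armnn::ActivationFunction::BoundedReLu, 6.0f);

    // Equivalent member-wise setup.
    armnn::ActivationDescriptor desc;
    desc.m_Function = armnn::ActivationFunction::BoundedReLu;
    desc.m_A        = 6.0f;   // upper bound
    desc.m_B        = 0.0f;   // lower bound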
65 
66 /// An ArgMinMaxDescriptor for ArgMinMaxLayer
67 struct ArgMinMaxDescriptor : BaseDescriptor
68 {
69  ArgMinMaxDescriptor()
70  : m_Function(ArgMinMaxFunction::Min)
71  , m_Axis(-1)
72  , m_Output_Type(armnn::DataType::Signed32)
73  {}
74 
75  bool operator ==(const ArgMinMaxDescriptor &rhs) const
76  {
77  return m_Function == rhs.m_Function && m_Axis == rhs.m_Axis && m_Output_Type == rhs.m_Output_Type;
78  }
79 
80  /// Specify if the function is to find Min or Max.
81  ArgMinMaxFunction m_Function;
82  /// Axis to reduce across the input tensor.
83  int m_Axis;
84  /// Deprecated and will be removed in future release.
85  armnn::DataType m_Output_Type;
86 };
87 
88 /// A ComparisonDescriptor for the ComparisonLayer
89 struct ComparisonDescriptor : BaseDescriptor
90 {
91  ComparisonDescriptor()
92  : ComparisonDescriptor(ComparisonOperation::Equal)
93  {}
94 
95  ComparisonDescriptor(ComparisonOperation operation)
96  : m_Operation(operation)
97  {}
98 
99  bool operator ==(const ComparisonDescriptor &rhs) const
100  {
101  return m_Operation == rhs.m_Operation;
102  }
103 
104  /// Specifies the comparison operation to execute
105  ComparisonOperation m_Operation;
106 };
107 
108 /// A ElementwiseUnaryDescriptor for the ElementwiseUnaryLayer
109 struct ElementwiseUnaryDescriptor : BaseDescriptor
110 {
111  ElementwiseUnaryDescriptor()
112  : ElementwiseUnaryDescriptor(UnaryOperation::Abs)
113  {}
114 
115  ElementwiseUnaryDescriptor(UnaryOperation operation)
116  : m_Operation(operation)
117  {}
118 
119  bool operator ==(const ElementwiseUnaryDescriptor &rhs) const
120  {
121  return m_Operation == rhs.m_Operation;
122  }
123 
124  /// Specifies the elementwiseUnary operation to execute
125  UnaryOperation m_Operation;
126 };
127 
128 /// A PermuteDescriptor for the PermuteLayer.
129 struct PermuteDescriptor : BaseDescriptor
130 {
131  PermuteDescriptor()
132  : m_DimMappings{}
133  {}
134 
135  PermuteDescriptor(const PermutationVector& dimMappings)
136  : m_DimMappings(dimMappings)
137  {}
138 
139  bool operator ==(const PermuteDescriptor &rhs) const
140  {
141  return m_DimMappings.IsEqual(rhs.m_DimMappings);
142  }
143 
144  /// @brief Indicates how to translate tensor elements from a given source into the target destination, when
145  /// source and target potentially have different memory layouts e.g. {0U, 3U, 1U, 2U}.
146  PermutationVector m_DimMappings;
147 };
148 
149 /// A SoftmaxDescriptor for the SoftmaxLayer.
150 struct SoftmaxDescriptor : BaseDescriptor
151 {
152  SoftmaxDescriptor()
153  : m_Beta(1.0f)
154  , m_Axis(-1)
155  {}
156 
157  bool operator ==(const SoftmaxDescriptor& rhs) const
158  {
159  return m_Beta == rhs.m_Beta && m_Axis == rhs.m_Axis;
160  }
161 
162  /// Exponentiation value.
163  float m_Beta;
164  /// Scalar, defaulted to the last index (-1), specifying the dimension the activation will be performed on.
165  int m_Axis;
166 };
167 
168 /// A LogSoftmaxDescriptor for the LogSoftmaxLayer
169 using LogSoftmaxDescriptor = SoftmaxDescriptor;
170 
171 /// @brief An OriginsDescriptor for the ConcatLayer.
172 /// Descriptor to configure the concatenation process. Number of views must be equal to the number of inputs, and
173 /// their order must match - e.g. first view corresponds to the first input, second view to the second input, etc.
174 struct OriginsDescriptor : BaseDescriptor
175 {
176  OriginsDescriptor();
177  OriginsDescriptor(uint32_t numViews, uint32_t numDimensions = 4);
178  OriginsDescriptor(const OriginsDescriptor& other);
179  OriginsDescriptor(OriginsDescriptor&& other);
180 
181  ~OriginsDescriptor();
182 
183  OriginsDescriptor& operator=(OriginsDescriptor rhs);
184 
185  bool operator ==(const OriginsDescriptor& rhs) const;
186 
187  /// @brief Set the view origin coordinates. The arguments are: view, dimension, value.
188  /// If the view is greater than or equal to GetNumViews(), then the view argument is out of range.
189  /// If the coord is greater than or equal to GetNumDimensions(), then the coord argument is out of range.
190  Status SetViewOriginCoord(uint32_t view, uint32_t coord, uint32_t value);
191  /// Get the number of views.
192  uint32_t GetNumViews() const;
193  /// Get the number of dimensions.
194  uint32_t GetNumDimensions() const;
195  /// Return the view origin at the int value idx.
196  const uint32_t* GetViewOrigin(uint32_t idx) const;
197  /// @brief Reorders the viewOrigins in accordance with the indices presented in newOrdering array.
198  /// The number of views must match number of elements in the new ordering array.
199  void ReorderOrigins(unsigned int* newOrdering, unsigned int numNewOrdering);
200  /// Swap the OriginsDescriptor values first and second.
201  friend void swap(OriginsDescriptor& first, OriginsDescriptor& second);
202  /// Set the concatenation axis value.
203  void SetConcatAxis(unsigned int concatAxis);
204  /// Get the concatenation axis value.
205  unsigned int GetConcatAxis() const;
206 
207 private:
208  unsigned int m_ConcatAxis;
209  uint32_t m_NumViews;
210  uint32_t m_NumDimensions;
211  uint32_t** m_ViewOrigins;
212 };
213 
214 /// @brief A ViewsDescriptor for the SplitterLayer.
215 /// Descriptor to configure the splitting process. Number of Views must be equal to the number of outputs, and
216 /// their order must match - e.g. first view corresponds to the first output, second view to the second output, etc.
217 struct ViewsDescriptor : BaseDescriptor
218 {
219  ViewsDescriptor(uint32_t numViews, uint32_t numDimensions = 4);
220  ViewsDescriptor(const ViewsDescriptor& other);
221  ViewsDescriptor();
222  ViewsDescriptor(ViewsDescriptor&& other);
223 
224  ~ViewsDescriptor();
225 
226  ViewsDescriptor& operator=(ViewsDescriptor rhs);
227 
228  bool operator ==(const ViewsDescriptor& rhs) const;
229 
230  /// @brief Set the view origin coordinates. The arguments are: view, dimension, value.
231  /// If the view is greater than or equal to GetNumViews(), then the view argument is out of range.
232  /// If the coord is greater than or equal to GetNumDimensions(), then the coord argument is out of range.
233  Status SetViewOriginCoord(uint32_t view, uint32_t coord, uint32_t value);
234  /// @brief Set the size of the views. The arguments are: view, dimension, value.
235  /// If the view is greater than or equal to GetNumViews(), then the view argument is out of range.
236  /// If the coord is greater than or equal to GetNumDimensions(), then the coord argument is out of range.
237  Status SetViewSize(uint32_t view, uint32_t coord, uint32_t value);
238 
239  /// Get the number of views.
240  uint32_t GetNumViews() const;
241  /// Get the number of dimensions.
242  uint32_t GetNumDimensions() const;
243  /// Get the view origin at the int value idx.
244  const uint32_t* GetViewOrigin(uint32_t idx) const;
245  /// Get the view sizes at the int value idx.
246  const uint32_t* GetViewSizes(uint32_t idx) const;
247  /// Get the View Origins
248  const OriginsDescriptor& GetOrigins() const;
249 
250  /// Swap the ViewsDescriptor value first and second.
251  friend void swap(ViewsDescriptor& first, ViewsDescriptor& second);
252 private:
253  OriginsDescriptor m_Origins;
254  uint32_t** m_ViewSizes;
255 };
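A sketch of how a splitter's views might be filled in (shapes are illustrative): a [1,4,2,2] input split into two [1,2,2,2] outputs along dimension 1.

    armnn::ViewsDescriptor splitDesc(2 /*numViews*/, 4 /*numDimensions*/);
    for (uint32_t view = 0; view < 2; ++view)
    {
        for (uint32_t dim = 0; dim < 4; ++dim)
        {
            splitDesc.SetViewSize(view, dim, (dim == 0) ? 1 : 2);               // each view is [1,2,2,2]
            splitDesc.SetViewOriginCoord(view, dim, (dim == 1) ? view * 2 : 0); // offset only on the split axis
        }
    }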
256 
257 
258 /// @brief Convenience template to create an OriginsDescriptor to use when creating a ConcatLayer for performing
259 /// concatenation of a number of input tensors.
260 template <typename TensorShapeIt>
261 OriginsDescriptor CreateDescriptorForConcatenation(TensorShapeIt first,
262  TensorShapeIt last,
263  unsigned int concatenationDimension)
264 {
265  auto numInputs = std::distance(first, last);
266 
267  if (numInputs < 2)
268  {
269  throw InvalidArgumentException("Concatenation requires at least 2 inputs");
270  }
271 
272  const auto& firstInputShape = *first;
273 
274  const unsigned int numDimensions = firstInputShape.GetNumDimensions();
275  for (auto it = first + 1; it != last; ++it)
276  {
277  if (it->GetNumDimensions() != numDimensions)
278  {
279  throw InvalidArgumentException("All inputs to concatenation must have the same number of dimensions");
280  }
281  }
282 
283  if (concatenationDimension >= numDimensions)
284  {
285  throw InvalidArgumentException("concatenationDimension must be between 0 and the number of dimensions.");
286  }
287 
288  for (auto it = first; it != last; ++it)
289  {
290  for (unsigned int d = 0; d < numDimensions; ++d)
291  {
292  const bool dimSizeOk = (d == concatenationDimension) || (firstInputShape[d] == (*it)[d]);
293  if (!dimSizeOk)
294  {
295  throw InvalidArgumentException("All inputs to concatenation must be the same size along all dimensions "
296  " except the concatenation dimension");
297  }
298  }
299  }
300 
301  OriginsDescriptor viewsDescriptor(static_cast<uint32_t>(numInputs), numDimensions);
302  viewsDescriptor.SetConcatAxis(concatenationDimension);
303 
304  uint32_t viewIndex = 0u;
305  uint32_t coordAlongConcatDim = 0u;
306  for (auto it = first; it != last; ++it)
307  {
308  const auto& inputShape = *it;
309 
310  for (unsigned int i = 0; i < concatenationDimension; ++i)
311  {
312  viewsDescriptor.SetViewOriginCoord(viewIndex, i, 0);
313  }
314 
315  viewsDescriptor.SetViewOriginCoord(viewIndex, concatenationDimension, coordAlongConcatDim);
316  unsigned int dimSize = inputShape[concatenationDimension];
317  coordAlongConcatDim += dimSize;
318 
319 
320  for (unsigned int i = concatenationDimension + 1; i < numDimensions; ++i)
321  {
322  viewsDescriptor.SetViewOriginCoord(viewIndex, i, 0);
323  }
324 
325  ++viewIndex;
326  }
327 
328  return viewsDescriptor;
329 }
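A minimal sketch of calling this helper (shapes and axis are illustrative example values; TensorShape comes from the included Tensor.hpp):

    // Concatenate two NCHW tensors along the channel axis (dimension 1).
    std::vector<armnn::TensorShape> inputShapes = { armnn::TensorShape({1, 3, 8, 8}),
                                                    armnn::TensorShape({1, 5, 8, 8}) };
    armnn::OriginsDescriptor concatDesc =
        armnn::CreateDescriptorForConcatenation(inputShapes.begin(), inputShapes.end(), 1);
    // concatDesc.GetConcatAxis() == 1, and the second view's origin is {0, 3, 0, 0}.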
330 
331 /// A Pooling2dDescriptor for the Pooling2dLayer.
332 struct Pooling2dDescriptor : BaseDescriptor
333 {
334  Pooling2dDescriptor()
335  : m_PoolType(PoolingAlgorithm::Max)
336  , m_PadLeft(0)
337  , m_PadRight(0)
338  , m_PadTop(0)
339  , m_PadBottom(0)
340  , m_PoolWidth(0)
341  , m_PoolHeight(0)
342  , m_StrideX(0)
343  , m_StrideY(0)
344  , m_OutputShapeRounding(OutputShapeRounding::Floor)
345  , m_PaddingMethod(PaddingMethod::Exclude)
346  , m_DataLayout(DataLayout::NCHW)
347  {}
348 
349  bool operator ==(const Pooling2dDescriptor& rhs) const
350  {
351  return m_PoolType == rhs.m_PoolType &&
352  m_PadLeft == rhs.m_PadLeft &&
353  m_PadRight == rhs.m_PadRight &&
354  m_PadTop == rhs.m_PadTop &&
355  m_PadBottom == rhs.m_PadBottom &&
356  m_PoolWidth == rhs.m_PoolWidth &&
357  m_PoolHeight == rhs.m_PoolHeight &&
358  m_StrideX == rhs.m_StrideX &&
359  m_StrideY == rhs.m_StrideY &&
360  m_OutputShapeRounding == rhs.m_OutputShapeRounding &&
361  m_PaddingMethod == rhs.m_PaddingMethod &&
362  m_DataLayout == rhs.m_DataLayout;
363  }
364 
365  /// The pooling algorithm to use (Max, Average, L2).
366  PoolingAlgorithm m_PoolType;
367  /// Padding left value in the width dimension.
368  uint32_t m_PadLeft;
369  /// Padding right value in the width dimension.
370  uint32_t m_PadRight;
371  /// Padding top value in the height dimension.
372  uint32_t m_PadTop;
373  /// Padding bottom value in the height dimension.
374  uint32_t m_PadBottom;
375  /// Pooling width value.
376  uint32_t m_PoolWidth;
377  /// Pooling height value.
378  uint32_t m_PoolHeight;
379  /// Stride value when proceeding through input for the width dimension.
380  uint32_t m_StrideX;
381  /// Stride value when proceeding through input for the height dimension.
382  uint32_t m_StrideY;
383  /// The rounding method for the output shape. (Floor, Ceiling).
384  OutputShapeRounding m_OutputShapeRounding;
385  /// The padding method to be used. (Exclude, IgnoreValue).
386  PaddingMethod m_PaddingMethod;
387  /// The data layout to be used (NCHW, NHWC).
388  DataLayout m_DataLayout;
389 };
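A sketch of a typical configuration (values illustrative): a 2x2 max pool with stride 2 on NHWC data.

    armnn::Pooling2dDescriptor poolDesc;
    poolDesc.m_PoolType      = armnn::PoolingAlgorithm::Max;
    poolDesc.m_PoolWidth     = 2;
    poolDesc.m_PoolHeight    = 2;
    poolDesc.m_StrideX       = 2;
    poolDesc.m_StrideY       = 2;
    poolDesc.m_PaddingMethod = armnn::PaddingMethod::Exclude;
    poolDesc.m_DataLayout    = armnn::DataLayout::NHWC;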
390 
391 /// A Pooling3dDescriptor for the Pooling3dLayer.
392 struct Pooling3dDescriptor : BaseDescriptor
393 {
394  Pooling3dDescriptor()
395  : m_PoolType(PoolingAlgorithm::Max)
396  , m_PadLeft(0)
397  , m_PadRight(0)
398  , m_PadTop(0)
399  , m_PadBottom(0)
400  , m_PadFront(0)
401  , m_PadBack(0)
402  , m_PoolWidth(0)
403  , m_PoolHeight(0)
404  , m_PoolDepth(0)
405  , m_StrideX(0)
406  , m_StrideY(0)
407  , m_StrideZ(0)
408  , m_OutputShapeRounding(OutputShapeRounding::Floor)
409  , m_PaddingMethod(PaddingMethod::Exclude)
410  , m_DataLayout(DataLayout::NCDHW)
411  {}
412 
413  bool operator ==(const Pooling3dDescriptor& rhs) const
414  {
415  return m_PoolType == rhs.m_PoolType &&
416  m_PadLeft == rhs.m_PadLeft &&
417  m_PadRight == rhs.m_PadRight &&
418  m_PadTop == rhs.m_PadTop &&
419  m_PadBottom == rhs.m_PadBottom &&
420  m_PadFront == rhs.m_PadFront &&
421  m_PadBack == rhs.m_PadBack &&
422  m_PoolWidth == rhs.m_PoolWidth &&
423  m_PoolHeight == rhs.m_PoolHeight &&
424  m_PoolDepth == rhs.m_PoolDepth &&
425  m_StrideX == rhs.m_StrideX &&
426  m_StrideY == rhs.m_StrideY &&
427  m_StrideZ == rhs.m_StrideZ &&
428  m_OutputShapeRounding == rhs.m_OutputShapeRounding &&
429  m_PaddingMethod == rhs.m_PaddingMethod &&
430  m_DataLayout == rhs.m_DataLayout;
431  }
432 
433  /// The pooling algorithm to use (Max, Average, L2).
434  PoolingAlgorithm m_PoolType;
435  /// Padding left value in the width dimension.
436  uint32_t m_PadLeft;
437  /// Padding right value in the width dimension.
438  uint32_t m_PadRight;
439  /// Padding top value in the height dimension.
440  uint32_t m_PadTop;
441  /// Padding bottom value in the height dimension.
442  uint32_t m_PadBottom;
443  /// Padding front value in the depth dimension.
444  uint32_t m_PadFront;
445  /// Padding back value in the depth dimension.
446  uint32_t m_PadBack;
447  /// Pooling width value.
448  uint32_t m_PoolWidth;
449  /// Pooling height value.
450  uint32_t m_PoolHeight;
451  /// Pooling depth value.
452  uint32_t m_PoolDepth;
453  /// Stride value when proceeding through input for the width dimension.
454  uint32_t m_StrideX;
455  /// Stride value when proceeding through input for the height dimension.
456  uint32_t m_StrideY;
457  /// Stride value when proceeding through input for the depth dimension.
458  uint32_t m_StrideZ;
459  /// The rounding method for the output shape. (Floor, Ceiling).
460  OutputShapeRounding m_OutputShapeRounding;
461  /// The padding method to be used. (Exclude, IgnoreValue).
462  PaddingMethod m_PaddingMethod;
463  /// The data layout to be used (NCDHW, NDHWC).
464  DataLayout m_DataLayout;
465 };
466 
467 /// A FullyConnectedDescriptor for the FullyConnectedLayer.
468 struct FullyConnectedDescriptor : BaseDescriptor
469 {
470  FullyConnectedDescriptor()
471  : m_BiasEnabled(false)
472  , m_TransposeWeightMatrix(false)
473  , m_ConstantWeights(true)
474  {}
475 
476  bool operator ==(const FullyConnectedDescriptor& rhs) const
477  {
478  return m_BiasEnabled == rhs.m_BiasEnabled
479  && m_TransposeWeightMatrix == rhs.m_TransposeWeightMatrix
480  && m_ConstantWeights == rhs.m_ConstantWeights;
481  }
482 
483  /// Get the number of views/inputs.
484  ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use GetNumInputs instead", "22.05")
485  uint32_t GetNumViews() const;
486 
487  /// Get the number of views/inputs.
488  uint32_t GetNumInputs() const;
489 
490  /// Enable/disable bias.
491  bool m_BiasEnabled;
492  /// Enable/disable transpose weight matrix.
493  bool m_TransposeWeightMatrix;
494  /// Enable/disable constant weights and biases.
495  bool m_ConstantWeights;
496 };
497 
498 /// A Convolution2dDescriptor for the Convolution2dLayer.
499 struct Convolution2dDescriptor : BaseDescriptor
500 {
501  Convolution2dDescriptor()
502  : m_PadLeft(0)
503  , m_PadRight(0)
504  , m_PadTop(0)
505  , m_PadBottom(0)
506  , m_StrideX(1)
507  , m_StrideY(1)
508  , m_DilationX(1)
509  , m_DilationY(1)
510  , m_BiasEnabled(false)
511  , m_DataLayout(DataLayout::NCHW)
512  {}
513 
514  bool operator ==(const Convolution2dDescriptor& rhs) const
515  {
516  return m_PadLeft == rhs.m_PadLeft &&
517  m_PadRight == rhs.m_PadRight &&
518  m_PadTop == rhs.m_PadTop &&
519  m_PadBottom == rhs.m_PadBottom &&
520  m_StrideX == rhs.m_StrideX &&
521  m_StrideY == rhs.m_StrideY &&
522  m_DilationX == rhs.m_DilationX &&
523  m_DilationY == rhs.m_DilationY &&
524  m_BiasEnabled == rhs.m_BiasEnabled &&
525  m_DataLayout == rhs.m_DataLayout;
526  }
527 
528  /// Padding left value in the width dimension.
529  uint32_t m_PadLeft;
530  /// Padding right value in the width dimension.
531  uint32_t m_PadRight;
532  /// Padding top value in the height dimension.
533  uint32_t m_PadTop;
534  /// Padding bottom value in the height dimension.
535  uint32_t m_PadBottom;
536  /// Stride value when proceeding through input for the width dimension.
537  uint32_t m_StrideX;
538  /// Stride value when proceeding through input for the height dimension.
539  uint32_t m_StrideY;
540  /// Dilation along x axis
541  uint32_t m_DilationX;
542  /// Dilation along y axis
543  uint32_t m_DilationY;
544  /// Enable/disable bias.
545  bool m_BiasEnabled;
546  /// The data layout to be used (NCHW, NHWC).
547  DataLayout m_DataLayout;
548 };
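A sketch of a common configuration (values illustrative): a stride-2 convolution with one pixel of padding on each side; the filter size itself is carried by the weights tensor, not by the descriptor.

    armnn::Convolution2dDescriptor convDesc;
    convDesc.m_PadLeft     = 1;
    convDesc.m_PadRight    = 1;
    convDesc.m_PadTop      = 1;
    convDesc.m_PadBottom   = 1;
    convDesc.m_StrideX     = 2;
    convDesc.m_StrideY     = 2;
    convDesc.m_BiasEnabled = true;
    convDesc.m_DataLayout  = armnn::DataLayout::NHWC;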
549 
550 /// A Convolution3dDescriptor for the Convolution3dLayer.
551 struct Convolution3dDescriptor : BaseDescriptor
552 {
553  Convolution3dDescriptor()
554  : m_PadLeft(0)
555  , m_PadRight(0)
556  , m_PadTop(0)
557  , m_PadBottom(0)
558  , m_PadFront(0)
559  , m_PadBack(0)
560  , m_StrideX(1)
561  , m_StrideY(1)
562  , m_StrideZ(1)
563  , m_DilationX(1)
564  , m_DilationY(1)
565  , m_DilationZ(1)
566  , m_BiasEnabled(false)
567  , m_DataLayout(DataLayout::NDHWC)
568  {}
569 
570  bool operator ==(const Convolution3dDescriptor& rhs) const
571  {
572  return m_PadLeft == rhs.m_PadLeft &&
573  m_PadRight == rhs.m_PadRight &&
574  m_PadTop == rhs.m_PadTop &&
575  m_PadBottom == rhs.m_PadBottom &&
576  m_PadFront == rhs.m_PadFront &&
577  m_PadBack == rhs.m_PadBack &&
578  m_StrideX == rhs.m_StrideX &&
579  m_StrideY == rhs.m_StrideY &&
580  m_StrideZ == rhs.m_StrideZ &&
581  m_DilationX == rhs.m_DilationX &&
582  m_DilationY == rhs.m_DilationY &&
583  m_DilationZ == rhs.m_DilationZ &&
584  m_BiasEnabled == rhs.m_BiasEnabled &&
585  m_DataLayout == rhs.m_DataLayout;
586  }
587 
588  /// Get the number of views/inputs.
589  uint32_t GetNumInputs() const;
590 
591  /// Padding left value in the width dimension.
592  uint32_t m_PadLeft;
593  /// Padding right value in the width dimension.
594  uint32_t m_PadRight;
595  /// Padding top value in the height dimension.
596  uint32_t m_PadTop;
597  /// Padding bottom value in the height dimension.
598  uint32_t m_PadBottom;
599  /// Padding front value in the depth dimension.
600  uint32_t m_PadFront;
601  /// Padding back value in the depth dimension.
602  uint32_t m_PadBack;
603  /// Stride value when proceeding through input for the width dimension.
604  uint32_t m_StrideX;
605  /// Stride value when proceeding through input for the height dimension.
606  uint32_t m_StrideY;
607  /// Stride value when proceeding through input for the depth dimension.
608  uint32_t m_StrideZ;
609  /// Dilation along x axis
610  uint32_t m_DilationX;
611  /// Dilation along y axis
612  uint32_t m_DilationY;
613  /// Dilation along z axis
614  uint32_t m_DilationZ;
615  /// Enable/disable bias.
616  bool m_BiasEnabled;
617  /// The data layout to be used (NDHWC, NCDHW).
618  DataLayout m_DataLayout;
619 };
620 
621 /// A DepthwiseConvolution2dDescriptor for the DepthwiseConvolution2dLayer.
622 struct DepthwiseConvolution2dDescriptor : BaseDescriptor
623 {
624  DepthwiseConvolution2dDescriptor()
625  : m_PadLeft(0)
626  , m_PadRight(0)
627  , m_PadTop(0)
628  , m_PadBottom(0)
629  , m_StrideX(1)
630  , m_StrideY(1)
631  , m_DilationX(1)
632  , m_DilationY(1)
633  , m_BiasEnabled(false)
634  , m_DataLayout(DataLayout::NCHW)
635  {}
636 
637  bool operator ==(const DepthwiseConvolution2dDescriptor& rhs) const
638  {
639  return m_PadLeft == rhs.m_PadLeft &&
640  m_PadRight == rhs.m_PadRight &&
641  m_PadTop == rhs.m_PadTop &&
642  m_PadBottom == rhs.m_PadBottom &&
643  m_StrideX == rhs.m_StrideX &&
644  m_StrideY == rhs.m_StrideY &&
645  m_DilationX == rhs.m_DilationX &&
646  m_DilationY == rhs.m_DilationY &&
647  m_BiasEnabled == rhs.m_BiasEnabled &&
648  m_DataLayout == rhs.m_DataLayout;
649  }
650 
651  /// Padding left value in the width dimension.
652  uint32_t m_PadLeft;
653  /// Padding right value in the width dimension.
654  uint32_t m_PadRight;
655  /// Padding top value in the height dimension.
656  uint32_t m_PadTop;
657  /// Padding bottom value in the height dimension.
658  uint32_t m_PadBottom;
659  /// Stride value when proceeding through input for the width dimension.
660  uint32_t m_StrideX;
661  /// Stride value when proceeding through input for the height dimension.
662  uint32_t m_StrideY;
663  /// Dilation factor value for width dimension.
664  uint32_t m_DilationX;
665  /// Dilation factor value for height dimension.
666  uint32_t m_DilationY;
667  /// Enable/disable bias.
668  bool m_BiasEnabled;
669  /// The data layout to be used (NCHW, NHWC).
670  DataLayout m_DataLayout;
671 };
672 
673 struct DetectionPostProcessDescriptor : BaseDescriptor
674 {
675  DetectionPostProcessDescriptor()
676  : m_MaxDetections(0)
677  , m_MaxClassesPerDetection(1)
678  , m_DetectionsPerClass(1)
679  , m_NmsScoreThreshold(0)
680  , m_NmsIouThreshold(0)
681  , m_NumClasses(0)
682  , m_UseRegularNms(false)
683  , m_ScaleX(0)
684  , m_ScaleY(0)
685  , m_ScaleW(0)
686  , m_ScaleH(0)
687  {}
688 
689  bool operator ==(const DetectionPostProcessDescriptor& rhs) const
690  {
691  return m_MaxDetections == rhs.m_MaxDetections &&
692  m_MaxClassesPerDetection == rhs.m_MaxClassesPerDetection &&
693  m_DetectionsPerClass == rhs.m_DetectionsPerClass &&
694  m_NmsScoreThreshold == rhs.m_NmsScoreThreshold &&
695  m_NmsIouThreshold == rhs.m_NmsIouThreshold &&
696  m_NumClasses == rhs.m_NumClasses &&
697  m_UseRegularNms == rhs.m_UseRegularNms &&
698  m_ScaleX == rhs.m_ScaleX &&
699  m_ScaleY == rhs.m_ScaleY &&
700  m_ScaleW == rhs.m_ScaleW &&
701  m_ScaleH == rhs.m_ScaleH;
702  }
703 
704  /// Maximum numbers of detections.
705  uint32_t m_MaxDetections;
706  /// Maximum numbers of classes per detection, used in Fast NMS.
707  uint32_t m_MaxClassesPerDetection;
708  /// Detections per classes, used in Regular NMS.
709  uint32_t m_DetectionsPerClass;
710  /// NMS score threshold.
711  float m_NmsScoreThreshold;
712  /// Intersection over union threshold.
713  float m_NmsIouThreshold;
714  /// Number of classes.
715  uint32_t m_NumClasses;
716  /// Use Regular NMS.
717  bool m_UseRegularNms;
718  /// Center size encoding scale x.
719  float m_ScaleX;
720  /// Center size encoding scale y.
721  float m_ScaleY;
722  /// Center size encoding scale weight.
723  float m_ScaleW;
724  /// Center size encoding scale height.
725  float m_ScaleH;
726 };
727 
728 /// A NormalizationDescriptor for the NormalizationLayer.
729 struct NormalizationDescriptor : BaseDescriptor
730 {
731  NormalizationDescriptor()
732  : m_NormChannelType(NormalizationAlgorithmChannel::Across)
733  , m_NormMethodType(NormalizationAlgorithmMethod::LocalBrightness)
734  , m_NormSize(0)
735  , m_Alpha(0.f)
736  , m_Beta(0.f)
737  , m_K(0.f)
738  , m_DataLayout(DataLayout::NCHW)
739  {}
740 
741  bool operator ==(const NormalizationDescriptor& rhs) const
742  {
743  return m_NormChannelType == rhs.m_NormChannelType &&
744  m_NormMethodType == rhs.m_NormMethodType &&
745  m_NormSize == rhs.m_NormSize &&
746  m_Alpha == rhs.m_Alpha &&
747  m_Beta == rhs.m_Beta &&
748  m_K == rhs.m_K &&
749  m_DataLayout == rhs.m_DataLayout;
750  }
751 
752  /// Normalization channel algorithm to use (Across, Within).
753  NormalizationAlgorithmChannel m_NormChannelType;
754  /// Normalization method algorithm to use (LocalBrightness, LocalContrast).
755  NormalizationAlgorithmMethod m_NormMethodType;
756  /// Depth radius value.
757  uint32_t m_NormSize;
758  /// Alpha value for the normalization equation.
759  float m_Alpha;
760  /// Beta value for the normalization equation.
761  float m_Beta;
762  /// Kappa value used for the across channel normalization equation.
763  float m_K;
764  /// The data layout to be used (NCHW, NHWC).
765  DataLayout m_DataLayout;
766 };
767 
768 /// A L2NormalizationDescriptor for the L2NormalizationLayer.
769 struct L2NormalizationDescriptor : BaseDescriptor
770 {
771  L2NormalizationDescriptor()
772  : m_Eps(1e-12f)
773  , m_DataLayout(DataLayout::NCHW)
774  {}
775 
776  bool operator ==(const L2NormalizationDescriptor& rhs) const
777  {
778  return m_Eps == rhs.m_Eps && m_DataLayout == rhs.m_DataLayout;
779  }
780 
781  /// Used to avoid dividing by zero.
782  float m_Eps;
783  /// The data layout to be used (NCHW, NHWC).
784  DataLayout m_DataLayout;
785 };
786 
787 /// A BatchNormalizationDescriptor for the BatchNormalizationLayer.
788 struct BatchNormalizationDescriptor : BaseDescriptor
789 {
790  BatchNormalizationDescriptor()
791  : m_Eps(0.0001f)
792  , m_DataLayout(DataLayout::NCHW)
793  {}
794 
795  bool operator ==(const BatchNormalizationDescriptor& rhs) const
796  {
797  return m_Eps == rhs.m_Eps && m_DataLayout == rhs.m_DataLayout;
798  }
799 
800  /// Value to add to the variance. Used to avoid dividing by zero.
801  float m_Eps;
802  /// The data layout to be used (NCHW, NHWC).
803  DataLayout m_DataLayout;
804 };
805 
806 /// An InstanceNormalizationDescriptor for InstanceNormalizationLayer
807 struct InstanceNormalizationDescriptor : BaseDescriptor
808 {
809  InstanceNormalizationDescriptor()
810  : m_Gamma(1.0f)
811  , m_Beta(0.0f)
812  , m_Eps(1e-12f)
813  , m_DataLayout(DataLayout::NCHW)
814  {}
815 
816  bool operator ==(const InstanceNormalizationDescriptor& rhs) const
817  {
818  return m_Gamma == rhs.m_Gamma &&
819  m_Beta == rhs.m_Beta &&
820  m_Eps == rhs.m_Eps &&
821  m_DataLayout == rhs.m_DataLayout;
822  }
823 
824  /// Gamma, the scale scalar value applied for the normalized tensor. Defaults to 1.0.
825  float m_Gamma;
826  /// Beta, the offset scalar value applied for the normalized tensor. Defaults to 0.0.
827  float m_Beta;
828  /// Epsilon, small scalar value added to variance to avoid dividing by zero. Defaults to 1e-12f.
829  float m_Eps;
830  /// The data layout to be used (NCHW, NHWC).
831  DataLayout m_DataLayout;
832 };
833 
834 /// A BatchToSpaceNdDescriptor for the BatchToSpaceNdLayer.
835 struct BatchToSpaceNdDescriptor : BaseDescriptor
836 {
837  BatchToSpaceNdDescriptor()
838  : m_BlockShape({1, 1})
839  , m_Crops({{0, 0}, {0, 0}})
840  , m_DataLayout(DataLayout::NCHW)
841  {}
842 
843  BatchToSpaceNdDescriptor(std::vector<unsigned int> blockShape,
844  std::vector<std::pair<unsigned int, unsigned int>> crops)
845  : m_BlockShape(blockShape)
846  , m_Crops(crops)
847  , m_DataLayout(DataLayout::NCHW)
848  {}
849 
850  bool operator ==(const BatchToSpaceNdDescriptor& rhs) const
851  {
852  return m_BlockShape == rhs.m_BlockShape &&
853  m_Crops == rhs.m_Crops &&
854  m_DataLayout == rhs.m_DataLayout;
855  }
856 
857  /// Block shape values.
858  std::vector<unsigned int> m_BlockShape;
859  /// The values to crop from the input dimension.
860  std::vector<std::pair<unsigned int, unsigned int>> m_Crops;
861  /// The data layout to be used (NCHW, NHWC).
862  DataLayout m_DataLayout;
863 };
864 
865 /// A FakeQuantizationDescriptor for the FakeQuantizationLayer.
866 struct FakeQuantizationDescriptor : BaseDescriptor
867 {
868  FakeQuantizationDescriptor()
869  : m_Min(-6.0f)
870  , m_Max(6.0f)
871  {}
872 
873  bool operator ==(const FakeQuantizationDescriptor& rhs) const
874  {
875  return m_Min == rhs.m_Min && m_Max == rhs.m_Max;
876  }
877 
878  /// Minimum value.
879  float m_Min;
880  /// Maximum value.
881  float m_Max;
882 };
883 
884 /// A FillDescriptor for the FillLayer
885 struct FillDescriptor : BaseDescriptor
886 {
887  FillDescriptor()
888  : m_Value(0)
889  {}
890 
891  FillDescriptor(const float& value)
892  : m_Value(value)
893  {}
894 
895  bool operator ==(const FillDescriptor& rhs) const
896  {
897  return m_Value == rhs.m_Value;
898  }
899 
900  float m_Value;
901 };
902 
903 /// A GatherDescriptor for the GatherLayer.
904 struct GatherDescriptor : BaseDescriptor
905 {
906  GatherDescriptor()
907  : m_Axis(0)
908  {}
909 
910  GatherDescriptor(int32_t axis)
911  : m_Axis(axis)
912  {}
913 
914  bool operator ==(const GatherDescriptor& rhs) const
915  {
916  return m_Axis == rhs.m_Axis;
917  }
918 
919  /// The axis in params to gather indices from
920  int32_t m_Axis;
921 };
922 
923 /// A ResizeBilinearDescriptor for the ResizeBilinearLayer.
924 struct ARMNN_DEPRECATED_MSG_REMOVAL_DATE(
925  "ResizeBilinearDescriptor is not supported anymore. Use ResizeDescriptor instead.",
926  "22.08")
927  ResizeBilinearDescriptor : BaseDescriptor
928 {
929  ResizeBilinearDescriptor()
930  : m_TargetWidth(0)
931  , m_TargetHeight(0)
932  , m_DataLayout(DataLayout::NCHW)
933  , m_AlignCorners(false)
934  , m_HalfPixelCenters(false)
935  {}
936 
937  ARMNN_NO_DEPRECATE_WARN_BEGIN
938  bool operator ==(const ResizeBilinearDescriptor& rhs) const
939  {
940  return m_TargetWidth == rhs.m_TargetWidth &&
941  m_TargetHeight == rhs.m_TargetHeight &&
942  m_DataLayout == rhs.m_DataLayout &&
943  m_AlignCorners == rhs.m_AlignCorners &&
944  m_HalfPixelCenters == rhs.m_HalfPixelCenters;
945  }
946  ARMNN_NO_DEPRECATE_WARN_END
947 
948  /// Target width value.
949  uint32_t m_TargetWidth;
950  /// Target height value.
951  uint32_t m_TargetHeight;
952  /// The data layout to be used (NCHW, NHWC).
953  DataLayout m_DataLayout;
954  /// Aligned corners
955  bool m_AlignCorners;
956  /// Half Pixel Centers
957  bool m_HalfPixelCenters;
958 };
959 
960 /// A ResizeDescriptor for the ResizeLayer.
961 struct ResizeDescriptor : BaseDescriptor
962 {
963  ResizeDescriptor()
964  : m_TargetWidth(0)
965  , m_TargetHeight(0)
966  , m_Method(ResizeMethod::NearestNeighbor)
967  , m_DataLayout(DataLayout::NCHW)
968  , m_AlignCorners(false)
969  , m_HalfPixelCenters(false)
970  {}
971 
972  bool operator ==(const ResizeDescriptor& rhs) const
973  {
974  return m_TargetWidth == rhs.m_TargetWidth &&
975  m_TargetHeight == rhs.m_TargetHeight &&
976  m_Method == rhs.m_Method &&
977  m_DataLayout == rhs.m_DataLayout &&
978  m_AlignCorners == rhs.m_AlignCorners &&
979  m_HalfPixelCenters == rhs.m_HalfPixelCenters;
980  }
981 
982  /// Target width value.
983  uint32_t m_TargetWidth;
984  /// Target height value.
985  uint32_t m_TargetHeight;
986  /// The Interpolation method to use
987  /// (Bilinear, NearestNeighbor).
988  ResizeMethod m_Method;
989  /// The data layout to be used (NCHW, NHWC).
990  DataLayout m_DataLayout;
991  /// Aligned corners
992  bool m_AlignCorners;
993  /// Half Pixel Centers
994  bool m_HalfPixelCenters;
995 };
996 
997 
998 /// A ReshapeDescriptor for the ReshapeLayer.
999 struct ReshapeDescriptor : BaseDescriptor
1000 {
1001  ReshapeDescriptor()
1002  : m_TargetShape()
1003  {}
1004 
1005  ReshapeDescriptor(const TensorShape& shape)
1006  : m_TargetShape(shape)
1007  {}
1008 
1009  bool operator ==(const ReshapeDescriptor& rhs) const
1010  {
1011  return m_TargetShape == rhs.m_TargetShape;
1012  }
1013 
1014  /// Target shape value.
1015  TensorShape m_TargetShape;
1016 };
1017 
1018 /// A SpaceToBatchNdDescriptor for the SpaceToBatchNdLayer.
1019 struct SpaceToBatchNdDescriptor : BaseDescriptor
1020 {
1021  SpaceToBatchNdDescriptor()
1022  : m_BlockShape({1, 1})
1023  , m_PadList({{0, 0}, {0, 0}})
1024  , m_DataLayout(DataLayout::NCHW)
1025  {}
1026 
1027  SpaceToBatchNdDescriptor(const std::vector<unsigned int>& blockShape,
1028  const std::vector<std::pair<unsigned int, unsigned int>>& padList)
1029  : m_BlockShape(blockShape)
1030  , m_PadList(padList)
1031  , m_DataLayout(DataLayout::NCHW)
1032  {}
1033 
1034  bool operator ==(const SpaceToBatchNdDescriptor& rhs) const
1035  {
1036  return m_BlockShape == rhs.m_BlockShape &&
1037  m_PadList == rhs.m_PadList &&
1038  m_DataLayout == rhs.m_DataLayout;
1039  }
1040 
1041  /// Block shape value.
1042  std::vector<unsigned int> m_BlockShape;
1043  /// @brief Specifies the padding values for the input dimension:
1044  /// heightPad{top, bottom} widthPad{left, right}.
1045  std::vector<std::pair<unsigned int, unsigned int>> m_PadList;
1046  /// The data layout to be used (NCHW, NHWC).
1047  DataLayout m_DataLayout;
1048 };
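A sketch showing the heightPad{top, bottom} / widthPad{left, right} convention described above (values illustrative):

    // Block size 2x2; pad one row at the bottom and one column on the right.
    armnn::SpaceToBatchNdDescriptor s2bDesc({2, 2},
                                            {{0, 1} /*height: top, bottom*/,
                                             {0, 1} /*width: left, right*/});
    s2bDesc.m_DataLayout = armnn::DataLayout::NHWC;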
1049 
1050 /// A SpaceToDepthDescriptor for the SpaceToDepthLayer
1051 struct SpaceToDepthDescriptor : BaseDescriptor
1052 {
1053  SpaceToDepthDescriptor()
1054  : SpaceToDepthDescriptor(1u, DataLayout::NHWC)
1055  {}
1056 
1057  SpaceToDepthDescriptor(unsigned int blockSize, DataLayout dataLayout)
1058  : m_BlockSize(blockSize)
1059  , m_DataLayout(dataLayout)
1060  {}
1061 
1062  bool operator ==(const SpaceToDepthDescriptor& rhs) const
1063  {
1064  return m_BlockSize == rhs.m_BlockSize && m_DataLayout == rhs.m_DataLayout;
1065  }
1066 
1067  /// Scalar specifying the input block size. It must be >= 1
1068  unsigned int m_BlockSize;
1069 
1070  /// The data layout to be used (NCHW, NHWC).
1071  DataLayout m_DataLayout;
1072 };
1073 
1074 /// A DepthToSpaceDescriptor for the DepthToSpaceLayer
1075 using DepthToSpaceDescriptor = SpaceToDepthDescriptor;
1076 
1077 /// An LstmDescriptor for the LstmLayer.
1078 struct LstmDescriptor : BaseDescriptor
1079 {
1080  LstmDescriptor()
1081  : m_ActivationFunc(1) // 0: None, 1: Relu, 3: Relu6, 4: Tanh, 6: Sigmoid
1082  , m_ClippingThresCell(0.0)
1083  , m_ClippingThresProj(0.0)
1084  , m_CifgEnabled(true)
1085  , m_PeepholeEnabled(false)
1086  , m_ProjectionEnabled(false)
1087  , m_LayerNormEnabled(false)
1088  , m_TimeMajor(false)
1089  {}
1090 
1091  bool operator ==(const LstmDescriptor& rhs) const
1092  {
1093  return m_ActivationFunc == rhs.m_ActivationFunc &&
1094  m_ClippingThresCell == rhs.m_ClippingThresCell &&
1095  m_ClippingThresProj == rhs.m_ClippingThresProj &&
1096  m_CifgEnabled == rhs.m_CifgEnabled &&
1097  m_PeepholeEnabled == rhs.m_PeepholeEnabled &&
1098  m_LayerNormEnabled == rhs.m_LayerNormEnabled &&
1099  m_TimeMajor == rhs.m_TimeMajor;
1100  }
1101 
1102  /// @brief The activation function to use.
1103  /// 0: None, 1: Relu, 3: Relu6, 4: Tanh, 6: Sigmoid.
1104  uint32_t m_ActivationFunc;
1105  /// Clipping threshold value for the cell state.
1106  float m_ClippingThresCell;
1107  /// Clipping threshold value for the projection.
1108  float m_ClippingThresProj;
1109  /// Enable/disable cifg (coupled input & forget gate).
1110  bool m_CifgEnabled;
1111  /// Enable/disable peephole.
1112  bool m_PeepholeEnabled;
1113  /// Enable/disable the projection layer.
1114  bool m_ProjectionEnabled;
1115  /// Enable/disable layer normalization
1116  bool m_LayerNormEnabled;
1117  /// Enable/disable time major
1118  bool m_TimeMajor;
1119 };
1120 
1121 using UnidirectionalSequenceLstmDescriptor = LstmDescriptor;
1122 
1123 /// A MeanDescriptor for the MeanLayer.
1124 struct MeanDescriptor : BaseDescriptor
1125 {
1126  MeanDescriptor()
1127  : m_Axis()
1128  , m_KeepDims(false)
1129  {}
1130 
1131  MeanDescriptor(const std::vector<unsigned int>& axis, bool keepDims)
1132  : m_Axis(axis)
1133  , m_KeepDims(keepDims)
1134  {}
1135 
1136  bool operator ==(const MeanDescriptor& rhs) const
1137  {
1138  return m_Axis == rhs.m_Axis && m_KeepDims == rhs.m_KeepDims;
1139  }
1140 
1141  /// Values for the dimensions to reduce.
1142  std::vector<unsigned int> m_Axis;
1143  /// Enable/disable keep dimensions. If true, then the reduced dimensions that are of length 1 are kept.
1144  bool m_KeepDims;
1145 };
1146 
1147 /// A PadDescriptor for the PadLayer.
1148 struct PadDescriptor : BaseDescriptor
1149 {
1150  PadDescriptor() : m_PadValue(0), m_PaddingMode(PaddingMode::Constant)
1151  {}
1152 
1153  PadDescriptor(const std::vector<std::pair<unsigned int, unsigned int>>& padList,
1154  const float& padValue = 0,
1155  const PaddingMode& paddingMode = PaddingMode::Constant)
1156  : m_PadList(padList)
1157  , m_PadValue(padValue)
1158  , m_PaddingMode(paddingMode)
1159  {}
1160 
1161  bool operator ==(const PadDescriptor& rhs) const
1162  {
1163  return m_PadList == rhs.m_PadList && m_PadValue == rhs.m_PadValue && m_PaddingMode == rhs.m_PaddingMode;
1164  }
1165 
1166  /// @brief Specifies the padding for input dimension.
1167  /// First is the number of values to add before the tensor in the dimension.
1168  /// Second is the number of values to add after the tensor in the dimension.
1169  /// The number of pairs should match the number of dimensions in the input tensor.
1170  std::vector<std::pair<unsigned int, unsigned int>> m_PadList;
1171 
1172  /// Optional value to use for padding, defaults to 0
1173  float m_PadValue;
1174 
1175  /// Specifies the Padding mode (Constant, Reflect or Symmetric)
1176  PaddingMode m_PaddingMode;
1177 };
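A sketch for a rank-4 NCHW input, padding only the spatial dimensions (one pair per input dimension; values illustrative):

    // No padding on N and C; pad H and W by one element on each side, filled with zeros.
    std::vector<std::pair<unsigned int, unsigned int>> padList = { {0, 0}, {0, 0}, {1, 1}, {1, 1} };
    armnn::PadDescriptor padDesc(padList, 0.0f, armnn::PaddingMode::Constant);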
1178 
1179 /// A SliceDescriptor for the SliceLayer.
1180 struct SliceDescriptor : BaseDescriptor
1181 {
1182  SliceDescriptor(const std::vector<unsigned int>& begin, const std::vector<unsigned int>& size)
1183  : m_Begin(begin)
1184  , m_Size(size)
1185  {}
1186 
1187  SliceDescriptor() : SliceDescriptor({}, {})
1188  {}
1189 
1190  bool operator ==(const SliceDescriptor& rhs) const
1191  {
1192  return m_Begin == rhs.m_Begin && m_Size == rhs.m_Size;
1193  }
1194 
1195  /// Beginning indices of the slice in each dimension.
1196  std::vector<unsigned int> m_Begin;
1197 
1198  /// Size of the slice in each dimension.
1199  std::vector<unsigned int> m_Size;
1200 };
1201 
1202 /// A StackDescriptor for the StackLayer.
1203 struct StackDescriptor : BaseDescriptor
1204 {
1205  StackDescriptor()
1206  : m_Axis(0)
1207  , m_NumInputs(0)
1208  , m_InputShape()
1209  {}
1210 
1211  StackDescriptor(uint32_t axis, uint32_t numInputs, const TensorShape& inputShape)
1212  : m_Axis(axis)
1213  , m_NumInputs(numInputs)
1214  , m_InputShape(inputShape)
1215  {}
1216 
1217  bool operator ==(const StackDescriptor& rhs) const
1218  {
1219  return m_Axis == rhs.m_Axis &&
1220  m_NumInputs == rhs.m_NumInputs &&
1221  m_InputShape == rhs.m_InputShape;
1222  }
1223 
1224  /// 0-based axis along which to stack the input tensors.
1225  uint32_t m_Axis;
1226  /// Number of input tensors.
1227  uint32_t m_NumInputs;
1228  /// Required shape of all input tensors.
1229  TensorShape m_InputShape;
1230 };
1231 
1232 /// A StandInDescriptor for the StandIn layer
1233 struct StandInDescriptor : BaseDescriptor
1234 {
1235  StandInDescriptor() {};
1236 
1237  StandInDescriptor(uint32_t numInputs, uint32_t numOutputs)
1238  : m_NumInputs(numInputs)
1239  , m_NumOutputs(numOutputs)
1240  {}
1241 
1242  bool operator ==(const StandInDescriptor& rhs) const
1243  {
1244  return m_NumInputs == rhs.m_NumInputs &&
1245  m_NumOutputs == rhs.m_NumOutputs;
1246  }
1247 
1248  /// Number of input tensors
1249  uint32_t m_NumInputs = 0;
1250  /// Number of output tensors
1251  uint32_t m_NumOutputs = 0;
1252 };
1253 
1254 /// A StridedSliceDescriptor for the StridedSliceLayer.
1255 struct StridedSliceDescriptor : BaseDescriptor
1256 {
1257  StridedSliceDescriptor(const std::vector<int>& begin,
1258  const std::vector<int>& end,
1259  const std::vector<int>& stride)
1260  : m_Begin(begin)
1261  , m_End(end)
1262  , m_Stride(stride)
1263  , m_BeginMask(0)
1264  , m_EndMask(0)
1265  , m_ShrinkAxisMask(0)
1266  , m_EllipsisMask(0)
1267  , m_NewAxisMask(0)
1268  , m_DataLayout(DataLayout::NCHW)
1269  {}
1270 
1271  StridedSliceDescriptor()
1272  : StridedSliceDescriptor({}, {}, {})
1273  {}
1274 
1275  bool operator ==(const StridedSliceDescriptor& rhs) const
1276  {
1277  return m_Begin == rhs.m_Begin &&
1278  m_End == rhs.m_End &&
1279  m_Stride == rhs.m_Stride &&
1280  m_BeginMask == rhs.m_BeginMask &&
1281  m_EndMask == rhs.m_EndMask &&
1282  m_ShrinkAxisMask == rhs.m_ShrinkAxisMask &&
1283  m_EllipsisMask == rhs.m_EllipsisMask &&
1284  m_NewAxisMask == rhs.m_NewAxisMask &&
1285  m_DataLayout == rhs.m_DataLayout;
1286  }
1287 
1288  int GetStartForAxis(const TensorShape& inputShape, unsigned int axis) const;
1289  int GetStopForAxis(const TensorShape& inputShape,
1290  unsigned int axis,
1291  int startForAxis) const;
1292 
1293  /// Begin values for the input that will be sliced.
1294  std::vector<int> m_Begin;
1295  /// End values for the input that will be sliced.
1296  std::vector<int> m_End;
1297  /// Stride values for the input that will be sliced.
1298  std::vector<int> m_Stride;
1299 
1300  /// @brief Begin mask value. If set, then the begin is disregarded and the fullest
1301  /// range is used for the dimension.
1302  int32_t m_BeginMask;
1303  /// @brief End mask value. If set, then the end is disregarded and the fullest range
1304  /// is used for the dimension.
1305  int32_t m_EndMask;
1306  /// Shrink axis mask value. If set, the nth specification shrinks the dimensionality by 1.
1307  int32_t m_ShrinkAxisMask;
1308  /// Ellipsis mask value.
1309  int32_t m_EllipsisMask;
1310  /// @brief New axis mask value. If set, the begin, end and stride is disregarded and
1311  /// a new 1 dimension is inserted to this location of the output tensor.
1312  int32_t m_NewAxisMask;
1313 
1314  /// The data layout to be used (NCHW, NHWC).
1315  DataLayout m_DataLayout;
1316 };
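A sketch of the mask semantics described above (values illustrative, assuming the usual bit-per-dimension mask convention): take rows 1 and 2 of a 2-D tensor while keeping every column by letting m_EndMask override end[1].

    armnn::StridedSliceDescriptor sliceDesc({1, 0} /*begin*/, {3, 0} /*end*/, {1, 1} /*stride*/);
    sliceDesc.m_EndMask = 1 << 1;  // bit 1 set: end[1] is disregarded, dimension 1 runs to its full extent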
1317 
1318 /// A PreCompiledDescriptor for the PreCompiledLayer.
1319 struct PreCompiledDescriptor : BaseDescriptor
1320 {
1321  PreCompiledDescriptor(unsigned int numInputSlots = 1u, unsigned int numOutputSlots = 1u)
1322  : m_NumInputSlots(numInputSlots), m_NumOutputSlots(numOutputSlots)
1323  {}
1324 
1325  ~PreCompiledDescriptor() = default;
1326 
1327  unsigned int m_NumInputSlots;
1328  unsigned int m_NumOutputSlots;
1329 };
1330 
1331 /// A QLstmDescriptor for the QLstmLayer.
1332 struct QLstmDescriptor : BaseDescriptor
1333 {
1334  QLstmDescriptor()
1335  : m_CellClip(0.0)
1336  , m_ProjectionClip(0.0)
1337  , m_CifgEnabled(true)
1338  , m_PeepholeEnabled(false)
1339  , m_ProjectionEnabled(false)
1340  , m_LayerNormEnabled(false)
1341  , m_InputIntermediateScale(0.0)
1342  , m_ForgetIntermediateScale(0.0)
1343  , m_CellIntermediateScale(0.0)
1344  , m_OutputIntermediateScale(0.0)
1345  , m_HiddenStateZeroPoint(0)
1346  , m_HiddenStateScale(0.0)
1347  {}
1348 
1349  bool operator ==(const QLstmDescriptor& rhs) const
1350  {
1351  return m_CellClip == rhs.m_CellClip &&
1352  m_ProjectionClip == rhs.m_ProjectionClip &&
1353  m_CifgEnabled == rhs.m_CifgEnabled &&
1354  m_PeepholeEnabled == rhs.m_PeepholeEnabled &&
1355  m_ProjectionEnabled == rhs.m_ProjectionEnabled &&
1356  m_LayerNormEnabled == rhs.m_LayerNormEnabled &&
1357  m_InputIntermediateScale == rhs.m_InputIntermediateScale &&
1358  m_ForgetIntermediateScale == rhs.m_ForgetIntermediateScale &&
1359  m_CellIntermediateScale == rhs.m_CellIntermediateScale &&
1360  m_OutputIntermediateScale == rhs.m_OutputIntermediateScale &&
1361  m_HiddenStateZeroPoint == rhs.m_HiddenStateZeroPoint &&
1362  m_HiddenStateScale == rhs.m_HiddenStateScale;
1363  }
1364 
1365  /// Clipping threshold value for the cell state
1366  float m_CellClip;
1367  /// Clipping threshold value for the projection
1368  float m_ProjectionClip;
1369  /// Enable/disable CIFG (coupled input & forget gate).
1370  bool m_CifgEnabled;
1371  /// Enable/disable peephole
1372  bool m_PeepholeEnabled;
1373  /// Enable/disable the projection layer
1374  bool m_ProjectionEnabled;
1375  /// Enable/disable layer normalization
1376  bool m_LayerNormEnabled;
1377  /// Input intermediate quantization scale
1378  float m_InputIntermediateScale;
1379  /// Forget intermediate quantization scale
1380  float m_ForgetIntermediateScale;
1381  /// Cell intermediate quantization scale
1382  float m_CellIntermediateScale;
1383  /// Output intermediate quantization scale
1384  float m_OutputIntermediateScale;
1385  /// Hidden State zero point
1386  int32_t m_HiddenStateZeroPoint;
1387  /// Hidden State quantization scale
1388  float m_HiddenStateScale;
1389 };
1390 
1391 /// A TransposeConvolution2dDescriptor for the TransposeConvolution2dLayer.
1392 struct TransposeConvolution2dDescriptor : BaseDescriptor
1393 {
1394  TransposeConvolution2dDescriptor() :
1395  m_PadLeft(0),
1396  m_PadRight(0),
1397  m_PadTop(0),
1398  m_PadBottom(0),
1399  m_StrideX(0),
1400  m_StrideY(0),
1401  m_BiasEnabled(false),
1402  m_DataLayout(DataLayout::NCHW),
1403  m_OutputShapeEnabled(false)
1404  {}
1405 
1406  bool operator ==(const TransposeConvolution2dDescriptor& rhs) const
1407  {
1408  return m_PadLeft == rhs.m_PadLeft &&
1409  m_PadRight == rhs.m_PadRight &&
1410  m_PadTop == rhs.m_PadTop &&
1411  m_PadBottom == rhs.m_PadBottom &&
1412  m_StrideX == rhs.m_StrideX &&
1413  m_StrideY == rhs.m_StrideY &&
1414  m_BiasEnabled == rhs.m_BiasEnabled &&
1415  m_DataLayout == rhs.m_DataLayout &&
1416  m_OutputShapeEnabled == rhs.m_OutputShapeEnabled &&
1417  m_OutputShape == rhs.m_OutputShape;
1418  }
1419 
1420  /// Padding left value in the width dimension.
1421  uint32_t m_PadLeft;
1422  /// Padding right value in the width dimension.
1423  uint32_t m_PadRight;
1424  /// Padding top value in the height dimension.
1425  uint32_t m_PadTop;
1426  /// Padding bottom value in the height dimension.
1427  uint32_t m_PadBottom;
1428  /// Stride value when proceeding through input for the width dimension.
1429  uint32_t m_StrideX;
1430  /// Stride value when proceeding through input for the height dimension.
1431  uint32_t m_StrideY;
1432  /// Enable/disable bias.
1433  bool m_BiasEnabled;
1434  /// The data layout to be used (NCHW, NHWC).
1435  DataLayout m_DataLayout;
1436  /// Output shape if it has been specified.
1437  bool m_OutputShapeEnabled;
1438  std::vector<unsigned int> m_OutputShape;
1439 };
1440 
1441 /// A TransposeDescriptor for the TransposeLayer.
1442 struct TransposeDescriptor : BaseDescriptor
1443 {
1444  TransposeDescriptor()
1445  : m_DimMappings{}
1446  {}
1447 
1448  TransposeDescriptor(const PermutationVector& dimMappings)
1449  : m_DimMappings(dimMappings)
1450  {}
1451 
1452  bool operator ==(const TransposeDescriptor &rhs) const
1453  {
1454  return m_DimMappings.IsEqual(rhs.m_DimMappings);
1455  }
1456 
1457  /// @brief Indicates how to translate tensor elements from a given source into the target destination, when
1458  /// source and target potentially have different memory layouts e.g. {0U, 3U, 1U, 2U}.
1459  PermutationVector m_DimMappings;
1460 };
1461 
1462 /// A LogicalBinaryDescriptor for the LogicalBinaryLayer
1463 struct LogicalBinaryDescriptor : BaseDescriptor
1464 {
1465  LogicalBinaryDescriptor()
1466  : LogicalBinaryDescriptor(LogicalBinaryOperation::LogicalAnd)
1467  {}
1468 
1469  LogicalBinaryDescriptor(LogicalBinaryOperation operation)
1470  : m_Operation(operation)
1471  {}
1472 
1473  bool operator ==(const LogicalBinaryDescriptor &rhs) const
1474  {
1475  return m_Operation == rhs.m_Operation;
1476  }
1477 
1478  /// Specifies the logical operation to execute
1479  LogicalBinaryOperation m_Operation;
1480 };
1481 
1482 /// A ReduceDescriptor for the REDUCE operators.
1483 struct ReduceDescriptor : BaseDescriptor
1484 {
1485  ReduceDescriptor()
1486  : m_KeepDims(false)
1487  , m_vAxis()
1488  , m_ReduceOperation(ReduceOperation::Sum)
1489  {}
1490 
1491  bool operator ==(const ReduceDescriptor& rhs) const
1492  {
1493  return m_KeepDims == rhs.m_KeepDims &&
1494  m_vAxis == rhs.m_vAxis &&
1495  m_ReduceOperation == rhs.m_ReduceOperation;
1496  }
1497 
1498  /// if true then output shape has no change.
1499  bool m_KeepDims;
1500  /// The indices of the dimensions to reduce.
1501  std::vector<uint32_t> m_vAxis;
1502  /// Specifies the reduction operation to execute
1503  ReduceOperation m_ReduceOperation;
1504 };
1505 
1506 /// A ChannelShuffleDescriptor for the ChannelShuffle operator
1507 struct ChannelShuffleDescriptor : BaseDescriptor
1508 {
1509  ChannelShuffleDescriptor()
1510  : m_NumGroups(0), m_Axis(0)
1511  {}
1512 
1513  ChannelShuffleDescriptor(const uint32_t& numGroups, const uint32_t& axis)
1514  : m_NumGroups(numGroups), m_Axis(axis)
1515  {}
1516 
1517  bool operator ==(const ChannelShuffleDescriptor& rhs) const
1518  {
1519  return m_NumGroups == rhs.m_NumGroups && m_Axis == rhs.m_Axis;
1520  }
1521 
1522  /// Number of groups for the channel shuffle operation
1523  uint32_t m_NumGroups;
1524  /// Axis to apply channel shuffle operation on
1525  uint32_t m_Axis;
1526 };
1527 
1528 } // namespace armnn