ArmNN
 22.05.01
Descriptors.hpp
Go to the documentation of this file.
1 //
2 // Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 #pragma once
6 
7 #include "Deprecated.hpp"
8 #include "DescriptorsFwd.hpp"
9 
10 #include <cstdint>
11 #include <initializer_list>
12 #include <iostream>
13 #include <sstream>
14 
15 #include "Tensor.hpp"
16 #include "Types.hpp"
17 
18 namespace armnn
19 {
20 
/// Base class for all descriptors.
struct BaseDescriptor
{
    /// Returns true only for descriptors that carry no parameters
    /// (see NullDescriptor); allows callers to detect parameterless layers.
    virtual bool IsNull() const { return false; }
    virtual ~BaseDescriptor() = default;
};
27 
28 /// Null Descriptor used as a return value from the IConnectableLayer GetParameters method
29 /// by layers which do not have a descriptor
31 {
32  bool IsNull() const override { return true; }
33 };
34 
35 /// An ActivationDescriptor for the ActivationLayer.
37 {
39  : m_Function(ActivationFunction::Sigmoid)
40  , m_A(0)
41  , m_B(0)
42  {}
43 
45  float a = 0,
46  float b = 0)
47  : m_Function(activation)
48  , m_A(a)
49  , m_B(b)
50  {}
51 
52  bool operator ==(const ActivationDescriptor &rhs) const
53  {
54  return m_Function == rhs.m_Function && m_A == rhs.m_B && m_B == rhs.m_B;
55  }
56 
57  /// @brief The activation function to use
58  /// (Sigmoid, TanH, Linear, ReLu, BoundedReLu, SoftReLu, LeakyReLu, Abs, Sqrt, Square, Elu).
60  /// Alpha upper bound value used by the activation functions. (BoundedReLu, Linear, TanH, Elu).
61  float m_A;
62  /// Beta lower bound value used by the activation functions. (BoundedReLu, Linear, TanH).
63  float m_B;
64 };
65 
66 /// An ArgMinMaxDescriptor for ArgMinMaxLayer
68 {
70  : m_Function(ArgMinMaxFunction::Min)
71  , m_Axis(-1)
72  , m_Output_Type(armnn::DataType::Signed32)
73  {}
74 
75  bool operator ==(const ArgMinMaxDescriptor &rhs) const
76  {
77  return m_Function == rhs.m_Function && m_Axis == rhs.m_Axis && m_Output_Type == rhs.m_Output_Type;
78  }
79 
80  /// Specify if the function is to find Min or Max.
82  /// Axis to reduce across the input tensor.
83  int m_Axis;
84  /// Deprecated and will be removed in future release.
86 };
87 
88 /// A ComparisonDescriptor for the ComparisonLayer
90 {
93  {}
94 
96  : m_Operation(operation)
97  {}
98 
99  bool operator ==(const ComparisonDescriptor &rhs) const
100  {
101  return m_Operation == rhs.m_Operation;
102  }
103 
104  /// Specifies the comparison operation to execute
106 };
107 
108 /// A ElementwiseUnaryDescriptor for the ElementwiseUnaryLayer
110 {
113  {}
114 
116  : m_Operation(operation)
117  {}
118 
120  {
121  return m_Operation == rhs.m_Operation;
122  }
123 
124  /// Specifies the elementwiseUnary operation to execute
126 };
127 
128 /// A PermuteDescriptor for the PermuteLayer.
130 {
132  : m_DimMappings{}
133  {}
134 
136  : m_DimMappings(dimMappings)
137  {}
138 
139  bool operator ==(const PermuteDescriptor &rhs) const
140  {
141  return m_DimMappings.IsEqual(rhs.m_DimMappings);
142  }
143 
144  /// @brief Indicates how to translate tensor elements from a given source into the target destination, when
145  /// source and target potentially have different memory layouts e.g. {0U, 3U, 1U, 2U}.
147 };
148 
149 /// A SoftmaxDescriptor for the SoftmaxLayer.
151 {
153  : m_Beta(1.0f)
154  , m_Axis(-1)
155  {}
156 
157  bool operator ==(const SoftmaxDescriptor& rhs) const
158  {
159  return m_Beta == rhs.m_Beta && m_Axis == rhs.m_Axis;
160  }
161 
162  /// Exponentiation value.
163  float m_Beta;
164  /// Scalar, defaulted to the last index (-1), specifying the dimension the activation will be performed on.
165  int m_Axis;
166 };
167 
168 /// A LogSoftmaxDescriptor for the LogSoftmaxLayer
170 
171 /// @brief An OriginsDescriptor for the ConcatLayer.
172 /// Descriptor to configure the concatenation process. Number of views must be equal to the number of inputs, and
173 /// their order must match - e.g. first view corresponds to the first input, second view to the second input, etc.
175 {
177  OriginsDescriptor(uint32_t numViews, uint32_t numDimensions = 4);
178  OriginsDescriptor(const OriginsDescriptor& other);
180 
182 
183  OriginsDescriptor& operator=(OriginsDescriptor rhs);
184 
185  bool operator ==(const OriginsDescriptor& rhs) const;
186 
187  /// @Brief Set the view origin coordinates. The arguments are: view, dimension, value.
188  /// If the view is greater than or equal to GetNumViews(), then the view argument is out of range.
189  /// If the coord is greater than or equal to GetNumDimensions(), then the coord argument is out of range.
190  Status SetViewOriginCoord(uint32_t view, uint32_t coord, uint32_t value);
191  /// Get the number of views.
192  uint32_t GetNumViews() const;
193  /// Get the number of dimensions.
194  uint32_t GetNumDimensions() const;
195  /// Return the view origin at the int value idx.
196  const uint32_t* GetViewOrigin(uint32_t idx) const;
197  /// @brief Reorders the viewOrigins in accordance with the indices presented in newOrdering array.
198  /// The number of views must match number of elements in the new ordering array.
199  void ReorderOrigins(unsigned int* newOrdering, unsigned int numNewOrdering);
200  /// Swap the ViewsDescriptor values first and second.
201  friend void swap(OriginsDescriptor& first, OriginsDescriptor& second);
202  /// Set the concatenation axis value.
203  void SetConcatAxis(unsigned int concatAxis);
204  /// Get the concatenation axis value.
205  unsigned int GetConcatAxis() const;
206 
207 private:
208  unsigned int m_ConcatAxis;
209  uint32_t m_NumViews;
210  uint32_t m_NumDimensions;
211  uint32_t** m_ViewOrigins;
212 };
213 
214 /// @brief A ViewsDescriptor for the SplitterLayer.
215 /// Descriptor to configure the splitting process. Number of Views must be equal to the number of outputs, and
216 /// their order must match - e.g. first view corresponds to the first output, second view to the second output, etc.
218 {
219  ViewsDescriptor(uint32_t numViews, uint32_t numDimensions = 4);
220  ViewsDescriptor(const ViewsDescriptor& other);
221  ViewsDescriptor();
223 
224  ~ViewsDescriptor();
225 
226  ViewsDescriptor& operator=(ViewsDescriptor rhs);
227 
228  bool operator ==(const ViewsDescriptor& rhs) const;
229 
230  /// @Brief Set the view origin coordinates. The arguments are: view, dimension, value.
231  /// If the view is greater than or equal to GetNumViews(), then the view argument is out of range.
232  /// If the coord is greater than or equal to GetNumDimensions(), then the coord argument is out of range.
233  Status SetViewOriginCoord(uint32_t view, uint32_t coord, uint32_t value);
234  /// @brief Set the size of the views. The arguments are: view, dimension, value.
235  /// If the view is greater than or equal to GetNumViews(), then the view argument is out of range.
236  /// If the coord is greater than or equal to GetNumDimensions(), then the coord argument is out of range.
237  Status SetViewSize(uint32_t view, uint32_t coord, uint32_t value);
238 
239  /// Get the number of views.
240  uint32_t GetNumViews() const;
241  /// Get the number of dimensions.
242  uint32_t GetNumDimensions() const;
243  /// Get the view origin at the int value idx.
244  const uint32_t* GetViewOrigin(uint32_t idx) const;
245  /// Get the view sizes at the int value idx.
246  const uint32_t* GetViewSizes(uint32_t idx) const;
247  /// Get the View Origins
248  const OriginsDescriptor& GetOrigins() const;
249 
250  /// Swap the ViewsDescriptor value first and second.
251  friend void swap(ViewsDescriptor& first, ViewsDescriptor& second);
252 private:
253  OriginsDescriptor m_Origins;
254  uint32_t** m_ViewSizes;
255 };
256 
257 
258 /// @brief Convenience template to create an OriginsDescriptor to use when creating a ConcatLayer for performing
259 /// concatenation of a number of input tensors.
260 template <typename TensorShapeIt>
262  TensorShapeIt last,
263  unsigned int concatenationDimension)
264 {
265  auto numInputs = std::distance(first, last);
266 
267  if (numInputs < 2)
268  {
269  throw InvalidArgumentException("Concatenation requires at least 2 inputs");
270  }
271 
272  const auto& firstInputShape = *first;
273 
274  const unsigned int numDimensions = firstInputShape.GetNumDimensions();
275  for (auto it = first + 1; it != last; ++it)
276  {
277  if (it->GetNumDimensions() != numDimensions)
278  {
279  throw InvalidArgumentException("All inputs to concatenation must have the same number of dimensions");
280  }
281  }
282 
283  if (concatenationDimension >= numDimensions)
284  {
285  throw InvalidArgumentException("concatenationDimension must be between 0 and the number of dimensions.");
286  }
287 
288  for (auto it = first; it != last; ++it)
289  {
290  for (unsigned int d = 0; d < numDimensions; ++d)
291  {
292  const bool dimSizeOk = (d == concatenationDimension) || (firstInputShape[d] == (*it)[d]);
293  if (!dimSizeOk)
294  {
295  throw InvalidArgumentException("All inputs to concatenation must be the same size along all dimensions "
296  " except the concatenation dimension");
297  }
298  }
299  }
300 
301  OriginsDescriptor viewsDescriptor(static_cast<uint32_t>(numInputs), numDimensions);
302  viewsDescriptor.SetConcatAxis(concatenationDimension);
303 
304  uint32_t viewIndex = 0u;
305  uint32_t coordAlongConcatDim = 0u;
306  for (auto it = first; it != last; ++it)
307  {
308  const auto& inputShape = *it;
309 
310  for (unsigned int i = 0; i < concatenationDimension; ++i)
311  {
312  viewsDescriptor.SetViewOriginCoord(viewIndex, i, 0);
313  }
314 
315  viewsDescriptor.SetViewOriginCoord(viewIndex, concatenationDimension, coordAlongConcatDim);
316  unsigned int dimSize = inputShape[concatenationDimension];
317  coordAlongConcatDim += dimSize;
318 
319 
320  for (unsigned int i = concatenationDimension + 1; i < numDimensions; ++i)
321  {
322  viewsDescriptor.SetViewOriginCoord(viewIndex, i, 0);
323  }
324 
325  ++viewIndex;
326  }
327 
328  return viewsDescriptor;
329 }
330 
331 /// A Pooling2dDescriptor for the Pooling2dLayer.
333 {
335  : m_PoolType(PoolingAlgorithm::Max)
336  , m_PadLeft(0)
337  , m_PadRight(0)
338  , m_PadTop(0)
339  , m_PadBottom(0)
340  , m_PoolWidth(0)
341  , m_PoolHeight(0)
342  , m_StrideX(0)
343  , m_StrideY(0)
344  , m_OutputShapeRounding(OutputShapeRounding::Floor)
345  , m_PaddingMethod(PaddingMethod::Exclude)
346  , m_DataLayout(DataLayout::NCHW)
347  {}
348 
349  bool operator ==(const Pooling2dDescriptor& rhs) const
350  {
351  return m_PoolType == rhs.m_PoolType &&
352  m_PadLeft == rhs.m_PadLeft &&
353  m_PadRight == rhs.m_PadRight &&
354  m_PadTop == rhs.m_PadTop &&
355  m_PadBottom == rhs.m_PadBottom &&
356  m_PoolWidth == rhs.m_PoolWidth &&
357  m_PoolHeight == rhs.m_PoolHeight &&
358  m_StrideX == rhs.m_StrideX &&
359  m_StrideY == rhs.m_StrideY &&
360  m_OutputShapeRounding == rhs.m_OutputShapeRounding &&
361  m_PaddingMethod == rhs.m_PaddingMethod &&
362  m_DataLayout == rhs.m_DataLayout;
363  }
364 
365  /// The pooling algorithm to use (Max. Average, L2).
367  /// Padding left value in the width dimension.
368  uint32_t m_PadLeft;
369  /// Padding right value in the width dimension.
370  uint32_t m_PadRight;
371  /// Padding top value in the height dimension.
372  uint32_t m_PadTop;
373  /// Padding bottom value in the height dimension.
374  uint32_t m_PadBottom;
375  /// Pooling width value.
376  uint32_t m_PoolWidth;
377  /// Pooling height value.
378  uint32_t m_PoolHeight;
379  /// Stride value when proceeding through input for the width dimension.
380  uint32_t m_StrideX;
381  /// Stride value when proceeding through input for the height dimension.
382  uint32_t m_StrideY;
383  /// The rounding method for the output shape. (Floor, Ceiling).
385  /// The padding method to be used. (Exclude, IgnoreValue).
387  /// The data layout to be used (NCHW, NHWC).
389 };
390 
391 /// A Pooling3dDescriptor for the Pooling3dLayer.
393 {
395  : m_PoolType(PoolingAlgorithm::Max)
396  , m_PadLeft(0)
397  , m_PadRight(0)
398  , m_PadTop(0)
399  , m_PadBottom(0)
400  , m_PadFront(0)
401  , m_PadBack(0)
402  , m_PoolWidth(0)
403  , m_PoolHeight(0)
404  , m_PoolDepth(0)
405  , m_StrideX(0)
406  , m_StrideY(0)
407  , m_StrideZ(0)
408  , m_OutputShapeRounding(OutputShapeRounding::Floor)
409  , m_PaddingMethod(PaddingMethod::Exclude)
410  , m_DataLayout(DataLayout::NCDHW)
411  {}
412 
413  bool operator ==(const Pooling3dDescriptor& rhs) const
414  {
415  return m_PoolType == rhs.m_PoolType &&
416  m_PadLeft == rhs.m_PadLeft &&
417  m_PadRight == rhs.m_PadRight &&
418  m_PadTop == rhs.m_PadTop &&
419  m_PadBottom == rhs.m_PadBottom &&
420  m_PadFront == rhs.m_PadFront &&
421  m_PadBack == rhs.m_PadBack &&
422  m_PoolWidth == rhs.m_PoolWidth &&
423  m_PoolHeight == rhs.m_PoolHeight &&
424  m_PoolDepth == rhs.m_PoolDepth &&
425  m_StrideX == rhs.m_StrideX &&
426  m_StrideY == rhs.m_StrideY &&
427  m_StrideZ == rhs.m_StrideZ &&
428  m_OutputShapeRounding == rhs.m_OutputShapeRounding &&
429  m_PaddingMethod == rhs.m_PaddingMethod &&
430  m_DataLayout == rhs.m_DataLayout;
431  }
432 
433  /// The pooling algorithm to use (Max. Average, L2).
435  /// Padding left value in the width dimension.
436  uint32_t m_PadLeft;
437  /// Padding right value in the width dimension.
438  uint32_t m_PadRight;
439  /// Padding top value in the height dimension.
440  uint32_t m_PadTop;
441  /// Padding bottom value in the height dimension.
442  uint32_t m_PadBottom;
443  /// Padding front value in the depth dimension.
444  uint32_t m_PadFront;
445  /// Padding back value in the depth dimension.
446  uint32_t m_PadBack;
447  /// Pooling width value.
448  uint32_t m_PoolWidth;
449  /// Pooling height value.
450  uint32_t m_PoolHeight;
451  /// Pooling depth value.
452  uint32_t m_PoolDepth;
453  /// Stride value when proceeding through input for the width dimension.
454  uint32_t m_StrideX;
455  /// Stride value when proceeding through input for the height dimension.
456  uint32_t m_StrideY;
457  /// Stride value when proceeding through input for the depth dimension.
458  uint32_t m_StrideZ;
459  /// The rounding method for the output shape. (Floor, Ceiling).
461  /// The padding method to be used. (Exclude, IgnoreValue).
463  /// The data layout to be used (NCDHW, NDHWC).
465 };
466 
467 /// A FullyConnectedDescriptor for the FullyConnectedLayer.
469 {
471  : m_BiasEnabled(false)
472  , m_TransposeWeightMatrix(false)
473  , m_ConstantWeights(true)
474  {}
475 
476  bool operator ==(const FullyConnectedDescriptor& rhs) const
477  {
478  return m_BiasEnabled == rhs.m_BiasEnabled
479  && m_TransposeWeightMatrix == rhs.m_TransposeWeightMatrix
480  && m_ConstantWeights == rhs.m_ConstantWeights;
481  }
482 
483  /// Get the number of views/inputs.
484  ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use GetNumInputs instead", "22.05")
485  uint32_t GetNumViews() const;
486 
487  /// Get the number of views/inputs.
488  uint32_t GetNumInputs() const;
489 
490  /// Enable/disable bias.
492  /// Enable/disable transpose weight matrix.
494  /// Enable/disable constant weights and biases.
496 };
497 
498 /// A Convolution2dDescriptor for the Convolution2dLayer.
500 {
502  : m_PadLeft(0)
503  , m_PadRight(0)
504  , m_PadTop(0)
505  , m_PadBottom(0)
506  , m_StrideX(1)
507  , m_StrideY(1)
508  , m_DilationX(1)
509  , m_DilationY(1)
510  , m_BiasEnabled(false)
511  , m_DataLayout(DataLayout::NCHW)
512  {}
513 
514  bool operator ==(const Convolution2dDescriptor& rhs) const
515  {
516  return m_PadLeft == rhs.m_PadLeft &&
517  m_PadRight == rhs.m_PadRight &&
518  m_PadTop == rhs.m_PadTop &&
519  m_PadBottom == rhs.m_PadBottom &&
520  m_StrideX == rhs.m_StrideX &&
521  m_StrideY == rhs.m_StrideY &&
522  m_DilationX == rhs.m_DilationX &&
523  m_DilationY == rhs.m_DilationY &&
524  m_BiasEnabled == rhs.m_BiasEnabled &&
525  m_DataLayout == rhs.m_DataLayout;
526  }
527  uint32_t GetNumInputs() const;
528 
529 
530  /// Padding left value in the width dimension.
531  uint32_t m_PadLeft;
532  /// Padding right value in the width dimension.
533  uint32_t m_PadRight;
534  /// Padding top value in the height dimension.
535  uint32_t m_PadTop;
536  /// Padding bottom value in the height dimension.
537  uint32_t m_PadBottom;
538  /// Stride value when proceeding through input for the width dimension.
539  uint32_t m_StrideX;
540  /// Stride value when proceeding through input for the height dimension.
541  uint32_t m_StrideY;
542  /// Dilation along x axis
543  uint32_t m_DilationX;
544  /// Dilation along y axis
545  uint32_t m_DilationY;
546  /// Enable/disable bias.
548  /// The data layout to be used (NCHW, NHWC).
550 };
551 
552 /// A Convolution3dDescriptor for the Convolution3dLayer.
554 {
556  : m_PadLeft(0)
557  , m_PadRight(0)
558  , m_PadTop(0)
559  , m_PadBottom(0)
560  , m_PadFront(0)
561  , m_PadBack(0)
562  , m_StrideX(1)
563  , m_StrideY(1)
564  , m_StrideZ(1)
565  , m_DilationX(1)
566  , m_DilationY(1)
567  , m_DilationZ(1)
568  , m_BiasEnabled(false)
569  , m_DataLayout(DataLayout::NDHWC)
570  {}
571 
572  bool operator ==(const Convolution3dDescriptor& rhs) const
573  {
574  return m_PadLeft == rhs.m_PadLeft &&
575  m_PadRight == rhs.m_PadRight &&
576  m_PadTop == rhs.m_PadTop &&
577  m_PadBottom == rhs.m_PadBottom &&
578  m_PadFront == rhs.m_PadFront &&
579  m_PadBack == rhs.m_PadBack &&
580  m_StrideX == rhs.m_StrideX &&
581  m_StrideY == rhs.m_StrideY &&
582  m_StrideZ == rhs.m_StrideZ &&
583  m_DilationX == rhs.m_DilationX &&
584  m_DilationY == rhs.m_DilationY &&
585  m_DilationZ == rhs.m_DilationZ &&
586  m_BiasEnabled == rhs.m_BiasEnabled &&
587  m_DataLayout == rhs.m_DataLayout;
588  }
589 
590  /// Get the number of views/inputs.
591  uint32_t GetNumInputs() const;
592 
593  /// Padding left value in the width dimension.
594  uint32_t m_PadLeft;
595  /// Padding right value in the width dimension.
596  uint32_t m_PadRight;
597  /// Padding top value in the height dimension.
598  uint32_t m_PadTop;
599  /// Padding bottom value in the height dimension.
600  uint32_t m_PadBottom;
601  /// Padding front value in the depth dimension.
602  uint32_t m_PadFront;
603  /// Padding back value in the depth dimension.
604  uint32_t m_PadBack;
605  /// Stride value when proceeding through input for the width dimension.
606  uint32_t m_StrideX;
607  /// Stride value when proceeding through input for the height dimension.
608  uint32_t m_StrideY;
609  /// Stride value when proceeding through input for the depth dimension.
610  uint32_t m_StrideZ;
611  /// Dilation along x axis
612  uint32_t m_DilationX;
613  /// Dilation along y axis
614  uint32_t m_DilationY;
615  /// Dilation along z axis
616  uint32_t m_DilationZ;
617  /// Enable/disable bias.
619  /// The data layout to be used (NDHWC, NCDHW).
621 };
622 
623 /// A DepthwiseConvolution2dDescriptor for the DepthwiseConvolution2dLayer.
625 {
627  : m_PadLeft(0)
628  , m_PadRight(0)
629  , m_PadTop(0)
630  , m_PadBottom(0)
631  , m_StrideX(1)
632  , m_StrideY(1)
633  , m_DilationX(1)
634  , m_DilationY(1)
635  , m_BiasEnabled(false)
636  , m_DataLayout(DataLayout::NCHW)
637  {}
638 
640  {
641  return m_PadLeft == rhs.m_PadLeft &&
642  m_PadRight == rhs.m_PadRight &&
643  m_PadTop == rhs.m_PadTop &&
644  m_PadBottom == rhs.m_PadBottom &&
645  m_StrideX == rhs.m_StrideX &&
646  m_StrideY == rhs.m_StrideY &&
647  m_DilationX == rhs.m_DilationX &&
648  m_DilationY == rhs.m_DilationY &&
649  m_BiasEnabled == rhs.m_BiasEnabled &&
650  m_DataLayout == rhs.m_DataLayout;
651  }
652 
653  /// Get the number of views/inputs.
654  uint32_t GetNumInputs() const;
655 
656  /// Padding left value in the width dimension.
657  uint32_t m_PadLeft;
658  /// Padding right value in the width dimension.
659  uint32_t m_PadRight;
660  /// Padding top value in the height dimension.
661  uint32_t m_PadTop;
662  /// Padding bottom value in the height dimension.
663  uint32_t m_PadBottom;
664  /// Stride value when proceeding through input for the width dimension.
665  uint32_t m_StrideX;
666  /// Stride value when proceeding through input for the height dimension.
667  uint32_t m_StrideY;
668  /// Dilation factor value for width dimension.
669  uint32_t m_DilationX;
670  /// Dilation factor value for height dimension.
671  uint32_t m_DilationY;
672  /// Enable/disable bias.
674  /// The data layout to be used (NCHW, NHWC).
676 };
677 
679 {
681  : m_MaxDetections(0)
682  , m_MaxClassesPerDetection(1)
683  , m_DetectionsPerClass(1)
684  , m_NmsScoreThreshold(0)
685  , m_NmsIouThreshold(0)
686  , m_NumClasses(0)
687  , m_UseRegularNms(false)
688  , m_ScaleX(0)
689  , m_ScaleY(0)
690  , m_ScaleW(0)
691  , m_ScaleH(0)
692  {}
693 
695  {
696  return m_MaxDetections == rhs.m_MaxDetections &&
697  m_MaxClassesPerDetection == rhs.m_MaxClassesPerDetection &&
698  m_DetectionsPerClass == rhs.m_DetectionsPerClass &&
699  m_NmsScoreThreshold == rhs.m_NmsScoreThreshold &&
700  m_NmsIouThreshold == rhs.m_NmsIouThreshold &&
701  m_NumClasses == rhs.m_NumClasses &&
702  m_UseRegularNms == rhs.m_UseRegularNms &&
703  m_ScaleX == rhs.m_ScaleX &&
704  m_ScaleY == rhs.m_ScaleY &&
705  m_ScaleW == rhs.m_ScaleW &&
706  m_ScaleH == rhs.m_ScaleH;
707  }
708 
709  /// Maximum numbers of detections.
710  uint32_t m_MaxDetections;
711  /// Maximum numbers of classes per detection, used in Fast NMS.
713  /// Detections per classes, used in Regular NMS.
715  /// NMS score threshold.
717  /// Intersection over union threshold.
719  /// Number of classes.
720  uint32_t m_NumClasses;
721  /// Use Regular NMS.
723  /// Center size encoding scale x.
724  float m_ScaleX;
725  /// Center size encoding scale y.
726  float m_ScaleY;
727  /// Center size encoding scale weight.
728  float m_ScaleW;
729  /// Center size encoding scale height.
730  float m_ScaleH;
731 };
732 
733 /// A NormalizationDescriptor for the NormalizationLayer.
735 {
737  : m_NormChannelType(NormalizationAlgorithmChannel::Across)
738  , m_NormMethodType(NormalizationAlgorithmMethod::LocalBrightness)
739  , m_NormSize(0)
740  , m_Alpha(0.f)
741  , m_Beta(0.f)
742  , m_K(0.f)
743  , m_DataLayout(DataLayout::NCHW)
744  {}
745 
746  bool operator ==(const NormalizationDescriptor& rhs) const
747  {
748  return m_NormChannelType == rhs.m_NormChannelType &&
749  m_NormMethodType == rhs.m_NormMethodType &&
750  m_NormSize == rhs.m_NormSize &&
751  m_Alpha == rhs.m_Alpha &&
752  m_Beta == rhs.m_Beta &&
753  m_K == rhs.m_K &&
754  m_DataLayout == rhs.m_DataLayout;
755  }
756 
757  /// Normalization channel algorithm to use (Across, Within).
759  /// Normalization method algorithm to use (LocalBrightness, LocalContrast).
761  /// Depth radius value.
762  uint32_t m_NormSize;
763  /// Alpha value for the normalization equation.
764  float m_Alpha;
765  /// Beta value for the normalization equation.
766  float m_Beta;
767  /// Kappa value used for the across channel normalization equation.
768  float m_K;
769  /// The data layout to be used (NCHW, NHWC).
771 };
772 
773 /// A L2NormalizationDescriptor for the L2NormalizationLayer.
775 {
777  : m_Eps(1e-12f)
778  , m_DataLayout(DataLayout::NCHW)
779  {}
780 
781  bool operator ==(const L2NormalizationDescriptor& rhs) const
782  {
783  return m_Eps == rhs.m_Eps && m_DataLayout == rhs.m_DataLayout;
784  }
785 
786  /// Used to avoid dividing by zero.
787  float m_Eps;
788  /// The data layout to be used (NCHW, NHWC).
790 };
791 
792 /// A BatchNormalizationDescriptor for the BatchNormalizationLayer.
794 {
796  : m_Eps(0.0001f)
797  , m_DataLayout(DataLayout::NCHW)
798  {}
799 
801  {
802  return m_Eps == rhs.m_Eps && m_DataLayout == rhs.m_DataLayout;
803  }
804 
805  /// Value to add to the variance. Used to avoid dividing by zero.
806  float m_Eps;
807  /// The data layout to be used (NCHW, NHWC).
809 };
810 
811 /// An InstanceNormalizationDescriptor for InstanceNormalizationLayer
813 {
815  : m_Gamma(1.0f)
816  , m_Beta(0.0f)
817  , m_Eps(1e-12f)
818  , m_DataLayout(DataLayout::NCHW)
819  {}
820 
822  {
823  return m_Gamma == rhs.m_Gamma &&
824  m_Beta == rhs.m_Beta &&
825  m_Eps == rhs.m_Eps &&
826  m_DataLayout == rhs.m_DataLayout;
827  }
828 
829  /// Gamma, the scale scalar value applied for the normalized tensor. Defaults to 1.0.
830  float m_Gamma;
831  /// Beta, the offset scalar value applied for the normalized tensor. Defaults to 1.0.
832  float m_Beta;
833  /// Epsilon, small scalar value added to variance to avoid dividing by zero. Defaults to 1e-12f.
834  float m_Eps;
835  /// The data layout to be used (NCHW, NHWC).
837 };
838 
839 /// A BatchToSpaceNdDescriptor for the BatchToSpaceNdLayer.
841 {
843  : m_BlockShape({1, 1})
844  , m_Crops({{0, 0}, {0, 0}})
845  , m_DataLayout(DataLayout::NCHW)
846  {}
847 
848  BatchToSpaceNdDescriptor(std::vector<unsigned int> blockShape,
849  std::vector<std::pair<unsigned int, unsigned int>> crops)
850  : m_BlockShape(blockShape)
851  , m_Crops(crops)
852  , m_DataLayout(DataLayout::NCHW)
853  {}
854 
855  bool operator ==(const BatchToSpaceNdDescriptor& rhs) const
856  {
857  return m_BlockShape == rhs.m_BlockShape &&
858  m_Crops == rhs.m_Crops &&
859  m_DataLayout == rhs.m_DataLayout;
860  }
861 
862  /// Block shape values.
863  std::vector<unsigned int> m_BlockShape;
864  /// The values to crop from the input dimension.
865  std::vector<std::pair<unsigned int, unsigned int>> m_Crops;
866  /// The data layout to be used (NCHW, NHWC).
868 };
869 
870 /// A FakeQuantizationDescriptor for the FakeQuantizationLayer.
872 {
874  : m_Min(-6.0f)
875  , m_Max(6.0f)
876  {}
877 
879  {
880  return m_Min == rhs.m_Min && m_Max == rhs.m_Max;
881  }
882 
883  /// Minimum value.
884  float m_Min;
885  /// Maximum value.
886  float m_Max;
887 };
888 
889 /// A FillDescriptor for the FillLayer
891 {
893  : m_Value(0)
894  {}
895 
896  FillDescriptor(const float& value)
897  : m_Value(value)
898  {}
899 
900  bool operator ==(const FillDescriptor& rhs) const
901  {
902  return m_Value == rhs.m_Value;
903  }
904 
905  float m_Value;
906 };
907 
908 /// A GatherDescriptor for the GatherLayer.
910 {
912  : m_Axis(0)
913  {}
914 
915  GatherDescriptor(int32_t axis)
916  : m_Axis(axis)
917  {}
918 
919  bool operator ==(const GatherDescriptor& rhs) const
920  {
921  return m_Axis == rhs.m_Axis;
922  }
923 
924  /// The axis in params to gather indices from
925  int32_t m_Axis;
926 };
927 
928 /// A ResizeBilinearDescriptor for the ResizeBilinearLayer.
930  "ResizeBilinearDescriptor is not supported anymore. Use ResizeDescriptor instead.",
931  "22.08")
932  ResizeBilinearDescriptor : BaseDescriptor
933 {
934  ResizeBilinearDescriptor()
935  : m_TargetWidth(0)
936  , m_TargetHeight(0)
937  , m_DataLayout(DataLayout::NCHW)
938  , m_AlignCorners(false)
939  , m_HalfPixelCenters(false)
940  {}
941 
943  bool operator ==(const ResizeBilinearDescriptor& rhs) const
944  {
945  return m_TargetWidth == rhs.m_TargetWidth &&
946  m_TargetHeight == rhs.m_TargetHeight &&
947  m_DataLayout == rhs.m_DataLayout &&
948  m_AlignCorners == rhs.m_AlignCorners &&
949  m_HalfPixelCenters == rhs.m_HalfPixelCenters;
950  }
952 
953  /// Target width value.
954  uint32_t m_TargetWidth;
955  /// Target height value.
956  uint32_t m_TargetHeight;
957  /// The data layout to be used (NCHW, NHWC).
958  DataLayout m_DataLayout;
959  /// Aligned corners
960  bool m_AlignCorners;
961  /// Half Pixel Centers
962  bool m_HalfPixelCenters;
963 };
964 
965 /// A ResizeDescriptor for the ResizeLayer.
967 {
969  : m_TargetWidth(0)
970  , m_TargetHeight(0)
971  , m_Method(ResizeMethod::NearestNeighbor)
972  , m_DataLayout(DataLayout::NCHW)
973  , m_AlignCorners(false)
974  , m_HalfPixelCenters(false)
975  {}
976 
977  bool operator ==(const ResizeDescriptor& rhs) const
978  {
979  return m_TargetWidth == rhs.m_TargetWidth &&
980  m_TargetHeight == rhs.m_TargetHeight &&
981  m_Method == rhs.m_Method &&
982  m_DataLayout == rhs.m_DataLayout &&
983  m_AlignCorners == rhs.m_AlignCorners &&
984  m_HalfPixelCenters == rhs.m_HalfPixelCenters;
985  }
986 
987  /// Target width value.
988  uint32_t m_TargetWidth;
989  /// Target height value.
990  uint32_t m_TargetHeight;
991  /// The Interpolation method to use
992  /// (Bilinear, NearestNeighbor).
994  /// The data layout to be used (NCHW, NHWC).
996  /// Aligned corners
998  /// Half Pixel Centers
1000 };
1001 
1002 
1003 /// A ReshapeDescriptor for the ReshapeLayer.
1005 {
1007  : m_TargetShape()
1008  {}
1009 
1011  : m_TargetShape(shape)
1012  {}
1013 
1014  bool operator ==(const ReshapeDescriptor& rhs) const
1015  {
1016  return m_TargetShape == rhs.m_TargetShape;
1017  }
1018 
1019  /// Target shape value.
1021 };
1022 
1023 /// A SpaceToBatchNdDescriptor for the SpaceToBatchNdLayer.
1025 {
1027  : m_BlockShape({1, 1})
1028  , m_PadList({{0, 0}, {0, 0}})
1029  , m_DataLayout(DataLayout::NCHW)
1030  {}
1031 
1032  SpaceToBatchNdDescriptor(const std::vector<unsigned int>& blockShape,
1033  const std::vector<std::pair<unsigned int, unsigned int>>& padList)
1034  : m_BlockShape(blockShape)
1035  , m_PadList(padList)
1036  , m_DataLayout(DataLayout::NCHW)
1037  {}
1038 
1039  bool operator ==(const SpaceToBatchNdDescriptor& rhs) const
1040  {
1041  return m_BlockShape == rhs.m_BlockShape &&
1042  m_PadList == rhs.m_PadList &&
1043  m_DataLayout == rhs.m_DataLayout;
1044  }
1045 
1046  /// Block shape value.
1047  std::vector<unsigned int> m_BlockShape;
1048  /// @brief Specifies the padding values for the input dimension:
1049  /// heightPad{top, bottom} widthPad{left, right}.
1050  std::vector<std::pair<unsigned int, unsigned int>> m_PadList;
1051  /// The data layout to be used (NCHW, NHWC).
1053 };
1054 
1055 /// A SpaceToDepthDescriptor for the SpaceToDepthLayer
1057 {
1060  {}
1061 
1062  SpaceToDepthDescriptor(unsigned int blockSize, DataLayout dataLayout)
1063  : m_BlockSize(blockSize)
1064  , m_DataLayout(dataLayout)
1065  {}
1066 
1067  bool operator ==(const SpaceToDepthDescriptor& rhs) const
1068  {
1069  return m_BlockSize == rhs.m_BlockSize && m_DataLayout == rhs.m_DataLayout;
1070  }
1071 
1072  /// Scalar specifying the input block size. It must be >= 1
1073  unsigned int m_BlockSize;
1074 
1075  /// The data layout to be used (NCHW, NHWC).
1077 };
1078 
1079 /// A DepthToSpaceDescriptor for the DepthToSpaceLayer
1081 
1082 /// An LstmDescriptor for the LstmLayer.
1084 {
1086  : m_ActivationFunc(1) // 0: None, 1: Relu, 3: Relu6, 4: Tanh, 6: Sigmoid
1087  , m_ClippingThresCell(0.0)
1088  , m_ClippingThresProj(0.0)
1089  , m_CifgEnabled(true)
1090  , m_PeepholeEnabled(false)
1091  , m_ProjectionEnabled(false)
1092  , m_LayerNormEnabled(false)
1093  , m_TimeMajor(false)
1094  , m_InputIntermediateScale(0.0)
1095  , m_ForgetIntermediateScale(0.0)
1096  , m_CellIntermediateScale(0.0)
1097  , m_OutputIntermediateScale(0.0)
1098  , m_HiddenStateZeroPoint(0)
1099  , m_HiddenStateScale(0.0)
1100  {}
1101 
1102  bool operator ==(const LstmDescriptor& rhs) const
1103  {
1104  return m_ActivationFunc == rhs.m_ActivationFunc &&
1105  m_ClippingThresCell == rhs.m_ClippingThresCell &&
1106  m_ClippingThresProj == rhs.m_ClippingThresProj &&
1107  m_CifgEnabled == rhs.m_CifgEnabled &&
1108  m_PeepholeEnabled == rhs.m_PeepholeEnabled &&
1109  m_LayerNormEnabled == rhs.m_LayerNormEnabled &&
1110  m_TimeMajor == rhs.m_TimeMajor &&
1111  m_InputIntermediateScale == rhs.m_InputIntermediateScale &&
1112  m_ForgetIntermediateScale == rhs.m_ForgetIntermediateScale &&
1113  m_CellIntermediateScale == rhs.m_CellIntermediateScale &&
1114  m_OutputIntermediateScale == rhs.m_OutputIntermediateScale &&
1115  m_HiddenStateZeroPoint == rhs.m_HiddenStateZeroPoint &&
1116  m_HiddenStateScale == rhs.m_HiddenStateScale;
1117  }
1118 
1119  /// @brief The activation function to use.
1120  /// 0: None, 1: Relu, 3: Relu6, 4: Tanh, 6: Sigmoid.
1122  /// Clipping threshold value for the cell state.
1124  /// Clipping threshold value for the projection.
1126  /// Enable/disable cifg (coupled input & forget gate).
1128  /// Enable/disable peephole.
1130  /// Enable/disable the projection layer.
1132  /// Enable/disable layer normalization
1134  /// Enable/disable time major
1136  /// Input intermediate quantization scale
1138  /// Forget intermediate quantization scale
1140  /// Cell intermediate quantization scale
1142  /// Output intermediate quantization scale
1144  /// Hidden State zero point
1146  /// Hidden State quantization scale
1148 };
1149 
1151 
1152 /// A MeanDescriptor for the MeanLayer.
1154 {
1156  : m_Axis()
1157  , m_KeepDims(false)
1158  {}
1159 
1160  MeanDescriptor(const std::vector<unsigned int>& axis, bool keepDims)
1161  : m_Axis(axis)
1162  , m_KeepDims(keepDims)
1163  {}
1164 
1165  bool operator ==(const MeanDescriptor& rhs) const
1166  {
1167  return m_Axis == rhs.m_Axis && m_KeepDims == rhs.m_KeepDims;
1168  }
1169 
1170  /// Values for the dimensions to reduce.
1171  std::vector<unsigned int> m_Axis;
1172  /// Enable/disable keep dimensions. If true, then the reduced dimensions that are of length 1 are kept.
1174 };
1175 
1176 /// A PadDescriptor for the PadLayer.
1178 {
1179  PadDescriptor() : m_PadValue(0), m_PaddingMode(PaddingMode::Constant)
1180  {}
1181 
1182  PadDescriptor(const std::vector<std::pair<unsigned int, unsigned int>>& padList,
1183  const float& padValue = 0,
1184  const PaddingMode& paddingMode = PaddingMode::Constant)
1185  : m_PadList(padList)
1186  , m_PadValue(padValue)
1187  , m_PaddingMode(paddingMode)
1188  {}
1189 
1190  bool operator ==(const PadDescriptor& rhs) const
1191  {
1192  return m_PadList == rhs.m_PadList && m_PadValue == rhs.m_PadValue && m_PaddingMode == rhs.m_PaddingMode;
1193  }
1194 
1195  /// @brief Specifies the padding for input dimension.
1196  /// First is the number of values to add before the tensor in the dimension.
1197  /// Second is the number of values to add after the tensor in the dimension.
1198  /// The number of pairs should match the number of dimensions in the input tensor.
1199  std::vector<std::pair<unsigned int, unsigned int>> m_PadList;
1200 
1201  /// Optional value to use for padding, defaults to 0
1202  float m_PadValue;
1203 
1204  /// Specifies the Padding mode (Constant, Reflect or Symmetric)
1206 };
1207 
1208 /// A SliceDescriptor for the SliceLayer.
1210 {
1211  SliceDescriptor(const std::vector<unsigned int>& begin, const std::vector<unsigned int>& size)
1212  : m_Begin(begin)
1213  , m_Size(size)
1214  {}
1215 
1217  {}
1218 
1219  bool operator ==(const SliceDescriptor& rhs) const
1220  {
1221  return m_Begin == rhs.m_Begin && m_Size == rhs.m_Size;
1222  }
1223 
1224  /// Beginning indices of the slice in each dimension.
1225  std::vector<unsigned int> m_Begin;
1226 
1227  /// Size of the slice in each dimension.
1228  std::vector<unsigned int> m_Size;
1229 };
1230 
1231 /// A StackDescriptor for the StackLayer.
1233 {
1235  : m_Axis(0)
1236  , m_NumInputs(0)
1237  , m_InputShape()
1238  {}
1239 
1240  StackDescriptor(uint32_t axis, uint32_t numInputs, const TensorShape& inputShape)
1241  : m_Axis(axis)
1242  , m_NumInputs(numInputs)
1243  , m_InputShape(inputShape)
1244  {}
1245 
1246  bool operator ==(const StackDescriptor& rhs) const
1247  {
1248  return m_Axis == rhs.m_Axis &&
1249  m_NumInputs == rhs.m_NumInputs &&
1250  m_InputShape == rhs.m_InputShape;
1251  }
1252 
1253  /// 0-based axis along which to stack the input tensors.
1254  uint32_t m_Axis;
1255  /// Number of input tensors.
1256  uint32_t m_NumInputs;
1257  /// Required shape of all input tensors.
1259 };
1260 
1261 /// A StandInDescriptor for the StandIn layer
1263 {
1265 
1266  StandInDescriptor(uint32_t numInputs, uint32_t numOutputs)
1267  : m_NumInputs(numInputs)
1268  , m_NumOutputs(numOutputs)
1269  {}
1270 
1271  bool operator ==(const StandInDescriptor& rhs) const
1272  {
1273  return m_NumInputs == rhs.m_NumInputs &&
1274  m_NumOutputs == rhs.m_NumOutputs;
1275  }
1276 
1277  /// Number of input tensors
1278  uint32_t m_NumInputs = 0;
1279  /// Number of output tensors
1280  uint32_t m_NumOutputs = 0;
1281 };
1282 
1283 /// A StridedSliceDescriptor for the StridedSliceLayer.
1285 {
1286  StridedSliceDescriptor(const std::vector<int>& begin,
1287  const std::vector<int>& end,
1288  const std::vector<int>& stride)
1289  : m_Begin(begin)
1290  , m_End(end)
1291  , m_Stride(stride)
1292  , m_BeginMask(0)
1293  , m_EndMask(0)
1294  , m_ShrinkAxisMask(0)
1295  , m_EllipsisMask(0)
1296  , m_NewAxisMask(0)
1297  , m_DataLayout(DataLayout::NCHW)
1298  {}
1299 
1301  : StridedSliceDescriptor({}, {}, {})
1302  {}
1303 
1304  bool operator ==(const StridedSliceDescriptor& rhs) const
1305  {
1306  return m_Begin == rhs.m_Begin &&
1307  m_End == rhs.m_End &&
1308  m_Stride == rhs.m_Stride &&
1309  m_BeginMask == rhs.m_BeginMask &&
1310  m_EndMask == rhs.m_EndMask &&
1311  m_ShrinkAxisMask == rhs.m_ShrinkAxisMask &&
1312  m_EllipsisMask == rhs.m_EllipsisMask &&
1313  m_NewAxisMask == rhs.m_NewAxisMask &&
1314  m_DataLayout == rhs.m_DataLayout;
1315  }
1316 
1317  int GetStartForAxis(const TensorShape& inputShape, unsigned int axis) const;
1318  int GetStopForAxis(const TensorShape& inputShape,
1319  unsigned int axis,
1320  int startForAxis) const;
1321 
1322  /// Begin values for the input that will be sliced.
1323  std::vector<int> m_Begin;
1324  /// End values for the input that will be sliced.
1325  std::vector<int> m_End;
1326  /// Stride values for the input that will be sliced.
1327  std::vector<int> m_Stride;
1328 
1329  /// @brief Begin mask value. If set, then the begin is disregarded and the fullest
1330  /// range is used for the dimension.
1331  int32_t m_BeginMask;
1332  /// @brief End mask value. If set, then the end is disregarded and the fullest range
1333  /// is used for the dimension.
1334  int32_t m_EndMask;
1335  /// Shrink axis mask value. If set, the nth specification shrinks the dimensionality by 1.
1337  /// Ellipsis mask value.
1339  /// @brief New axis mask value. If set, the begin, end and stride is disregarded and
1340  /// a new 1 dimension is inserted to this location of the output tensor.
1341  int32_t m_NewAxisMask;
1342 
1343  /// The data layout to be used (NCHW, NHWC).
1345 };
1346 
1347 /// A PreCompiledDescriptor for the PreCompiledLayer.
1349 {
1350  PreCompiledDescriptor(unsigned int numInputSlots = 1u, unsigned int numOutputSlots = 1u)
1351  : m_NumInputSlots(numInputSlots), m_NumOutputSlots(numOutputSlots)
1352  {}
1353 
1354  ~PreCompiledDescriptor() = default;
1355 
1356  unsigned int m_NumInputSlots;
1357  unsigned int m_NumOutputSlots;
1358 };
1359 
1360 /// A QLstmDescriptor for the QLstmLayer.
1362 {
1364  : m_CellClip(0.0)
1365  , m_ProjectionClip(0.0)
1366  , m_CifgEnabled(true)
1367  , m_PeepholeEnabled(false)
1368  , m_ProjectionEnabled(false)
1369  , m_LayerNormEnabled(false)
1370  , m_InputIntermediateScale(0.0)
1371  , m_ForgetIntermediateScale(0.0)
1372  , m_CellIntermediateScale(0.0)
1373  , m_OutputIntermediateScale(0.0)
1374  , m_HiddenStateZeroPoint(0)
1375  , m_HiddenStateScale(0.0)
1376  {}
1377 
1378  bool operator ==(const QLstmDescriptor& rhs) const
1379  {
1380  return m_CellClip == rhs.m_CellClip &&
1381  m_ProjectionClip == rhs.m_ProjectionClip &&
1382  m_CifgEnabled == rhs.m_CifgEnabled &&
1383  m_PeepholeEnabled == rhs.m_PeepholeEnabled &&
1384  m_ProjectionEnabled == rhs.m_ProjectionEnabled &&
1385  m_LayerNormEnabled == rhs.m_LayerNormEnabled &&
1386  m_InputIntermediateScale == rhs.m_InputIntermediateScale &&
1387  m_ForgetIntermediateScale == rhs.m_ForgetIntermediateScale &&
1388  m_CellIntermediateScale == rhs.m_CellIntermediateScale &&
1389  m_OutputIntermediateScale == rhs.m_OutputIntermediateScale &&
1390  m_HiddenStateZeroPoint == rhs.m_HiddenStateZeroPoint &&
1391  m_HiddenStateScale == rhs.m_HiddenStateScale;
1392  }
1393 
1394  /// Clipping threshold value for the cell state
1395  float m_CellClip;
1396  /// Clipping threshold value for the projection
1398  /// Enable/disable CIFG (coupled input & forget gate).
1400  /// Enable/disable peephole
1402  /// Enable/disable the projection layer
1404  /// Enable/disable layer normalization
1406  /// Input intermediate quantization scale
1408  /// Forget intermediate quantization scale
1410  /// Cell intermediate quantization scale
1412  /// Output intermediate quantization scale
1414  /// Hidden State zero point
1416  /// Hidden State quantization scale
1418 };
1419 
1420 /// A TransposeConvolution2dDescriptor for the TransposeConvolution2dLayer.
1422 {
1424  m_PadLeft(0),
1425  m_PadRight(0),
1426  m_PadTop(0),
1427  m_PadBottom(0),
1428  m_StrideX(0),
1429  m_StrideY(0),
1430  m_BiasEnabled(false),
1431  m_DataLayout(DataLayout::NCHW),
1432  m_OutputShapeEnabled(false)
1433  {}
1434 
1436  {
1437  return m_PadLeft == rhs.m_PadLeft &&
1438  m_PadRight == rhs.m_PadRight &&
1439  m_PadTop == rhs.m_PadTop &&
1440  m_PadBottom == rhs.m_PadBottom &&
1441  m_StrideX == rhs.m_StrideX &&
1442  m_StrideY == rhs.m_StrideY &&
1443  m_BiasEnabled == rhs.m_BiasEnabled &&
1444  m_DataLayout == rhs.m_DataLayout &&
1445  m_OutputShapeEnabled == rhs.m_OutputShapeEnabled &&
1446  m_OutputShape == rhs.m_OutputShape;
1447  }
1448 
1449  /// Padding left value in the width dimension.
1450  uint32_t m_PadLeft;
1451  /// Padding right value in the width dimension.
1452  uint32_t m_PadRight;
1453  /// Padding top value in the height dimension.
1454  uint32_t m_PadTop;
1455  /// Padding bottom value in the height dimension.
1456  uint32_t m_PadBottom;
1457  /// Stride value when proceeding through input for the width dimension.
1458  uint32_t m_StrideX;
1459  /// Stride value when proceeding through input for the height dimension.
1460  uint32_t m_StrideY;
1461  /// Enable/disable bias.
1463  /// The data layout to be used (NCHW, NHWC).
1465  /// Output shape if it has been specified.
1467  std::vector<unsigned int> m_OutputShape;
1468 };
1469 
1470 /// A TransposeDescriptor for the TransposeLayer.
1472 {
1474  : m_DimMappings{}
1475  {}
1476 
1478  : m_DimMappings(dimMappings)
1479  {}
1480 
1481  bool operator ==(const TransposeDescriptor &rhs) const
1482  {
1483  return m_DimMappings.IsEqual(rhs.m_DimMappings);
1484  }
1485 
1486  /// @brief Indicates how to translate tensor elements from a given source into the target destination, when
1487  /// source and target potentially have different memory layouts e.g. {0U, 3U, 1U, 2U}.
1489 };
1490 
1491 /// A LogicalBinaryDescriptor for the LogicalBinaryLayer
1493 {
1496  {}
1497 
1499  : m_Operation(operation)
1500  {}
1501 
1502  bool operator ==(const LogicalBinaryDescriptor &rhs) const
1503  {
1504  return m_Operation == rhs.m_Operation;
1505  }
1506 
1507  /// Specifies the logical operation to execute
1509 };
1510 
1511 /// A ReduceDescriptor for the REDUCE operators.
1513 {
1515  : m_KeepDims(false)
1516  , m_vAxis()
1517  , m_ReduceOperation(ReduceOperation::Sum)
1518  {}
1519 
1520  bool operator ==(const ReduceDescriptor& rhs) const
1521  {
1522  return m_KeepDims == rhs.m_KeepDims &&
1523  m_vAxis == rhs.m_vAxis &&
1524  m_ReduceOperation == rhs.m_ReduceOperation;
1525  }
1526 
1527  /// if true then output shape has no change.
1529  /// The indices of the dimensions to reduce.
1530  std::vector<uint32_t> m_vAxis;
1531  /// Specifies the reduction operation to execute
1533 };
1534 
1535 /// A ChannelShuffleDescriptor for the ChannelShuffle operator
1537 {
1539  : m_NumGroups(0), m_Axis(0)
1540  {}
1541 
1542  ChannelShuffleDescriptor(const uint32_t& numGroups, const uint32_t& axis)
1543  : m_NumGroups(numGroups), m_Axis(axis)
1544  {}
1545 
1546  bool operator ==(const ChannelShuffleDescriptor& rhs) const
1547  {
1548  return m_NumGroups == rhs.m_NumGroups;
1549  }
1550 
1551  /// Number of groups for the channel shuffle operation
1552  uint32_t m_NumGroups;
1553  /// Axis to apply channel shuffle operation on
1554  uint32_t m_Axis;
1555 };
1556 
1557 } // namespace armnn
ElementwiseUnaryDescriptor(UnaryOperation operation)
uint32_t m_PadBottom
Padding bottom value in the height dimension.
bool m_BiasEnabled
Enable/disable bias.
PoolingAlgorithm m_PoolType
The pooling algorithm to use (Max, Average, L2).
float m_Eps
Used to avoid dividing by zero.
MeanDescriptor(const std::vector< unsigned int > &axis, bool keepDims)
bool m_ProjectionEnabled
Enable/disable the projection layer.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
PreCompiledDescriptor(unsigned int numInputSlots=1u, unsigned int numOutputSlots=1u)
SliceDescriptor(const std::vector< unsigned int > &begin, const std::vector< unsigned int > &size)
UnaryOperation m_Operation
Specifies the elementwiseUnary operation to execute.
uint32_t m_Axis
0-based axis along which to stack the input tensors.
A ViewsDescriptor for the SplitterLayer.
float m_ScaleW
Center size encoding scale weight.
uint32_t m_PadBottom
Padding bottom value in the height dimension.
bool m_BiasEnabled
Enable/disable bias.
DataLayout
Definition: Types.hpp:62
float m_K
Kappa value used for the across channel normalization equation.
int m_Axis
Scalar, defaulted to the last index (-1), specifying the dimension the activation will be performed o...
A TransposeConvolution2dDescriptor for the TransposeConvolution2dLayer.
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
uint32_t m_PoolWidth
Pooling width value.
uint32_t m_PadBottom
Padding bottom value in the height dimension.
uint32_t m_PadLeft
Padding left value in the width dimension.
float m_ClippingThresProj
Clipping threshold value for the projection.
uint32_t m_PoolDepth
Pooling depth value.
void swap(OriginsDescriptor &first, OriginsDescriptor &second)
int32_t m_ShrinkAxisMask
Shrink axis mask value. If set, the nth specification shrinks the dimensionality by 1...
A ReshapeDescriptor for the ReshapeLayer.
std::vector< int > m_Begin
Begin values for the input that will be sliced.
bool IsNull() const override
Definition: Descriptors.hpp:32
uint32_t m_PadBack
Padding back value in the depth dimension.
#define ARMNN_NO_DEPRECATE_WARN_BEGIN
Definition: Deprecated.hpp:33
float m_PadValue
Optional value to use for padding, defaults to 0.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
A ComparisonDescriptor for the ComparisonLayer.
Definition: Descriptors.hpp:89
float m_ScaleX
Center size encoding scale x.
TensorShape m_InputShape
Required shape of all input tensors.
bool m_TransposeWeightMatrix
Enable/disable transpose weight matrix.
PermuteDescriptor(const PermutationVector &dimMappings)
uint32_t m_PoolWidth
Pooling width value.
bool m_PeepholeEnabled
Enable/disable peephole.
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
A Convolution2dDescriptor for the Convolution2dLayer.
float m_Alpha
Alpha value for the normalization equation.
PadDescriptor(const std::vector< std::pair< unsigned int, unsigned int >> &padList, const float &padValue=0, const PaddingMode &paddingMode=PaddingMode::Constant)
uint32_t m_PadLeft
Padding left value in the width dimension.
bool m_KeepDims
if true then output shape has no change.
float m_HiddenStateScale
Hidden State quantization scale.
bool m_BiasEnabled
Enable/disable bias.
std::vector< unsigned int > m_OutputShape
float m_OutputIntermediateScale
Output intermediate quantization scale.
ResizeMethod m_Method
The Interpolation method to use (Bilinear, NearestNeighbor).
float m_Gamma
Gamma, the scale scalar value applied for the normalized tensor. Defaults to 1.0. ...
float m_Beta
Exponentiation value.
std::vector< unsigned int > m_Size
Size of the slice in each dimension.
ActivationDescriptor(armnn::ActivationFunction activation, float a=0, float b=0)
Definition: Descriptors.hpp:44
The padding fields don't count and are ignored.
float m_Eps
Value to add to the variance. Used to avoid dividing by zero.
PaddingMethod m_PaddingMethod
The padding method to be used. (Exclude, IgnoreValue).
ArgMinMaxFunction m_Function
Specify if the function is to find Min or Max.
Definition: Descriptors.hpp:81
uint32_t m_DetectionsPerClass
Detections per classes, used in Regular NMS.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
NormalizationAlgorithmChannel
Definition: Types.hpp:193
bool m_OutputShapeEnabled
Output shape if it has been specified.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
uint32_t m_PadRight
Padding right value in the width dimension.
uint32_t m_PadTop
Padding top value in the height dimension.
uint32_t m_PadBottom
Padding bottom value in the height dimension.
bool m_BiasEnabled
Enable/disable bias.
A LogicalBinaryDescriptor for the LogicalBinaryLayer.
uint32_t m_PadRight
Padding right value in the width dimension.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
std::vector< std::pair< unsigned int, unsigned int > > m_PadList
Specifies the padding for input dimension.
ReduceOperation m_ReduceOperation
Specifies the reduction operation to execute.
bool m_TimeMajor
Enable/disable time major.
ChannelShuffleDescriptor(const uint32_t &numGroups, const uint32_t &axis)
Copyright (c) 2021 ARM Limited and Contributors.
DataLayout m_DataLayout
The data layout to be used (NCDHW, NDHWC).
uint32_t m_PadBottom
Padding bottom value in the height dimension.
int32_t m_BeginMask
Begin mask value.
uint32_t m_PadFront
Padding front value in the depth dimension.
uint32_t m_DilationY
Dilation along y axis.
int32_t m_EndMask
End mask value.
A SpaceToDepthDescriptor for the SpaceToDepthLayer.
PoolingAlgorithm
Definition: Types.hpp:136
virtual ~BaseDescriptor()=default
std::vector< std::pair< unsigned int, unsigned int > > m_PadList
Specifies the padding values for the input dimension: heightPad{top, bottom} widthPad{left, right}.
uint32_t m_PoolHeight
Pooling height value.
uint32_t m_DilationX
Dilation along x axis.
uint32_t m_DilationY
Dilation factor value for height dimension.
StridedSliceDescriptor(const std::vector< int > &begin, const std::vector< int > &end, const std::vector< int > &stride)
LogicalBinaryOperation m_Operation
Specifies the logical operation to execute.
A BatchToSpaceNdDescriptor for the BatchToSpaceNdLayer.
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
LogicalBinaryOperation
Definition: Types.hpp:118
PermutationVector m_DimMappings
Indicates how to translate tensor elements from a given source into the target destination, when source and target potentially have different memory layouts e.g.
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
uint32_t m_NumOutputs
Number of output tensors.
NormalizationAlgorithmMethod m_NormMethodType
Normalization method algorithm to use (LocalBrightness, LocalContrast).
A ResizeBilinearDescriptor for the ResizeBilinearLayer.
PaddingMethod
The padding method modifies the output of pooling layers.
Definition: Types.hpp:174
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
uint32_t m_MaxClassesPerDetection
Maximum numbers of classes per detection, used in Fast NMS.
Base class for all descriptors.
Definition: Descriptors.hpp:22
std::vector< unsigned int > m_Axis
Values for the dimensions to reduce.
A StackDescriptor for the StackLayer.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
float m_ForgetIntermediateScale
Forget intermediate quantization scale.
TensorShape m_TargetShape
Target shape value.
ComparisonDescriptor(ComparisonOperation operation)
Definition: Descriptors.hpp:95
uint32_t m_PoolHeight
Pooling height value.
uint32_t m_PadTop
Padding top value in the height dimension.
uint32_t m_MaxDetections
Maximum numbers of detections.
A PadDescriptor for the PadLayer.
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
int32_t m_HiddenStateZeroPoint
Hidden State zero point.
ComparisonOperation
Definition: Types.hpp:108
uint32_t m_PadBack
Padding back value in the depth dimension.
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
ReduceOperation
Definition: Types.hpp:143
bool operator==(const armnn::DataLayout &dataLayout, const DataLayoutIndexed &indexed)
Equality methods.
Null Descriptor used as a return value from the IConnectableLayer GetParameters method by layers whic...
Definition: Descriptors.hpp:30
bool m_LayerNormEnabled
Enable/disable layer normalization.
uint32_t GetNumInputs(bool biasEnabled)
DataType
Definition: Types.hpp:48
float m_NmsIouThreshold
Intersection over union threshold.
float m_CellIntermediateScale
Cell intermediate quantization scale.
TransposeDescriptor(const PermutationVector &dimMappings)
An LstmDescriptor for the LstmLayer.
uint32_t m_PadRight
Padding right value in the width dimension.
#define ARMNN_NO_DEPRECATE_WARN_END
Definition: Deprecated.hpp:34
uint32_t m_DilationX
Dilation factor value for width dimension.
uint32_t m_PadTop
Padding top value in the height dimension.
std::vector< unsigned int > m_Begin
Beginning indices of the slice in each dimension.
int32_t m_NewAxisMask
New axis mask value.
bool m_KeepDims
Enable/disable keep dimensions. If true, then the reduced dimensions that are of length 1 are kept...
std::vector< unsigned int > m_BlockShape
Block shape values.
float m_Eps
Epsilon, small scalar value added to variance to avoid dividing by zero. Defaults to 1e-12f...
A L2NormalizationDescriptor for the L2NormalizationLayer.
An ArgMinMaxDescriptor for ArgMinMaxLayer.
Definition: Descriptors.hpp:67
An OriginsDescriptor for the ConcatLayer.
A ReduceDescriptor for the REDUCE operators.
float m_ProjectionClip
Clipping threshold value for the projection.
A FullyConnectedDescriptor for the FullyConnectedLayer.
int32_t m_EllipsisMask
Ellipsis mask value.
bool m_BiasEnabled
Enable/disable bias.
float m_InputIntermediateScale
Input intermediate quantization scale.
OutputShapeRounding m_OutputShapeRounding
The rounding method for the output shape. (Floor, Ceiling).
A FakeQuantizationDescriptor for the FakeQuantizationLayer.
uint32_t m_TargetWidth
Target width value.
A GatherDescriptor for the GatherLayer.
uint32_t m_PadBottom
Padding bottom value in the height dimension.
bool m_PeepholeEnabled
Enable/disable peephole.
Status
enumeration
Definition: Types.hpp:42
uint32_t m_NumClasses
Number of classes.
bool m_HalfPixelCenters
Half Pixel Centers.
ARMNN_NO_DEPRECATE_WARN_BEGIN struct ARMNN_DEPRECATED_MSG_REMOVAL_DATE("ResizeBilinearQueueDescriptor is deprecated use ResizeQueueDescriptor instead", "22.08") ResizeBilinearQueueDescriptor
float m_InputIntermediateScale
Input intermediate quantization scale.
uint32_t m_PadTop
Padding top value in the height dimension.
A StandInDescriptor for the StandIn layer.
A QLstmDescriptor for the QLstmLayer.
bool m_UseRegularNms
Use Regular NMS.
uint32_t m_PadFront
Padding front value in the depth dimension.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
std::vector< unsigned int > m_BlockShape
Block shape value.
std::vector< int > m_Stride
Stride values for the input that will be sliced.
PaddingMode
The padding mode controls whether the padding should be filled with constant values (Constant)...
Definition: Types.hpp:186
An ActivationDescriptor for the ActivationLayer.
Definition: Descriptors.hpp:36
SpaceToBatchNdDescriptor(const std::vector< unsigned int > &blockShape, const std::vector< std::pair< unsigned int, unsigned int >> &padList)
uint32_t m_NumInputs
Number of input tensors.
uint32_t m_PadLeft
Padding left value in the width dimension.
uint32_t m_TargetHeight
Target height value.
uint32_t m_ActivationFunc
The activation function to use.
A SliceDescriptor for the SliceLayer.
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
A Convolution3dDescriptor for the Convolution3dLayer.
uint32_t m_PadRight
Padding right value in the width dimension.
float m_ClippingThresCell
Clipping threshold value for the cell state.
unsigned int m_BlockSize
Scalar specifying the input block size. It must be >= 1.
uint32_t m_NumGroups
Number of groups for the channel shuffle operation.
PaddingMode m_PaddingMode
Specifies the Padding mode (Constant, Reflect or Symmetric)
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
float m_ForgetIntermediateScale
Forget intermediate quantization scale.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
float m_Beta
Beta, the offset scalar value applied for the normalized tensor. Defaults to 1.0. ...
float m_HiddenStateScale
Hidden State quantization scale.
A Pooling3dDescriptor for the Pooling3dLayer.
uint32_t m_StrideZ
Stride value when proceeding through input for the depth dimension.
std::vector< uint32_t > m_vAxis
The indices of the dimensions to reduce.
float m_ScaleH
Center size encoding scale height.
ComparisonOperation m_Operation
Specifies the comparison operation to execute.
std::vector< int > m_End
End values for the input that will be sliced.
A SpaceToBatchNdDescriptor for the SpaceToBatchNdLayer.
OutputShapeRounding
Definition: Types.hpp:207
DataLayout m_DataLayout
The data layout to be used (NDHWC, NCDHW).
NormalizationAlgorithmChannel m_NormChannelType
Normalization channel algorithm to use (Across, Within).
float m_CellClip
Clipping threshold value for the cell state.
float m_A
Alpha upper bound value used by the activation functions. (BoundedReLu, Linear, TanH, Elu).
Definition: Descriptors.hpp:61
uint32_t m_DilationX
Dilation along x axis.
FillDescriptor(const float &value)
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
bool m_CifgEnabled
Enable/disable cifg (coupled input & forget gate).
StandInDescriptor(uint32_t numInputs, uint32_t numOutputs)
uint32_t m_PadLeft
Padding left value in the width dimension.
bool m_AlignCorners
Aligned corners.
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
int32_t m_Axis
The axis in params to gather indices from.
A ElementwiseUnaryDescriptor for the ElementwiseUnaryLayer.
PoolingAlgorithm m_PoolType
The pooling algorithm to use (Max, Average, L2).
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
uint32_t m_PadLeft
Padding left value in the width dimension.
SpaceToDepthDescriptor(unsigned int blockSize, DataLayout dataLayout)
std::vector< std::pair< unsigned int, unsigned int > > m_Crops
The values to crop from the input dimension.
uint32_t m_PadTop
Padding top value in the height dimension.
uint32_t m_PadTop
Padding top value in the height dimension.
bool m_ProjectionEnabled
Enable/disable the projection layer.
ArgMinMaxFunction
Definition: Types.hpp:102
OutputShapeRounding m_OutputShapeRounding
The rounding method for the output shape. (Floor, Ceiling).
uint32_t m_NumInputs
Number of input tensors.
void SetConcatAxis(unsigned int concatAxis)
Set the concatenation axis value.
ResizeMethod
Definition: Types.hpp:152
A MeanDescriptor for the MeanLayer.
UnaryOperation
Definition: Types.hpp:124
bool m_LayerNormEnabled
Enable/disable layer normalization.
uint32_t m_PadRight
Padding right value in the width dimension.
A TransposeDescriptor for the TransposeLayer.
A StridedSliceDescriptor for the StridedSliceLayer.
uint32_t m_Axis
Axis to apply channel shuffle operation on.
int m_Axis
Axis to reduce across the input tensor.
Definition: Descriptors.hpp:83
float m_ScaleY
Center size encoding scale y.
OriginsDescriptor CreateDescriptorForConcatenation(TensorShapeIt first, TensorShapeIt last, unsigned int concatenationDimension)
Convenience template to create an OriginsDescriptor to use when creating a ConcatLayer for performing...
float m_NmsScoreThreshold
NMS score threshold.
A PreCompiledDescriptor for the PreCompiledLayer.
GatherDescriptor(int32_t axis)
Krichevsky 2012: Local Brightness Normalization.
A Pooling2dDescriptor for the Pooling2dLayer.
A NormalizationDescriptor for the NormalizationLayer.
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
An InstanceNormalizationDescriptor for InstanceNormalizationLayer.
PaddingMethod m_PaddingMethod
The padding method to be used. (Exclude, IgnoreValue).
NormalizationAlgorithmMethod
Definition: Types.hpp:199
A ChannelShuffleDescriptor for the ChannelShuffle operator.
StackDescriptor(uint32_t axis, uint32_t numInputs, const TensorShape &inputShape)
ReshapeDescriptor(const TensorShape &shape)
float m_CellIntermediateScale
Cell intermediate quantization scale.
LogicalBinaryDescriptor(LogicalBinaryOperation operation)
DetectionPostProcessDescriptor()
uint32_t m_DilationZ
Dilation along z axis.
float m_B
Beta lower bound value used by the activation functions. (BoundedReLu, Linear, TanH).
Definition: Descriptors.hpp:63
A SoftmaxDescriptor for the SoftmaxLayer.
float m_Beta
Beta value for the normalization equation.
virtual bool IsNull() const
Definition: Descriptors.hpp:24
uint32_t m_StrideZ
Stride value when proceeding through input for the depth dimension.
BatchToSpaceNdDescriptor(std::vector< unsigned int > blockShape, std::vector< std::pair< unsigned int, unsigned int >> crops)
bool m_CifgEnabled
Enable/disable CIFG (coupled input & forget gate).
PermutationVector m_DimMappings
Indicates how to translate tensor elements from a given source into the target destination, when source and target potentially have different memory layouts e.g.
uint32_t m_NormSize
Depth radius value.
ActivationFunction m_Function
The activation function to use (Sigmoid, TanH, Linear, ReLu, BoundedReLu, SoftReLu, LeakyReLu, Abs, Sqrt, Square, Elu).
Definition: Descriptors.hpp:59
armnn::DataType m_Output_Type
Deprecated and will be removed in future release.
Definition: Descriptors.hpp:85
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
A DepthwiseConvolution2dDescriptor for the DepthwiseConvolution2dLayer.
uint32_t m_DilationY
Dilation along y axis.
A FillDescriptor for the FillLayer.
A BatchNormalizationDescriptor for the BatchNormalizationLayer.
uint32_t m_PadLeft
Padding left value in the width dimension.
ActivationFunction
Definition: Types.hpp:86
Status SetViewOriginCoord(uint32_t view, uint32_t coord, uint32_t value)
Set the view origin coordinates.
A PermuteDescriptor for the PermuteLayer.
uint32_t m_PadRight
Padding right value in the width dimension.
int32_t m_HiddenStateZeroPoint
Hidden State zero point.
float m_OutputIntermediateScale
Output intermediate quantization scale.
bool m_ConstantWeights
Enable/disable constant weights and biases.