Arm NN 22.08 — source listing of Descriptors.hpp.
1 //
2 // Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 #pragma once
6 
7 #include "Deprecated.hpp"
8 #include "DescriptorsFwd.hpp"
9 
10 #include <cstdint>
11 #include <initializer_list>
12 #include <iostream>
13 #include <sstream>
14 
15 #include "Tensor.hpp"
16 #include "Types.hpp"
17 
18 namespace armnn
19 {
20 
/// Base class for all descriptors.
struct BaseDescriptor
{
    /// Returns false for every concrete descriptor; NullDescriptor overrides this to true
    /// so callers of IConnectableLayer::GetParameters can detect "layer has no parameters".
    virtual bool IsNull() const { return false; }
    virtual ~BaseDescriptor() = default;
};
27 
28 /// Null Descriptor used as a return value from the IConnectableLayer GetParameters method
29 /// by layers which do not have a descriptor
31 {
32  bool IsNull() const override { return true; }
33 };
34 
35 /// An ActivationDescriptor for the ActivationLayer.
37 {
39  : m_Function(ActivationFunction::Sigmoid)
40  , m_A(0)
41  , m_B(0)
42  {}
43 
45  float a = 0,
46  float b = 0)
47  : m_Function(activation)
48  , m_A(a)
49  , m_B(b)
50  {}
51 
52  bool operator ==(const ActivationDescriptor &rhs) const
53  {
54  return m_Function == rhs.m_Function && m_A == rhs.m_B && m_B == rhs.m_B;
55  }
56 
57  /// @brief The activation function to use
58  /// (Sigmoid, TanH, Linear, ReLu, BoundedReLu, SoftReLu, LeakyReLu, Abs, Sqrt, Square, Elu).
60  /// Alpha upper bound value used by the activation functions. (BoundedReLu, Linear, TanH, Elu).
61  float m_A;
62  /// Beta lower bound value used by the activation functions. (BoundedReLu, Linear, TanH).
63  float m_B;
64 };
65 
66 /// An ArgMinMaxDescriptor for ArgMinMaxLayer
68 {
70  : m_Function(ArgMinMaxFunction::Min)
71  , m_Axis(-1)
72  , m_Output_Type(armnn::DataType::Signed32)
73  {}
74 
75  bool operator ==(const ArgMinMaxDescriptor &rhs) const
76  {
77  return m_Function == rhs.m_Function && m_Axis == rhs.m_Axis && m_Output_Type == rhs.m_Output_Type;
78  }
79 
80  /// Specify if the function is to find Min or Max.
82  /// Axis to reduce across the input tensor.
83  int m_Axis;
84  /// Deprecated and will be removed in future release.
86 };
87 
88 /// A ComparisonDescriptor for the ComparisonLayer
90 {
93  {}
94 
96  : m_Operation(operation)
97  {}
98 
99  bool operator ==(const ComparisonDescriptor &rhs) const
100  {
101  return m_Operation == rhs.m_Operation;
102  }
103 
104  /// Specifies the comparison operation to execute
106 };
107 
108 /// A ElementwiseUnaryDescriptor for the ElementwiseUnaryLayer
110 {
113  {}
114 
116  : m_Operation(operation)
117  {}
118 
120  {
121  return m_Operation == rhs.m_Operation;
122  }
123 
124  /// Specifies the elementwiseUnary operation to execute
126 };
127 
128 /// A PermuteDescriptor for the PermuteLayer.
130 {
132  : m_DimMappings{}
133  {}
134 
136  : m_DimMappings(dimMappings)
137  {}
138 
139  bool operator ==(const PermuteDescriptor &rhs) const
140  {
141  return m_DimMappings.IsEqual(rhs.m_DimMappings);
142  }
143 
144  /// @brief Indicates how to translate tensor elements from a given source into the target destination, when
145  /// source and target potentially have different memory layouts e.g. {0U, 3U, 1U, 2U}.
147 };
148 
149 /// A SoftmaxDescriptor for the SoftmaxLayer.
151 {
153  : m_Beta(1.0f)
154  , m_Axis(-1)
155  {}
156 
157  bool operator ==(const SoftmaxDescriptor& rhs) const
158  {
159  return m_Beta == rhs.m_Beta && m_Axis == rhs.m_Axis;
160  }
161 
162  /// Exponentiation value.
163  float m_Beta;
164  /// Scalar, defaulted to the last index (-1), specifying the dimension the activation will be performed on.
165  int m_Axis;
166 };
167 
168 /// A LogSoftmaxDescriptor for the LogSoftmaxLayer
170 
171 /// @brief An OriginsDescriptor for the ConcatLayer.
172 /// Descriptor to configure the concatenation process. Number of views must be equal to the number of inputs, and
173 /// their order must match - e.g. first view corresponds to the first input, second view to the second input, etc.
175 {
177  OriginsDescriptor(uint32_t numViews, uint32_t numDimensions = 4);
178  OriginsDescriptor(const OriginsDescriptor& other);
180 
182 
183  OriginsDescriptor& operator=(OriginsDescriptor rhs);
184 
185  bool operator ==(const OriginsDescriptor& rhs) const;
186 
187  /// @Brief Set the view origin coordinates. The arguments are: view, dimension, value.
188  /// If the view is greater than or equal to GetNumViews(), then the view argument is out of range.
189  /// If the coord is greater than or equal to GetNumDimensions(), then the coord argument is out of range.
190  Status SetViewOriginCoord(uint32_t view, uint32_t coord, uint32_t value);
191  /// Get the number of views.
192  uint32_t GetNumViews() const;
193  /// Get the number of dimensions.
194  uint32_t GetNumDimensions() const;
195  /// Return the view origin at the int value idx.
196  const uint32_t* GetViewOrigin(uint32_t idx) const;
197  /// @brief Reorders the viewOrigins in accordance with the indices presented in newOrdering array.
198  /// The number of views must match number of elements in the new ordering array.
199  void ReorderOrigins(unsigned int* newOrdering, unsigned int numNewOrdering);
200  /// Swap the ViewsDescriptor values first and second.
201  friend void swap(OriginsDescriptor& first, OriginsDescriptor& second);
202  /// Set the concatenation axis value.
203  void SetConcatAxis(unsigned int concatAxis);
204  /// Get the concatenation axis value.
205  unsigned int GetConcatAxis() const;
206 
207 private:
208  unsigned int m_ConcatAxis;
209  uint32_t m_NumViews;
210  uint32_t m_NumDimensions;
211  uint32_t** m_ViewOrigins;
212 };
213 
214 /// @brief A ViewsDescriptor for the SplitterLayer.
215 /// Descriptor to configure the splitting process. Number of Views must be equal to the number of outputs, and
216 /// their order must match - e.g. first view corresponds to the first output, second view to the second output, etc.
218 {
219  ViewsDescriptor(uint32_t numViews, uint32_t numDimensions = 4);
220  ViewsDescriptor(const ViewsDescriptor& other);
221  ViewsDescriptor();
223 
224  ~ViewsDescriptor();
225 
226  ViewsDescriptor& operator=(ViewsDescriptor rhs);
227 
228  bool operator ==(const ViewsDescriptor& rhs) const;
229 
230  /// @Brief Set the view origin coordinates. The arguments are: view, dimension, value.
231  /// If the view is greater than or equal to GetNumViews(), then the view argument is out of range.
232  /// If the coord is greater than or equal to GetNumDimensions(), then the coord argument is out of range.
233  Status SetViewOriginCoord(uint32_t view, uint32_t coord, uint32_t value);
234  /// @brief Set the size of the views. The arguments are: view, dimension, value.
235  /// If the view is greater than or equal to GetNumViews(), then the view argument is out of range.
236  /// If the coord is greater than or equal to GetNumDimensions(), then the coord argument is out of range.
237  Status SetViewSize(uint32_t view, uint32_t coord, uint32_t value);
238 
239  /// Get the number of views.
240  uint32_t GetNumViews() const;
241  /// Get the number of dimensions.
242  uint32_t GetNumDimensions() const;
243  /// Get the view origin at the int value idx.
244  const uint32_t* GetViewOrigin(uint32_t idx) const;
245  /// Get the view sizes at the int value idx.
246  const uint32_t* GetViewSizes(uint32_t idx) const;
247  /// Get the View Origins
248  const OriginsDescriptor& GetOrigins() const;
249 
250  /// Swap the ViewsDescriptor value first and second.
251  friend void swap(ViewsDescriptor& first, ViewsDescriptor& second);
252 private:
253  OriginsDescriptor m_Origins;
254  uint32_t** m_ViewSizes;
255 };
256 
257 
258 /// @brief Convenience template to create an OriginsDescriptor to use when creating a ConcatLayer for performing
259 /// concatenation of a number of input tensors.
260 template <typename TensorShapeIt>
262  TensorShapeIt last,
263  unsigned int concatenationDimension)
264 {
265  auto numInputs = std::distance(first, last);
266 
267  if (numInputs < 2)
268  {
269  throw InvalidArgumentException("Concatenation requires at least 2 inputs");
270  }
271 
272  const auto& firstInputShape = *first;
273 
274  const unsigned int numDimensions = firstInputShape.GetNumDimensions();
275  for (auto it = first + 1; it != last; ++it)
276  {
277  if (it->GetNumDimensions() != numDimensions)
278  {
279  throw InvalidArgumentException("All inputs to concatenation must have the same number of dimensions");
280  }
281  }
282 
283  if (concatenationDimension >= numDimensions)
284  {
285  throw InvalidArgumentException("concatenationDimension must be between 0 and the number of dimensions.");
286  }
287 
288  for (auto it = first; it != last; ++it)
289  {
290  for (unsigned int d = 0; d < numDimensions; ++d)
291  {
292  const bool dimSizeOk = (d == concatenationDimension) || (firstInputShape[d] == (*it)[d]);
293  if (!dimSizeOk)
294  {
295  throw InvalidArgumentException("All inputs to concatenation must be the same size along all dimensions "
296  " except the concatenation dimension");
297  }
298  }
299  }
300 
301  OriginsDescriptor viewsDescriptor(static_cast<uint32_t>(numInputs), numDimensions);
302  viewsDescriptor.SetConcatAxis(concatenationDimension);
303 
304  uint32_t viewIndex = 0u;
305  uint32_t coordAlongConcatDim = 0u;
306  for (auto it = first; it != last; ++it)
307  {
308  const auto& inputShape = *it;
309 
310  for (unsigned int i = 0; i < concatenationDimension; ++i)
311  {
312  viewsDescriptor.SetViewOriginCoord(viewIndex, i, 0);
313  }
314 
315  viewsDescriptor.SetViewOriginCoord(viewIndex, concatenationDimension, coordAlongConcatDim);
316  unsigned int dimSize = inputShape[concatenationDimension];
317  coordAlongConcatDim += dimSize;
318 
319 
320  for (unsigned int i = concatenationDimension + 1; i < numDimensions; ++i)
321  {
322  viewsDescriptor.SetViewOriginCoord(viewIndex, i, 0);
323  }
324 
325  ++viewIndex;
326  }
327 
328  return viewsDescriptor;
329 }
330 
331 /// A Pooling2dDescriptor for the Pooling2dLayer.
333 {
335  : m_PoolType(PoolingAlgorithm::Max)
336  , m_PadLeft(0)
337  , m_PadRight(0)
338  , m_PadTop(0)
339  , m_PadBottom(0)
340  , m_PoolWidth(0)
341  , m_PoolHeight(0)
342  , m_StrideX(0)
343  , m_StrideY(0)
344  , m_OutputShapeRounding(OutputShapeRounding::Floor)
345  , m_PaddingMethod(PaddingMethod::Exclude)
346  , m_DataLayout(DataLayout::NCHW)
347  {}
348 
349  bool operator ==(const Pooling2dDescriptor& rhs) const
350  {
351  return m_PoolType == rhs.m_PoolType &&
352  m_PadLeft == rhs.m_PadLeft &&
353  m_PadRight == rhs.m_PadRight &&
354  m_PadTop == rhs.m_PadTop &&
355  m_PadBottom == rhs.m_PadBottom &&
356  m_PoolWidth == rhs.m_PoolWidth &&
357  m_PoolHeight == rhs.m_PoolHeight &&
358  m_StrideX == rhs.m_StrideX &&
359  m_StrideY == rhs.m_StrideY &&
360  m_OutputShapeRounding == rhs.m_OutputShapeRounding &&
361  m_PaddingMethod == rhs.m_PaddingMethod &&
362  m_DataLayout == rhs.m_DataLayout;
363  }
364 
365  /// The pooling algorithm to use (Max. Average, L2).
367  /// Padding left value in the width dimension.
368  uint32_t m_PadLeft;
369  /// Padding right value in the width dimension.
370  uint32_t m_PadRight;
371  /// Padding top value in the height dimension.
372  uint32_t m_PadTop;
373  /// Padding bottom value in the height dimension.
374  uint32_t m_PadBottom;
375  /// Pooling width value.
376  uint32_t m_PoolWidth;
377  /// Pooling height value.
378  uint32_t m_PoolHeight;
379  /// Stride value when proceeding through input for the width dimension.
380  uint32_t m_StrideX;
381  /// Stride value when proceeding through input for the height dimension.
382  uint32_t m_StrideY;
383  /// The rounding method for the output shape. (Floor, Ceiling).
385  /// The padding method to be used. (Exclude, IgnoreValue).
387  /// The data layout to be used (NCHW, NHWC).
389 };
390 
391 /// A Pooling3dDescriptor for the Pooling3dLayer.
393 {
395  : m_PoolType(PoolingAlgorithm::Max)
396  , m_PadLeft(0)
397  , m_PadRight(0)
398  , m_PadTop(0)
399  , m_PadBottom(0)
400  , m_PadFront(0)
401  , m_PadBack(0)
402  , m_PoolWidth(0)
403  , m_PoolHeight(0)
404  , m_PoolDepth(0)
405  , m_StrideX(0)
406  , m_StrideY(0)
407  , m_StrideZ(0)
408  , m_OutputShapeRounding(OutputShapeRounding::Floor)
409  , m_PaddingMethod(PaddingMethod::Exclude)
410  , m_DataLayout(DataLayout::NCDHW)
411  {}
412 
413  bool operator ==(const Pooling3dDescriptor& rhs) const
414  {
415  return m_PoolType == rhs.m_PoolType &&
416  m_PadLeft == rhs.m_PadLeft &&
417  m_PadRight == rhs.m_PadRight &&
418  m_PadTop == rhs.m_PadTop &&
419  m_PadBottom == rhs.m_PadBottom &&
420  m_PadFront == rhs.m_PadFront &&
421  m_PadBack == rhs.m_PadBack &&
422  m_PoolWidth == rhs.m_PoolWidth &&
423  m_PoolHeight == rhs.m_PoolHeight &&
424  m_PoolDepth == rhs.m_PoolDepth &&
425  m_StrideX == rhs.m_StrideX &&
426  m_StrideY == rhs.m_StrideY &&
427  m_StrideZ == rhs.m_StrideZ &&
428  m_OutputShapeRounding == rhs.m_OutputShapeRounding &&
429  m_PaddingMethod == rhs.m_PaddingMethod &&
430  m_DataLayout == rhs.m_DataLayout;
431  }
432 
433  /// The pooling algorithm to use (Max. Average, L2).
435  /// Padding left value in the width dimension.
436  uint32_t m_PadLeft;
437  /// Padding right value in the width dimension.
438  uint32_t m_PadRight;
439  /// Padding top value in the height dimension.
440  uint32_t m_PadTop;
441  /// Padding bottom value in the height dimension.
442  uint32_t m_PadBottom;
443  /// Padding front value in the depth dimension.
444  uint32_t m_PadFront;
445  /// Padding back value in the depth dimension.
446  uint32_t m_PadBack;
447  /// Pooling width value.
448  uint32_t m_PoolWidth;
449  /// Pooling height value.
450  uint32_t m_PoolHeight;
451  /// Pooling depth value.
452  uint32_t m_PoolDepth;
453  /// Stride value when proceeding through input for the width dimension.
454  uint32_t m_StrideX;
455  /// Stride value when proceeding through input for the height dimension.
456  uint32_t m_StrideY;
457  /// Stride value when proceeding through input for the depth dimension.
458  uint32_t m_StrideZ;
459  /// The rounding method for the output shape. (Floor, Ceiling).
461  /// The padding method to be used. (Exclude, IgnoreValue).
463  /// The data layout to be used (NCDHW, NDHWC).
465 };
466 
467 /// A FullyConnectedDescriptor for the FullyConnectedLayer.
469 {
471  : m_BiasEnabled(false)
472  , m_TransposeWeightMatrix(false)
473  , m_ConstantWeights(true)
474  {}
475 
476  bool operator ==(const FullyConnectedDescriptor& rhs) const
477  {
478  return m_BiasEnabled == rhs.m_BiasEnabled
479  && m_TransposeWeightMatrix == rhs.m_TransposeWeightMatrix
480  && m_ConstantWeights == rhs.m_ConstantWeights;
481  }
482 
483  /// Get the number of inputs.
484  uint32_t GetNumInputs() const;
485 
486  /// Enable/disable bias.
488  /// Enable/disable transpose weight matrix.
490  /// Enable/disable constant weights and biases.
492 };
493 
494 /// A Convolution2dDescriptor for the Convolution2dLayer.
496 {
498  : m_PadLeft(0)
499  , m_PadRight(0)
500  , m_PadTop(0)
501  , m_PadBottom(0)
502  , m_StrideX(1)
503  , m_StrideY(1)
504  , m_DilationX(1)
505  , m_DilationY(1)
506  , m_BiasEnabled(false)
507  , m_DataLayout(DataLayout::NCHW)
508  {}
509 
510  bool operator ==(const Convolution2dDescriptor& rhs) const
511  {
512  return m_PadLeft == rhs.m_PadLeft &&
513  m_PadRight == rhs.m_PadRight &&
514  m_PadTop == rhs.m_PadTop &&
515  m_PadBottom == rhs.m_PadBottom &&
516  m_StrideX == rhs.m_StrideX &&
517  m_StrideY == rhs.m_StrideY &&
518  m_DilationX == rhs.m_DilationX &&
519  m_DilationY == rhs.m_DilationY &&
520  m_BiasEnabled == rhs.m_BiasEnabled &&
521  m_DataLayout == rhs.m_DataLayout;
522  }
523  uint32_t GetNumInputs() const;
524 
525 
526  /// Padding left value in the width dimension.
527  uint32_t m_PadLeft;
528  /// Padding right value in the width dimension.
529  uint32_t m_PadRight;
530  /// Padding top value in the height dimension.
531  uint32_t m_PadTop;
532  /// Padding bottom value in the height dimension.
533  uint32_t m_PadBottom;
534  /// Stride value when proceeding through input for the width dimension.
535  uint32_t m_StrideX;
536  /// Stride value when proceeding through input for the height dimension.
537  uint32_t m_StrideY;
538  /// Dilation along x axis
539  uint32_t m_DilationX;
540  /// Dilation along y axis
541  uint32_t m_DilationY;
542  /// Enable/disable bias.
544  /// The data layout to be used (NCHW, NHWC).
546 };
547 
548 /// A Convolution3dDescriptor for the Convolution3dLayer.
550 {
552  : m_PadLeft(0)
553  , m_PadRight(0)
554  , m_PadTop(0)
555  , m_PadBottom(0)
556  , m_PadFront(0)
557  , m_PadBack(0)
558  , m_StrideX(1)
559  , m_StrideY(1)
560  , m_StrideZ(1)
561  , m_DilationX(1)
562  , m_DilationY(1)
563  , m_DilationZ(1)
564  , m_BiasEnabled(false)
565  , m_DataLayout(DataLayout::NDHWC)
566  {}
567 
568  bool operator ==(const Convolution3dDescriptor& rhs) const
569  {
570  return m_PadLeft == rhs.m_PadLeft &&
571  m_PadRight == rhs.m_PadRight &&
572  m_PadTop == rhs.m_PadTop &&
573  m_PadBottom == rhs.m_PadBottom &&
574  m_PadFront == rhs.m_PadFront &&
575  m_PadBack == rhs.m_PadBack &&
576  m_StrideX == rhs.m_StrideX &&
577  m_StrideY == rhs.m_StrideY &&
578  m_StrideZ == rhs.m_StrideZ &&
579  m_DilationX == rhs.m_DilationX &&
580  m_DilationY == rhs.m_DilationY &&
581  m_DilationZ == rhs.m_DilationZ &&
582  m_BiasEnabled == rhs.m_BiasEnabled &&
583  m_DataLayout == rhs.m_DataLayout;
584  }
585 
586  /// Get the number of views/inputs.
587  uint32_t GetNumInputs() const;
588 
589  /// Padding left value in the width dimension.
590  uint32_t m_PadLeft;
591  /// Padding right value in the width dimension.
592  uint32_t m_PadRight;
593  /// Padding top value in the height dimension.
594  uint32_t m_PadTop;
595  /// Padding bottom value in the height dimension.
596  uint32_t m_PadBottom;
597  /// Padding front value in the depth dimension.
598  uint32_t m_PadFront;
599  /// Padding back value in the depth dimension.
600  uint32_t m_PadBack;
601  /// Stride value when proceeding through input for the width dimension.
602  uint32_t m_StrideX;
603  /// Stride value when proceeding through input for the height dimension.
604  uint32_t m_StrideY;
605  /// Stride value when proceeding through input for the depth dimension.
606  uint32_t m_StrideZ;
607  /// Dilation along x axis
608  uint32_t m_DilationX;
609  /// Dilation along y axis
610  uint32_t m_DilationY;
611  /// Dilation along z axis
612  uint32_t m_DilationZ;
613  /// Enable/disable bias.
615  /// The data layout to be used (NDHWC, NCDHW).
617 };
618 
619 /// A DepthwiseConvolution2dDescriptor for the DepthwiseConvolution2dLayer.
621 {
623  : m_PadLeft(0)
624  , m_PadRight(0)
625  , m_PadTop(0)
626  , m_PadBottom(0)
627  , m_StrideX(1)
628  , m_StrideY(1)
629  , m_DilationX(1)
630  , m_DilationY(1)
631  , m_BiasEnabled(false)
632  , m_DataLayout(DataLayout::NCHW)
633  {}
634 
636  {
637  return m_PadLeft == rhs.m_PadLeft &&
638  m_PadRight == rhs.m_PadRight &&
639  m_PadTop == rhs.m_PadTop &&
640  m_PadBottom == rhs.m_PadBottom &&
641  m_StrideX == rhs.m_StrideX &&
642  m_StrideY == rhs.m_StrideY &&
643  m_DilationX == rhs.m_DilationX &&
644  m_DilationY == rhs.m_DilationY &&
645  m_BiasEnabled == rhs.m_BiasEnabled &&
646  m_DataLayout == rhs.m_DataLayout;
647  }
648 
649  /// Get the number of views/inputs.
650  uint32_t GetNumInputs() const;
651 
652  /// Padding left value in the width dimension.
653  uint32_t m_PadLeft;
654  /// Padding right value in the width dimension.
655  uint32_t m_PadRight;
656  /// Padding top value in the height dimension.
657  uint32_t m_PadTop;
658  /// Padding bottom value in the height dimension.
659  uint32_t m_PadBottom;
660  /// Stride value when proceeding through input for the width dimension.
661  uint32_t m_StrideX;
662  /// Stride value when proceeding through input for the height dimension.
663  uint32_t m_StrideY;
664  /// Dilation factor value for width dimension.
665  uint32_t m_DilationX;
666  /// Dilation factor value for height dimension.
667  uint32_t m_DilationY;
668  /// Enable/disable bias.
670  /// The data layout to be used (NCHW, NHWC).
672 };
673 
675 {
677  : m_MaxDetections(0)
678  , m_MaxClassesPerDetection(1)
679  , m_DetectionsPerClass(1)
680  , m_NmsScoreThreshold(0)
681  , m_NmsIouThreshold(0)
682  , m_NumClasses(0)
683  , m_UseRegularNms(false)
684  , m_ScaleX(0)
685  , m_ScaleY(0)
686  , m_ScaleW(0)
687  , m_ScaleH(0)
688  {}
689 
691  {
692  return m_MaxDetections == rhs.m_MaxDetections &&
693  m_MaxClassesPerDetection == rhs.m_MaxClassesPerDetection &&
694  m_DetectionsPerClass == rhs.m_DetectionsPerClass &&
695  m_NmsScoreThreshold == rhs.m_NmsScoreThreshold &&
696  m_NmsIouThreshold == rhs.m_NmsIouThreshold &&
697  m_NumClasses == rhs.m_NumClasses &&
698  m_UseRegularNms == rhs.m_UseRegularNms &&
699  m_ScaleX == rhs.m_ScaleX &&
700  m_ScaleY == rhs.m_ScaleY &&
701  m_ScaleW == rhs.m_ScaleW &&
702  m_ScaleH == rhs.m_ScaleH;
703  }
704 
705  /// Maximum numbers of detections.
706  uint32_t m_MaxDetections;
707  /// Maximum numbers of classes per detection, used in Fast NMS.
709  /// Detections per classes, used in Regular NMS.
711  /// NMS score threshold.
713  /// Intersection over union threshold.
715  /// Number of classes.
716  uint32_t m_NumClasses;
717  /// Use Regular NMS.
719  /// Center size encoding scale x.
720  float m_ScaleX;
721  /// Center size encoding scale y.
722  float m_ScaleY;
723  /// Center size encoding scale weight.
724  float m_ScaleW;
725  /// Center size encoding scale height.
726  float m_ScaleH;
727 };
728 
729 /// A NormalizationDescriptor for the NormalizationLayer.
731 {
733  : m_NormChannelType(NormalizationAlgorithmChannel::Across)
734  , m_NormMethodType(NormalizationAlgorithmMethod::LocalBrightness)
735  , m_NormSize(0)
736  , m_Alpha(0.f)
737  , m_Beta(0.f)
738  , m_K(0.f)
739  , m_DataLayout(DataLayout::NCHW)
740  {}
741 
742  bool operator ==(const NormalizationDescriptor& rhs) const
743  {
744  return m_NormChannelType == rhs.m_NormChannelType &&
745  m_NormMethodType == rhs.m_NormMethodType &&
746  m_NormSize == rhs.m_NormSize &&
747  m_Alpha == rhs.m_Alpha &&
748  m_Beta == rhs.m_Beta &&
749  m_K == rhs.m_K &&
750  m_DataLayout == rhs.m_DataLayout;
751  }
752 
753  /// Normalization channel algorithm to use (Across, Within).
755  /// Normalization method algorithm to use (LocalBrightness, LocalContrast).
757  /// Depth radius value.
758  uint32_t m_NormSize;
759  /// Alpha value for the normalization equation.
760  float m_Alpha;
761  /// Beta value for the normalization equation.
762  float m_Beta;
763  /// Kappa value used for the across channel normalization equation.
764  float m_K;
765  /// The data layout to be used (NCHW, NHWC).
767 };
768 
769 /// A L2NormalizationDescriptor for the L2NormalizationLayer.
771 {
773  : m_Eps(1e-12f)
774  , m_DataLayout(DataLayout::NCHW)
775  {}
776 
777  bool operator ==(const L2NormalizationDescriptor& rhs) const
778  {
779  return m_Eps == rhs.m_Eps && m_DataLayout == rhs.m_DataLayout;
780  }
781 
782  /// Used to avoid dividing by zero.
783  float m_Eps;
784  /// The data layout to be used (NCHW, NHWC).
786 };
787 
788 /// A BatchNormalizationDescriptor for the BatchNormalizationLayer.
790 {
792  : m_Eps(0.0001f)
793  , m_DataLayout(DataLayout::NCHW)
794  {}
795 
797  {
798  return m_Eps == rhs.m_Eps && m_DataLayout == rhs.m_DataLayout;
799  }
800 
801  /// Value to add to the variance. Used to avoid dividing by zero.
802  float m_Eps;
803  /// The data layout to be used (NCHW, NHWC).
805 };
806 
807 /// An InstanceNormalizationDescriptor for InstanceNormalizationLayer
809 {
811  : m_Gamma(1.0f)
812  , m_Beta(0.0f)
813  , m_Eps(1e-12f)
814  , m_DataLayout(DataLayout::NCHW)
815  {}
816 
818  {
819  return m_Gamma == rhs.m_Gamma &&
820  m_Beta == rhs.m_Beta &&
821  m_Eps == rhs.m_Eps &&
822  m_DataLayout == rhs.m_DataLayout;
823  }
824 
825  /// Gamma, the scale scalar value applied for the normalized tensor. Defaults to 1.0.
826  float m_Gamma;
827  /// Beta, the offset scalar value applied for the normalized tensor. Defaults to 1.0.
828  float m_Beta;
829  /// Epsilon, small scalar value added to variance to avoid dividing by zero. Defaults to 1e-12f.
830  float m_Eps;
831  /// The data layout to be used (NCHW, NHWC).
833 };
834 
835 /// A BatchToSpaceNdDescriptor for the BatchToSpaceNdLayer.
837 {
839  : m_BlockShape({1, 1})
840  , m_Crops({{0, 0}, {0, 0}})
841  , m_DataLayout(DataLayout::NCHW)
842  {}
843 
844  BatchToSpaceNdDescriptor(std::vector<unsigned int> blockShape,
845  std::vector<std::pair<unsigned int, unsigned int>> crops)
846  : m_BlockShape(blockShape)
847  , m_Crops(crops)
848  , m_DataLayout(DataLayout::NCHW)
849  {}
850 
851  bool operator ==(const BatchToSpaceNdDescriptor& rhs) const
852  {
853  return m_BlockShape == rhs.m_BlockShape &&
854  m_Crops == rhs.m_Crops &&
855  m_DataLayout == rhs.m_DataLayout;
856  }
857 
858  /// Block shape values.
859  std::vector<unsigned int> m_BlockShape;
860  /// The values to crop from the input dimension.
861  std::vector<std::pair<unsigned int, unsigned int>> m_Crops;
862  /// The data layout to be used (NCHW, NHWC).
864 };
865 
866 /// A FakeQuantizationDescriptor for the FakeQuantizationLayer.
868 {
870  : m_Min(-6.0f)
871  , m_Max(6.0f)
872  {}
873 
875  {
876  return m_Min == rhs.m_Min && m_Max == rhs.m_Max;
877  }
878 
879  /// Minimum value.
880  float m_Min;
881  /// Maximum value.
882  float m_Max;
883 };
884 
885 /// A FillDescriptor for the FillLayer
887 {
889  : m_Value(0)
890  {}
891 
892  FillDescriptor(const float& value)
893  : m_Value(value)
894  {}
895 
896  bool operator ==(const FillDescriptor& rhs) const
897  {
898  return m_Value == rhs.m_Value;
899  }
900 
901  float m_Value;
902 };
903 
904 /// A GatherDescriptor for the GatherLayer.
906 {
908  : m_Axis(0)
909  {}
910 
911  GatherDescriptor(int32_t axis)
912  : m_Axis(axis)
913  {}
914 
915  bool operator ==(const GatherDescriptor& rhs) const
916  {
917  return m_Axis == rhs.m_Axis;
918  }
919 
920  /// The axis in params to gather indices from
921  int32_t m_Axis;
922 };
923 
924 /// A ResizeBilinearDescriptor for the ResizeBilinearLayer.
926  "ResizeBilinearDescriptor is not supported anymore. Use ResizeDescriptor instead.",
927  "22.08")
928  ResizeBilinearDescriptor : BaseDescriptor
929 {
930  ResizeBilinearDescriptor()
931  : m_TargetWidth(0)
932  , m_TargetHeight(0)
933  , m_DataLayout(DataLayout::NCHW)
934  , m_AlignCorners(false)
935  , m_HalfPixelCenters(false)
936  {}
937 
939  bool operator ==(const ResizeBilinearDescriptor& rhs) const
940  {
941  return m_TargetWidth == rhs.m_TargetWidth &&
942  m_TargetHeight == rhs.m_TargetHeight &&
943  m_DataLayout == rhs.m_DataLayout &&
944  m_AlignCorners == rhs.m_AlignCorners &&
945  m_HalfPixelCenters == rhs.m_HalfPixelCenters;
946  }
948 
949  /// Target width value.
950  uint32_t m_TargetWidth;
951  /// Target height value.
952  uint32_t m_TargetHeight;
953  /// The data layout to be used (NCHW, NHWC).
954  DataLayout m_DataLayout;
955  /// Aligned corners
956  bool m_AlignCorners;
957  /// Half Pixel Centers
958  bool m_HalfPixelCenters;
959 };
960 
961 /// A ResizeDescriptor for the ResizeLayer.
963 {
965  : m_TargetWidth(0)
966  , m_TargetHeight(0)
967  , m_Method(ResizeMethod::NearestNeighbor)
968  , m_DataLayout(DataLayout::NCHW)
969  , m_AlignCorners(false)
970  , m_HalfPixelCenters(false)
971  {}
972 
973  bool operator ==(const ResizeDescriptor& rhs) const
974  {
975  return m_TargetWidth == rhs.m_TargetWidth &&
976  m_TargetHeight == rhs.m_TargetHeight &&
977  m_Method == rhs.m_Method &&
978  m_DataLayout == rhs.m_DataLayout &&
979  m_AlignCorners == rhs.m_AlignCorners &&
980  m_HalfPixelCenters == rhs.m_HalfPixelCenters;
981  }
982 
983  /// Target width value.
984  uint32_t m_TargetWidth;
985  /// Target height value.
986  uint32_t m_TargetHeight;
987  /// The Interpolation method to use
988  /// (Bilinear, NearestNeighbor).
990  /// The data layout to be used (NCHW, NHWC).
992  /// Aligned corners
994  /// Half Pixel Centers
996 };
997 
998 
999 /// A ReshapeDescriptor for the ReshapeLayer.
1001 {
1003  : m_TargetShape()
1004  {}
1005 
1007  : m_TargetShape(shape)
1008  {}
1009 
1010  bool operator ==(const ReshapeDescriptor& rhs) const
1011  {
1012  return m_TargetShape == rhs.m_TargetShape;
1013  }
1014 
1015  /// Target shape value.
1017 };
1018 
1019 /// A SpaceToBatchNdDescriptor for the SpaceToBatchNdLayer.
1021 {
1023  : m_BlockShape({1, 1})
1024  , m_PadList({{0, 0}, {0, 0}})
1025  , m_DataLayout(DataLayout::NCHW)
1026  {}
1027 
1028  SpaceToBatchNdDescriptor(const std::vector<unsigned int>& blockShape,
1029  const std::vector<std::pair<unsigned int, unsigned int>>& padList)
1030  : m_BlockShape(blockShape)
1031  , m_PadList(padList)
1032  , m_DataLayout(DataLayout::NCHW)
1033  {}
1034 
1035  bool operator ==(const SpaceToBatchNdDescriptor& rhs) const
1036  {
1037  return m_BlockShape == rhs.m_BlockShape &&
1038  m_PadList == rhs.m_PadList &&
1039  m_DataLayout == rhs.m_DataLayout;
1040  }
1041 
1042  /// Block shape value.
1043  std::vector<unsigned int> m_BlockShape;
1044  /// @brief Specifies the padding values for the input dimension:
1045  /// heightPad{top, bottom} widthPad{left, right}.
1046  std::vector<std::pair<unsigned int, unsigned int>> m_PadList;
1047  /// The data layout to be used (NCHW, NHWC).
1049 };
1050 
1051 /// A SpaceToDepthDescriptor for the SpaceToDepthLayer
1053 {
1056  {}
1057 
1058  SpaceToDepthDescriptor(unsigned int blockSize, DataLayout dataLayout)
1059  : m_BlockSize(blockSize)
1060  , m_DataLayout(dataLayout)
1061  {}
1062 
1063  bool operator ==(const SpaceToDepthDescriptor& rhs) const
1064  {
1065  return m_BlockSize == rhs.m_BlockSize && m_DataLayout == rhs.m_DataLayout;
1066  }
1067 
1068  /// Scalar specifying the input block size. It must be >= 1
1069  unsigned int m_BlockSize;
1070 
1071  /// The data layout to be used (NCHW, NHWC).
1073 };
1074 
1075 /// A DepthToSpaceDescriptor for the DepthToSpaceLayer
1077 
1078 /// An LstmDescriptor for the LstmLayer.
1080 {
1082  : m_ActivationFunc(1) // 0: None, 1: Relu, 3: Relu6, 4: Tanh, 6: Sigmoid
1083  , m_ClippingThresCell(0.0)
1084  , m_ClippingThresProj(0.0)
1085  , m_CifgEnabled(true)
1086  , m_PeepholeEnabled(false)
1087  , m_ProjectionEnabled(false)
1088  , m_LayerNormEnabled(false)
1089  , m_TimeMajor(false)
1090  , m_InputIntermediateScale(0.0)
1091  , m_ForgetIntermediateScale(0.0)
1092  , m_CellIntermediateScale(0.0)
1093  , m_OutputIntermediateScale(0.0)
1094  , m_HiddenStateZeroPoint(0)
1095  , m_HiddenStateScale(0.0)
1096  {}
1097 
1098  bool operator ==(const LstmDescriptor& rhs) const
1099  {
1100  return m_ActivationFunc == rhs.m_ActivationFunc &&
1101  m_ClippingThresCell == rhs.m_ClippingThresCell &&
1102  m_ClippingThresProj == rhs.m_ClippingThresProj &&
1103  m_CifgEnabled == rhs.m_CifgEnabled &&
1104  m_PeepholeEnabled == rhs.m_PeepholeEnabled &&
1105  m_LayerNormEnabled == rhs.m_LayerNormEnabled &&
1106  m_TimeMajor == rhs.m_TimeMajor &&
1107  m_InputIntermediateScale == rhs.m_InputIntermediateScale &&
1108  m_ForgetIntermediateScale == rhs.m_ForgetIntermediateScale &&
1109  m_CellIntermediateScale == rhs.m_CellIntermediateScale &&
1110  m_OutputIntermediateScale == rhs.m_OutputIntermediateScale &&
1111  m_HiddenStateZeroPoint == rhs.m_HiddenStateZeroPoint &&
1112  m_HiddenStateScale == rhs.m_HiddenStateScale;
1113  }
1114 
1115  /// @brief The activation function to use.
1116  /// 0: None, 1: Relu, 3: Relu6, 4: Tanh, 6: Sigmoid.
1118  /// Clipping threshold value for the cell state.
1120  /// Clipping threshold value for the projection.
1122  /// Enable/disable cifg (coupled input & forget gate).
1124  /// Enable/disable peephole.
1126  /// Enable/disable the projection layer.
1128  /// Enable/disable layer normalization
1130  /// Enable/disable time major
1132  /// Input intermediate quantization scale
1134  /// Forget intermediate quantization scale
1136  /// Cell intermediate quantization scale
1138  /// Output intermediate quantization scale
1140  /// Hidden State zero point
1142  /// Hidden State quantization scale
1144 };
1145 
1147 
1148 /// A MeanDescriptor for the MeanLayer.
1150 {
1152  : m_Axis()
1153  , m_KeepDims(false)
1154  {}
1155 
1156  MeanDescriptor(const std::vector<unsigned int>& axis, bool keepDims)
1157  : m_Axis(axis)
1158  , m_KeepDims(keepDims)
1159  {}
1160 
1161  bool operator ==(const MeanDescriptor& rhs) const
1162  {
1163  return m_Axis == rhs.m_Axis && m_KeepDims == rhs.m_KeepDims;
1164  }
1165 
1166  /// Values for the dimensions to reduce.
1167  std::vector<unsigned int> m_Axis;
1168  /// Enable/disable keep dimensions. If true, then the reduced dimensions that are of length 1 are kept.
1170 };
1171 
1172 /// A PadDescriptor for the PadLayer.
1174 {
1175  PadDescriptor() : m_PadValue(0), m_PaddingMode(PaddingMode::Constant)
1176  {}
1177 
1178  PadDescriptor(const std::vector<std::pair<unsigned int, unsigned int>>& padList,
1179  const float& padValue = 0,
1180  const PaddingMode& paddingMode = PaddingMode::Constant)
1181  : m_PadList(padList)
1182  , m_PadValue(padValue)
1183  , m_PaddingMode(paddingMode)
1184  {}
1185 
1186  bool operator ==(const PadDescriptor& rhs) const
1187  {
1188  return m_PadList == rhs.m_PadList && m_PadValue == rhs.m_PadValue && m_PaddingMode == rhs.m_PaddingMode;
1189  }
1190 
1191  /// @brief Specifies the padding for input dimension.
1192  /// First is the number of values to add before the tensor in the dimension.
1193  /// Second is the number of values to add after the tensor in the dimension.
1194  /// The number of pairs should match the number of dimensions in the input tensor.
1195  std::vector<std::pair<unsigned int, unsigned int>> m_PadList;
1196 
1197  /// Optional value to use for padding, defaults to 0
1198  float m_PadValue;
1199 
1200  /// Specifies the Padding mode (Constant, Reflect or Symmetric)
1202 };
1203 
1204 /// A SliceDescriptor for the SliceLayer.
1206 {
1207  SliceDescriptor(const std::vector<unsigned int>& begin, const std::vector<unsigned int>& size)
1208  : m_Begin(begin)
1209  , m_Size(size)
1210  {}
1211 
1213  {}
1214 
1215  bool operator ==(const SliceDescriptor& rhs) const
1216  {
1217  return m_Begin == rhs.m_Begin && m_Size == rhs.m_Size;
1218  }
1219 
1220  /// Beginning indices of the slice in each dimension.
1221  std::vector<unsigned int> m_Begin;
1222 
1223  /// Size of the slice in each dimension.
1224  std::vector<unsigned int> m_Size;
1225 };
1226 
1227 /// A StackDescriptor for the StackLayer.
1229 {
1231  : m_Axis(0)
1232  , m_NumInputs(0)
1233  , m_InputShape()
1234  {}
1235 
1236  StackDescriptor(uint32_t axis, uint32_t numInputs, const TensorShape& inputShape)
1237  : m_Axis(axis)
1238  , m_NumInputs(numInputs)
1239  , m_InputShape(inputShape)
1240  {}
1241 
1242  bool operator ==(const StackDescriptor& rhs) const
1243  {
1244  return m_Axis == rhs.m_Axis &&
1245  m_NumInputs == rhs.m_NumInputs &&
1246  m_InputShape == rhs.m_InputShape;
1247  }
1248 
1249  /// 0-based axis along which to stack the input tensors.
1250  uint32_t m_Axis;
1251  /// Number of input tensors.
1252  uint32_t m_NumInputs;
1253  /// Required shape of all input tensors.
1255 };
1256 
1257 /// A StandInDescriptor for the StandIn layer
1259 {
1261 
1262  StandInDescriptor(uint32_t numInputs, uint32_t numOutputs)
1263  : m_NumInputs(numInputs)
1264  , m_NumOutputs(numOutputs)
1265  {}
1266 
1267  bool operator ==(const StandInDescriptor& rhs) const
1268  {
1269  return m_NumInputs == rhs.m_NumInputs &&
1270  m_NumOutputs == rhs.m_NumOutputs;
1271  }
1272 
1273  /// Number of input tensors
1274  uint32_t m_NumInputs = 0;
1275  /// Number of output tensors
1276  uint32_t m_NumOutputs = 0;
1277 };
1278 
1279 /// A StridedSliceDescriptor for the StridedSliceLayer.
1281 {
1282  StridedSliceDescriptor(const std::vector<int>& begin,
1283  const std::vector<int>& end,
1284  const std::vector<int>& stride)
1285  : m_Begin(begin)
1286  , m_End(end)
1287  , m_Stride(stride)
1288  , m_BeginMask(0)
1289  , m_EndMask(0)
1290  , m_ShrinkAxisMask(0)
1291  , m_EllipsisMask(0)
1292  , m_NewAxisMask(0)
1293  , m_DataLayout(DataLayout::NCHW)
1294  {}
1295 
1297  : StridedSliceDescriptor({}, {}, {})
1298  {}
1299 
1300  bool operator ==(const StridedSliceDescriptor& rhs) const
1301  {
1302  return m_Begin == rhs.m_Begin &&
1303  m_End == rhs.m_End &&
1304  m_Stride == rhs.m_Stride &&
1305  m_BeginMask == rhs.m_BeginMask &&
1306  m_EndMask == rhs.m_EndMask &&
1307  m_ShrinkAxisMask == rhs.m_ShrinkAxisMask &&
1308  m_EllipsisMask == rhs.m_EllipsisMask &&
1309  m_NewAxisMask == rhs.m_NewAxisMask &&
1310  m_DataLayout == rhs.m_DataLayout;
1311  }
1312 
1313  int GetStartForAxis(const TensorShape& inputShape, unsigned int axis) const;
1314  int GetStopForAxis(const TensorShape& inputShape,
1315  unsigned int axis,
1316  int startForAxis) const;
1317 
1318  /// Begin values for the input that will be sliced.
1319  std::vector<int> m_Begin;
1320  /// End values for the input that will be sliced.
1321  std::vector<int> m_End;
1322  /// Stride values for the input that will be sliced.
1323  std::vector<int> m_Stride;
1324 
1325  /// @brief Begin mask value. If set, then the begin is disregarded and the fullest
1326  /// range is used for the dimension.
1327  int32_t m_BeginMask;
1328  /// @brief End mask value. If set, then the end is disregarded and the fullest range
1329  /// is used for the dimension.
1330  int32_t m_EndMask;
1331  /// Shrink axis mask value. If set, the nth specification shrinks the dimensionality by 1.
1333  /// Ellipsis mask value.
1335  /// @brief New axis mask value. If set, the begin, end and stride is disregarded and
1336  /// a new 1 dimension is inserted to this location of the output tensor.
1337  int32_t m_NewAxisMask;
1338 
1339  /// The data layout to be used (NCHW, NHWC).
1341 };
1342 
1343 /// A PreCompiledDescriptor for the PreCompiledLayer.
1345 {
1346  PreCompiledDescriptor(unsigned int numInputSlots = 1u, unsigned int numOutputSlots = 1u)
1347  : m_NumInputSlots(numInputSlots), m_NumOutputSlots(numOutputSlots)
1348  {}
1349 
1350  ~PreCompiledDescriptor() = default;
1351 
1352  unsigned int m_NumInputSlots;
1353  unsigned int m_NumOutputSlots;
1354 };
1355 
1356 /// A QLstmDescriptor for the QLstmLayer.
1358 {
1360  : m_CellClip(0.0)
1361  , m_ProjectionClip(0.0)
1362  , m_CifgEnabled(true)
1363  , m_PeepholeEnabled(false)
1364  , m_ProjectionEnabled(false)
1365  , m_LayerNormEnabled(false)
1366  , m_InputIntermediateScale(0.0)
1367  , m_ForgetIntermediateScale(0.0)
1368  , m_CellIntermediateScale(0.0)
1369  , m_OutputIntermediateScale(0.0)
1370  , m_HiddenStateZeroPoint(0)
1371  , m_HiddenStateScale(0.0)
1372  {}
1373 
1374  bool operator ==(const QLstmDescriptor& rhs) const
1375  {
1376  return m_CellClip == rhs.m_CellClip &&
1377  m_ProjectionClip == rhs.m_ProjectionClip &&
1378  m_CifgEnabled == rhs.m_CifgEnabled &&
1379  m_PeepholeEnabled == rhs.m_PeepholeEnabled &&
1380  m_ProjectionEnabled == rhs.m_ProjectionEnabled &&
1381  m_LayerNormEnabled == rhs.m_LayerNormEnabled &&
1382  m_InputIntermediateScale == rhs.m_InputIntermediateScale &&
1383  m_ForgetIntermediateScale == rhs.m_ForgetIntermediateScale &&
1384  m_CellIntermediateScale == rhs.m_CellIntermediateScale &&
1385  m_OutputIntermediateScale == rhs.m_OutputIntermediateScale &&
1386  m_HiddenStateZeroPoint == rhs.m_HiddenStateZeroPoint &&
1387  m_HiddenStateScale == rhs.m_HiddenStateScale;
1388  }
1389 
1390  /// Clipping threshold value for the cell state
1391  float m_CellClip;
1392  /// Clipping threshold value for the projection
1394  /// Enable/disable CIFG (coupled input & forget gate).
1396  /// Enable/disable peephole
1398  /// Enable/disable the projection layer
1400  /// Enable/disable layer normalization
1402  /// Input intermediate quantization scale
1404  /// Forget intermediate quantization scale
1406  /// Cell intermediate quantization scale
1408  /// Output intermediate quantization scale
1410  /// Hidden State zero point
1412  /// Hidden State quantization scale
1414 };
1415 
1416 /// A TransposeConvolution2dDescriptor for the TransposeConvolution2dLayer.
1418 {
1420  m_PadLeft(0),
1421  m_PadRight(0),
1422  m_PadTop(0),
1423  m_PadBottom(0),
1424  m_StrideX(0),
1425  m_StrideY(0),
1426  m_BiasEnabled(false),
1427  m_DataLayout(DataLayout::NCHW),
1428  m_OutputShapeEnabled(false)
1429  {}
1430 
1432  {
1433  return m_PadLeft == rhs.m_PadLeft &&
1434  m_PadRight == rhs.m_PadRight &&
1435  m_PadTop == rhs.m_PadTop &&
1436  m_PadBottom == rhs.m_PadBottom &&
1437  m_StrideX == rhs.m_StrideX &&
1438  m_StrideY == rhs.m_StrideY &&
1439  m_BiasEnabled == rhs.m_BiasEnabled &&
1440  m_DataLayout == rhs.m_DataLayout &&
1441  m_OutputShapeEnabled == rhs.m_OutputShapeEnabled &&
1442  m_OutputShape == rhs.m_OutputShape;
1443  }
1444 
1445  /// Padding left value in the width dimension.
1446  uint32_t m_PadLeft;
1447  /// Padding right value in the width dimension.
1448  uint32_t m_PadRight;
1449  /// Padding top value in the height dimension.
1450  uint32_t m_PadTop;
1451  /// Padding bottom value in the height dimension.
1452  uint32_t m_PadBottom;
1453  /// Stride value when proceeding through input for the width dimension.
1454  uint32_t m_StrideX;
1455  /// Stride value when proceeding through input for the height dimension.
1456  uint32_t m_StrideY;
1457  /// Enable/disable bias.
1459  /// The data layout to be used (NCHW, NHWC).
1461  /// Output shape if it has been specified.
1463  std::vector<unsigned int> m_OutputShape;
1464 };
1465 
1466 /// A TransposeDescriptor for the TransposeLayer.
1468 {
1470  : m_DimMappings{}
1471  {}
1472 
1474  : m_DimMappings(dimMappings)
1475  {}
1476 
1477  bool operator ==(const TransposeDescriptor &rhs) const
1478  {
1479  return m_DimMappings.IsEqual(rhs.m_DimMappings);
1480  }
1481 
1482  /// @brief Indicates how to translate tensor elements from a given source into the target destination, when
1483  /// source and target potentially have different memory layouts e.g. {0U, 3U, 1U, 2U}.
1485 };
1486 
1487 /// A LogicalBinaryDescriptor for the LogicalBinaryLayer
1489 {
1492  {}
1493 
1495  : m_Operation(operation)
1496  {}
1497 
1498  bool operator ==(const LogicalBinaryDescriptor &rhs) const
1499  {
1500  return m_Operation == rhs.m_Operation;
1501  }
1502 
1503  /// Specifies the logical operation to execute
1505 };
1506 
1507 /// A ReduceDescriptor for the REDUCE operators.
1509 {
1511  : m_KeepDims(false)
1512  , m_vAxis()
1513  , m_ReduceOperation(ReduceOperation::Sum)
1514  {}
1515 
1516  bool operator ==(const ReduceDescriptor& rhs) const
1517  {
1518  return m_KeepDims == rhs.m_KeepDims &&
1519  m_vAxis == rhs.m_vAxis &&
1520  m_ReduceOperation == rhs.m_ReduceOperation;
1521  }
1522 
1523  /// if true then output shape has no change.
1525  /// The indices of the dimensions to reduce.
1526  std::vector<uint32_t> m_vAxis;
1527  /// Specifies the reduction operation to execute
1529 };
1530 
1531 /// A ChannelShuffleDescriptor for the ChannelShuffle operator
1533 {
1535  : m_NumGroups(0), m_Axis(0)
1536  {}
1537 
1538  ChannelShuffleDescriptor(const uint32_t& numGroups, const uint32_t& axis)
1539  : m_NumGroups(numGroups), m_Axis(axis)
1540  {}
1541 
1542  bool operator ==(const ChannelShuffleDescriptor& rhs) const
1543  {
1544  return m_NumGroups == rhs.m_NumGroups;
1545  }
1546 
1547  /// Number of groups for the channel shuffle operation
1548  uint32_t m_NumGroups;
1549  /// Axis to apply channel shuffle operation on
1550  uint32_t m_Axis;
1551 };
1552 
1553 /// A BatchMatMulDescriptor for the BatchMatMul operator
1555 {
1557  Optional<DataLayout> dataLayoutY = EmptyOptional(),
1558  std::vector<unsigned int> transposeX = {},
1559  std::vector<unsigned int> transposeY = {},
1560  std::vector<unsigned int> adjointX = {},
1561  std::vector<unsigned int> adjointY = {})
1562  : m_DataLayoutX(dataLayoutX)
1563  , m_DataLayoutY(dataLayoutY)
1564  , m_TransposeX(transposeX)
1565  , m_TransposeY(transposeY)
1566  , m_AdjointX(adjointX)
1567  , m_AdjointY(adjointY)
1568  {}
1569 
1570  bool operator ==(const BatchMatMulDescriptor &rhs) const
1571  {
1572  return m_DataLayoutX == rhs.m_DataLayoutX &&
1573  m_DataLayoutY == rhs.m_DataLayoutY &&
1574  m_TransposeX == rhs.m_TransposeX &&
1575  m_TransposeY == rhs.m_TransposeY &&
1576  m_AdjointX == rhs.m_AdjointX &&
1577  m_AdjointY == rhs.m_AdjointY;
1578  }
1579 
1580  /// Data layout of each input tensor, such as NHWC/NDHWC (or leave as EmptyOptional for arbitrary layout)
1583 
1584  /// Transpose vector for each input tensor (leave as empty vector for no pre-transposing)
1585  /// Transpose and Adjoint can not both be set to true for the same tensor at the same time
1586  std::vector<unsigned int> m_TransposeX;
1587  std::vector<unsigned int> m_TransposeY;
1588 
1589  /// Adjoint vector for each input tensor (leave as empty vector for no pre-adjoint)
1590  /// Transpose and Adjoint can not both be set to true for the same tensor at the same time
1591  std::vector<unsigned int> m_AdjointX;
1592  std::vector<unsigned int> m_AdjointY;
1593 
1594  /// Static helper to get the two axes (for each input) for multiplication
1595  static std::pair<std::pair<unsigned int, unsigned int>, std::pair<unsigned int, unsigned int>> GetAxesToMul(
1596  const BatchMatMulDescriptor& desc,
1597  const TensorShape& tensorXShape,
1598  const TensorShape& tensorYShape);
1599 
1600  /// Static helper to get the axes (for each input) that will not be multiplied together
1601  static std::pair<std::vector<unsigned int>, std::vector<unsigned int>> GetAxesNotMul(
1602  const BatchMatMulDescriptor& desc,
1603  const TensorShape& inputXShape,
1604  const TensorShape& inputYShape);
1605 };
1606 
1607 } // namespace armnn
ElementwiseUnaryDescriptor(UnaryOperation operation)
uint32_t m_PadBottom
Padding bottom value in the height dimension.
bool m_BiasEnabled
Enable/disable bias.
PoolingAlgorithm m_PoolType
The pooling algorithm to use (Max, Average, L2).
float m_Eps
Used to avoid dividing by zero.
MeanDescriptor(const std::vector< unsigned int > &axis, bool keepDims)
bool m_ProjectionEnabled
Enable/disable the projection layer.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
PreCompiledDescriptor(unsigned int numInputSlots=1u, unsigned int numOutputSlots=1u)
SliceDescriptor(const std::vector< unsigned int > &begin, const std::vector< unsigned int > &size)
UnaryOperation m_Operation
Specifies the elementwiseUnary operation to execute.
uint32_t m_Axis
0-based axis along which to stack the input tensors.
A ViewsDescriptor for the SplitterLayer.
float m_ScaleW
Center size encoding scale weight.
uint32_t m_PadBottom
Padding bottom value in the height dimension.
bool m_BiasEnabled
Enable/disable bias.
DataLayout
Definition: Types.hpp:62
float m_K
Kappa value used for the across channel normalization equation.
int m_Axis
Scalar, defaulted to the last index (-1), specifying the dimension the activation will be performed o...
A TransposeConvolution2dDescriptor for the TransposeConvolution2dLayer.
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
uint32_t m_PoolWidth
Pooling width value.
uint32_t m_PadBottom
Padding bottom value in the height dimension.
uint32_t m_PadLeft
Padding left value in the width dimension.
float m_ClippingThresProj
Clipping threshold value for the projection.
uint32_t m_PoolDepth
Pooling depth value.
void swap(OriginsDescriptor &first, OriginsDescriptor &second)
int32_t m_ShrinkAxisMask
Shrink axis mask value. If set, the nth specification shrinks the dimensionality by 1...
A ReshapeDescriptor for the ReshapeLayer.
std::vector< int > m_Begin
Begin values for the input that will be sliced.
bool IsNull() const override
Definition: Descriptors.hpp:32
uint32_t m_PadBack
Padding back value in the depth dimension.
#define ARMNN_NO_DEPRECATE_WARN_BEGIN
Definition: Deprecated.hpp:33
float m_PadValue
Optional value to use for padding, defaults to 0.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
A ComparisonDescriptor for the ComparisonLayer.
Definition: Descriptors.hpp:89
float m_ScaleX
Center size encoding scale x.
TensorShape m_InputShape
Required shape of all input tensors.
bool m_TransposeWeightMatrix
Enable/disable transpose weight matrix.
PermuteDescriptor(const PermutationVector &dimMappings)
uint32_t m_PoolWidth
Pooling width value.
bool m_PeepholeEnabled
Enable/disable peephole.
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
A Convolution2dDescriptor for the Convolution2dLayer.
float m_Alpha
Alpha value for the normalization equation.
PadDescriptor(const std::vector< std::pair< unsigned int, unsigned int >> &padList, const float &padValue=0, const PaddingMode &paddingMode=PaddingMode::Constant)
uint32_t m_PadLeft
Padding left value in the width dimension.
bool m_KeepDims
if true then output shape has no change.
float m_HiddenStateScale
Hidden State quantization scale.
bool m_BiasEnabled
Enable/disable bias.
std::vector< unsigned int > m_OutputShape
Optional< DataLayout > m_DataLayoutX
Data layout of each input tensor, such as NHWC/NDHWC (or leave as EmptyOptional for arbitrary layout)...
float m_OutputIntermediateScale
Output intermediate quantization scale.
ResizeMethod m_Method
The Interpolation method to use (Bilinear, NearestNeighbor).
float m_Gamma
Gamma, the scale scalar value applied for the normalized tensor. Defaults to 1.0. ...
float m_Beta
Exponentiation value.
std::vector< unsigned int > m_Size
Size of the slice in each dimension.
std::vector< unsigned int > m_TransposeX
Transpose vector for each input tensor (leave as empty vector for no pre-transposing) Transpose and A...
ActivationDescriptor(armnn::ActivationFunction activation, float a=0, float b=0)
Definition: Descriptors.hpp:44
The padding fields don't count and are ignored.
float m_Eps
Value to add to the variance. Used to avoid dividing by zero.
PaddingMethod m_PaddingMethod
The padding method to be used. (Exclude, IgnoreValue).
ArgMinMaxFunction m_Function
Specify if the function is to find Min or Max.
Definition: Descriptors.hpp:81
uint32_t m_DetectionsPerClass
Detections per classes, used in Regular NMS.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
NormalizationAlgorithmChannel
Definition: Types.hpp:193
bool m_OutputShapeEnabled
Output shape if it has been specified.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
uint32_t m_PadRight
Padding right value in the width dimension.
uint32_t m_PadTop
Padding top value in the height dimension.
uint32_t m_PadBottom
Padding bottom value in the height dimension.
bool m_BiasEnabled
Enable/disable bias.
A LogicalBinaryDescriptor for the LogicalBinaryLayer.
uint32_t m_PadRight
Padding right value in the width dimension.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
std::vector< std::pair< unsigned int, unsigned int > > m_PadList
Specifies the padding for input dimension.
ReduceOperation m_ReduceOperation
Specifies the reduction operation to execute.
bool m_TimeMajor
Enable/disable time major.
ChannelShuffleDescriptor(const uint32_t &numGroups, const uint32_t &axis)
Copyright (c) 2021 ARM Limited and Contributors.
DataLayout m_DataLayout
The data layout to be used (NCDHW, NDHWC).
uint32_t m_PadBottom
Padding bottom value in the height dimension.
int32_t m_BeginMask
Begin mask value.
uint32_t m_PadFront
Padding front value in the depth dimension.
std::vector< unsigned int > m_AdjointX
Adjoint vector for each input tensor (leave as empty vector for no pre-adjoint) Transpose and Adjoint...
uint32_t m_DilationY
Dilation along y axis.
int32_t m_EndMask
End mask value.
A SpaceToDepthDescriptor for the SpaceToDepthLayer.
PoolingAlgorithm
Definition: Types.hpp:136
virtual ~BaseDescriptor()=default
std::vector< std::pair< unsigned int, unsigned int > > m_PadList
Specifies the padding values for the input dimension: heightPad{top, bottom} widthPad{left, right}.
uint32_t m_PoolHeight
Pooling height value.
uint32_t m_DilationX
Dilation along x axis.
uint32_t m_DilationY
Dilation factor value for height dimension.
StridedSliceDescriptor(const std::vector< int > &begin, const std::vector< int > &end, const std::vector< int > &stride)
LogicalBinaryOperation m_Operation
Specifies the logical operation to execute.
A BatchToSpaceNdDescriptor for the BatchToSpaceNdLayer.
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
LogicalBinaryOperation
Definition: Types.hpp:118
PermutationVector m_DimMappings
Indicates how to translate tensor elements from a given source into the target destination, when source and target potentially have different memory layouts e.g.
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
uint32_t m_NumOutputs
Number of output tensors.
NormalizationAlgorithmMethod m_NormMethodType
Normalization method algorithm to use (LocalBrightness, LocalContrast).
A ResizeBilinearDescriptor for the ResizeBilinearLayer.
PaddingMethod
The padding method modifies the output of pooling layers.
Definition: Types.hpp:174
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
uint32_t m_MaxClassesPerDetection
Maximum numbers of classes per detection, used in Fast NMS.
Base class for all descriptors.
Definition: Descriptors.hpp:22
std::vector< unsigned int > m_Axis
Values for the dimensions to reduce.
A StackDescriptor for the StackLayer.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
float m_ForgetIntermediateScale
Forget intermediate quantization scale.
TensorShape m_TargetShape
Target shape value.
ComparisonDescriptor(ComparisonOperation operation)
Definition: Descriptors.hpp:95
uint32_t m_PoolHeight
Pooling height value.
uint32_t m_PadTop
Padding top value in the height dimension.
uint32_t m_MaxDetections
Maximum numbers of detections.
A PadDescriptor for the PadLayer.
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
int32_t m_HiddenStateZeroPoint
Hidden State zero point.
ComparisonOperation
Definition: Types.hpp:108
uint32_t m_PadBack
Padding back value in the depth dimension.
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
ReduceOperation
Definition: Types.hpp:143
bool operator==(const armnn::DataLayout &dataLayout, const DataLayoutIndexed &indexed)
Equality methods.
Null Descriptor used as a return value from the IConnectableLayer GetParameters method by layers whic...
Definition: Descriptors.hpp:30
bool m_LayerNormEnabled
Enable/disable layer normalization.
uint32_t GetNumInputs(bool biasEnabled)
DataType
Definition: Types.hpp:48
float m_NmsIouThreshold
Intersection over union threshold.
float m_CellIntermediateScale
Cell intermediate quantization scale.
TransposeDescriptor(const PermutationVector &dimMappings)
An LstmDescriptor for the LstmLayer.
uint32_t m_PadRight
Padding right value in the width dimension.
#define ARMNN_NO_DEPRECATE_WARN_END
Definition: Deprecated.hpp:34
uint32_t m_DilationX
Dilation factor value for width dimension.
uint32_t m_PadTop
Padding top value in the height dimension.
std::vector< unsigned int > m_Begin
Beginning indices of the slice in each dimension.
int32_t m_NewAxisMask
New axis mask value.
bool m_KeepDims
Enable/disable keep dimensions. If true, then the reduced dimensions that are of length 1 are kept...
std::vector< unsigned int > m_BlockShape
Block shape values.
float m_Eps
Epsilon, small scalar value added to variance to avoid dividing by zero. Defaults to 1e-12f...
A L2NormalizationDescriptor for the L2NormalizationLayer.
An ArgMinMaxDescriptor for ArgMinMaxLayer.
Definition: Descriptors.hpp:67
An OriginsDescriptor for the ConcatLayer.
A ReduceDescriptor for the REDUCE operators.
float m_ProjectionClip
Clipping threshold value for the projection.
BatchMatMulDescriptor(Optional< DataLayout > dataLayoutX=EmptyOptional(), Optional< DataLayout > dataLayoutY=EmptyOptional(), std::vector< unsigned int > transposeX={}, std::vector< unsigned int > transposeY={}, std::vector< unsigned int > adjointX={}, std::vector< unsigned int > adjointY={})
A FullyConnectedDescriptor for the FullyConnectedLayer.
int32_t m_EllipsisMask
Ellipsis mask value.
bool m_BiasEnabled
Enable/disable bias.
float m_InputIntermediateScale
Input intermediate quantization scale.
OutputShapeRounding m_OutputShapeRounding
The rounding method for the output shape. (Floor, Ceiling).
A FakeQuantizationDescriptor for the FakeQuantizationLayer.
uint32_t m_TargetWidth
Target width value.
A GatherDescriptor for the GatherLayer.
uint32_t m_PadBottom
Padding bottom value in the height dimension.
bool m_PeepholeEnabled
Enable/disable peephole.
Status
enumeration
Definition: Types.hpp:42
uint32_t m_NumClasses
Number of classes.
std::vector< unsigned int > m_TransposeY
bool m_HalfPixelCenters
Half Pixel Centers.
ARMNN_NO_DEPRECATE_WARN_BEGIN struct ARMNN_DEPRECATED_MSG_REMOVAL_DATE("ResizeBilinearQueueDescriptor is deprecated use ResizeQueueDescriptor instead", "22.08") ResizeBilinearQueueDescriptor
float m_InputIntermediateScale
Input intermediate quantization scale.
uint32_t m_PadTop
Padding top value in the height dimension.
A StandInDescriptor for the StandIn layer.
A QLstmDescriptor for the QLstmLayer.
bool m_UseRegularNms
Use Regular NMS.
uint32_t m_PadFront
Padding front value in the depth dimension.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
std::vector< unsigned int > m_BlockShape
Block shape value.
std::vector< int > m_Stride
Stride values for the input that will be sliced.
PaddingMode
The padding mode controls whether the padding should be filled with constant values (Constant)...
Definition: Types.hpp:186
An ActivationDescriptor for the ActivationLayer.
Definition: Descriptors.hpp:36
SpaceToBatchNdDescriptor(const std::vector< unsigned int > &blockShape, const std::vector< std::pair< unsigned int, unsigned int >> &padList)
uint32_t m_NumInputs
Number of input tensors.
uint32_t m_PadLeft
Padding left value in the width dimension.
uint32_t m_TargetHeight
Target height value.
uint32_t m_ActivationFunc
The activation function to use.
A SliceDescriptor for the SliceLayer.
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
A Convolution3dDescriptor for the Convolution3dLayer.
uint32_t m_PadRight
Padding right value in the width dimension.
float m_ClippingThresCell
Clipping threshold value for the cell state.
unsigned int m_BlockSize
Scalar specifying the input block size. It must be >= 1.
uint32_t m_NumGroups
Number of groups for the channel shuffle operation.
A BatchMatMulDescriptor for the BatchMatMul operator.
PaddingMode m_PaddingMode
Specifies the Padding mode (Constant, Reflect or Symmetric)
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
float m_ForgetIntermediateScale
Forget intermediate quantization scale.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
float m_Beta
Beta, the offset scalar value applied for the normalized tensor. Defaults to 1.0. ...
float m_HiddenStateScale
Hidden State quantization scale.
A Pooling3dDescriptor for the Pooling3dLayer.
uint32_t m_StrideZ
Stride value when proceeding through input for the depth dimension.
std::vector< uint32_t > m_vAxis
The indices of the dimensions to reduce.
float m_ScaleH
Center size encoding scale height.
ComparisonOperation m_Operation
Specifies the comparison operation to execute.
std::vector< int > m_End
End values for the input that will be sliced.
A SpaceToBatchNdDescriptor for the SpaceToBatchNdLayer.
OutputShapeRounding
Definition: Types.hpp:207
DataLayout m_DataLayout
The data layout to be used (NDHWC, NCDHW).
NormalizationAlgorithmChannel m_NormChannelType
Normalization channel algorithm to use (Across, Within).
float m_CellClip
Clipping threshold value for the cell state.
float m_A
Alpha upper bound value used by the activation functions. (BoundedReLu, Linear, TanH, Elu).
Definition: Descriptors.hpp:61
uint32_t m_DilationX
Dilation along x axis.
FillDescriptor(const float &value)
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
bool m_CifgEnabled
Enable/disable cifg (coupled input & forget gate).
StandInDescriptor(uint32_t numInputs, uint32_t numOutputs)
uint32_t m_PadLeft
Padding left value in the width dimension.
EmptyOptional is used to initialize the Optional class in case we want to have default value for an O...
Definition: Optional.hpp:32
bool m_AlignCorners
Aligned corners.
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
int32_t m_Axis
The axis in params to gather indices from.
A ElementwiseUnaryDescriptor for the ElementwiseUnaryLayer.
std::vector< unsigned int > m_AdjointY
PoolingAlgorithm m_PoolType
The pooling algorithm to use (Max, Average, L2).
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
uint32_t m_PadLeft
Padding left value in the width dimension.
SpaceToDepthDescriptor(unsigned int blockSize, DataLayout dataLayout)
std::vector< std::pair< unsigned int, unsigned int > > m_Crops
The values to crop from the input dimension.
uint32_t m_PadTop
Padding top value in the height dimension.
uint32_t m_PadTop
Padding top value in the height dimension.
bool m_ProjectionEnabled
Enable/disable the projection layer.
ArgMinMaxFunction
Definition: Types.hpp:102
OutputShapeRounding m_OutputShapeRounding
The rounding method for the output shape. (Floor, Ceiling).
uint32_t m_NumInputs
Number of input tensors.
void SetConcatAxis(unsigned int concatAxis)
Set the concatenation axis value.
ResizeMethod
Definition: Types.hpp:152
A MeanDescriptor for the MeanLayer.
UnaryOperation
Definition: Types.hpp:124
bool m_LayerNormEnabled
Enable/disable layer normalization.
uint32_t m_PadRight
Padding right value in the width dimension.
A TransposeDescriptor for the TransposeLayer.
A StridedSliceDescriptor for the StridedSliceLayer.
uint32_t m_Axis
Axis to apply channel shuffle operation on.
int m_Axis
Axis to reduce across the input tensor.
Definition: Descriptors.hpp:83
float m_ScaleY
Center size encoding scale y.
OriginsDescriptor CreateDescriptorForConcatenation(TensorShapeIt first, TensorShapeIt last, unsigned int concatenationDimension)
Convenience template to create an OriginsDescriptor to use when creating a ConcatLayer for performing...
float m_NmsScoreThreshold
NMS score threshold.
A PreCompiledDescriptor for the PreCompiledLayer.
GatherDescriptor(int32_t axis)
Krichevsky 2012: Local Brightness Normalization.
A Pooling2dDescriptor for the Pooling2dLayer.
A NormalizationDescriptor for the NormalizationLayer.
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
An InstanceNormalizationDescriptor for InstanceNormalizationLayer.
PaddingMethod m_PaddingMethod
The padding method to be used. (Exclude, IgnoreValue).
NormalizationAlgorithmMethod
Definition: Types.hpp:199
A ChannelShuffleDescriptor for the ChannelShuffle operator.
StackDescriptor(uint32_t axis, uint32_t numInputs, const TensorShape &inputShape)
ReshapeDescriptor(const TensorShape &shape)
float m_CellIntermediateScale
Cell intermediate quantization scale.
Optional< DataLayout > m_DataLayoutY
LogicalBinaryDescriptor(LogicalBinaryOperation operation)
DetectionPostProcessDescriptor()
uint32_t m_DilationZ
Dilation along z axis.
float m_B
Beta lower bound value used by the activation functions. (BoundedReLu, Linear, TanH).
Definition: Descriptors.hpp:63
A SoftmaxDescriptor for the SoftmaxLayer.
float m_Beta
Beta value for the normalization equation.
virtual bool IsNull() const
Definition: Descriptors.hpp:24
uint32_t m_StrideZ
Stride value when proceeding through input for the depth dimension.
BatchToSpaceNdDescriptor(std::vector< unsigned int > blockShape, std::vector< std::pair< unsigned int, unsigned int >> crops)
bool m_CifgEnabled
Enable/disable CIFG (coupled input & forget gate).
PermutationVector m_DimMappings
Indicates how to translate tensor elements from a given source into the target destination, when source and target potentially have different memory layouts e.g.
uint32_t m_NormSize
Depth radius value.
ActivationFunction m_Function
The activation function to use (Sigmoid, TanH, Linear, ReLu, BoundedReLu, SoftReLu, LeakyReLu, Abs, Sqrt, Square, Elu).
Definition: Descriptors.hpp:59
armnn::DataType m_Output_Type
Deprecated and will be removed in future release.
Definition: Descriptors.hpp:85
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
A DepthwiseConvolution2dDescriptor for the DepthwiseConvolution2dLayer.
uint32_t m_DilationY
Dilation along y axis.
A FillDescriptor for the FillLayer.
A BatchNormalizationDescriptor for the BatchNormalizationLayer.
uint32_t m_PadLeft
Padding left value in the width dimension.
ActivationFunction
Definition: Types.hpp:86
Status SetViewOriginCoord(uint32_t view, uint32_t coord, uint32_t value)
Set the view origin coordinates.
A PermuteDescriptor for the PermuteLayer.
uint32_t m_PadRight
Padding right value in the width dimension.
int32_t m_HiddenStateZeroPoint
Hidden State zero point.
float m_OutputIntermediateScale
Output intermediate quantization scale.
bool m_ConstantWeights
Enable/disable constant weights and biases.