ArmNN
 20.02
Descriptors.hpp
1 //
2 // Copyright © 2017 Arm Ltd. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 #pragma once
6 
7 #include "Deprecated.hpp"
8 #include "DescriptorsFwd.hpp"
9 
10 #include <cstdint>
11 #include <initializer_list>
12 
13 #include "Tensor.hpp"
14 #include "Types.hpp"
15 
16 namespace armnn
17 {
18 
19 /// An ActivationDescriptor for the ActivationLayer.
20 struct ActivationDescriptor
21 {
22  ActivationDescriptor()
23  : m_Function(ActivationFunction::Sigmoid)
24  , m_A(0)
25  , m_B(0)
26  {}
27 
28  bool operator ==(const ActivationDescriptor &rhs) const
29  {
30  return m_Function == rhs.m_Function && m_A == rhs.m_A && m_B == rhs.m_B;
31  }
32 
33  /// @brief The activation function to use
34  /// (Sigmoid, TanH, Linear, ReLu, BoundedReLu, SoftReLu, LeakyReLu, Abs, Sqrt, Square).
35  ActivationFunction m_Function;
36  /// Alpha upper bound value used by the activation functions. (BoundedReLu, Linear, TanH).
37  float m_A;
38  /// Beta lower bound value used by the activation functions. (BoundedReLu, Linear, TanH).
39  float m_B;
40 };
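
// Usage sketch (editorial addition, not part of the original header): configuring a
// bounded ReLU that clamps activations to the range [0, 6]. The helper name is illustrative.
inline ActivationDescriptor MakeBoundedReluDescriptor()
{
    ActivationDescriptor activation;
    activation.m_Function = ActivationFunction::BoundedReLu;
    activation.m_A = 6.0f; // upper bound
    activation.m_B = 0.0f; // lower bound
    return activation;
}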
41 
42 /// An ArgMinMaxDescriptor for the ArgMinMaxLayer.
43 struct ArgMinMaxDescriptor
44 {
45  ArgMinMaxDescriptor()
46  : m_Function(ArgMinMaxFunction::Min)
47  , m_Axis(-1)
48  {}
49 
50  bool operator ==(const ArgMinMaxDescriptor &rhs) const
51  {
52  return m_Function == rhs.m_Function && m_Axis == rhs.m_Axis;
53  }
54 
55  /// Specify if the function is to find Min or Max.
56  ArgMinMaxFunction m_Function;
57  /// Axis to reduce across the input tensor.
58  int m_Axis;
59 };
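
// Usage sketch (editorial addition): an ArgMinMaxDescriptor that returns the index of the
// maximum value along axis 1, e.g. the class dimension of a { batch, classes } tensor.
// The helper name is illustrative.
inline ArgMinMaxDescriptor MakeArgMaxDescriptor()
{
    ArgMinMaxDescriptor argMax;
    argMax.m_Function = ArgMinMaxFunction::Max;
    argMax.m_Axis     = 1;
    return argMax;
}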
60 
61 /// A ComparisonDescriptor for the ComparisonLayer.
62 struct ComparisonDescriptor
63 {
64  ComparisonDescriptor()
65  : ComparisonDescriptor(ComparisonOperation::Equal)
66  {}
67 
68  ComparisonDescriptor(ComparisonOperation operation)
69  : m_Operation(operation)
70  {}
71 
72  bool operator ==(const ComparisonDescriptor &rhs) const
73  {
74  return m_Operation == rhs.m_Operation;
75  }
76 
77  /// Specifies the comparison operation to execute.
78  ComparisonOperation m_Operation;
79 };
80 
81 /// An ElementwiseUnaryDescriptor for the ElementwiseUnaryLayer.
82 struct ElementwiseUnaryDescriptor
83 {
84  ElementwiseUnaryDescriptor()
85  : ElementwiseUnaryDescriptor(UnaryOperation::Abs)
86  {}
87 
88  ElementwiseUnaryDescriptor(UnaryOperation operation)
89  : m_Operation(operation)
90  {}
91 
92  bool operator ==(const ElementwiseUnaryDescriptor &rhs) const
93  {
94  return m_Operation == rhs.m_Operation;
95  }
96 
97  /// Specifies the elementwiseUnary operation to execute.
98  UnaryOperation m_Operation;
99 };
100 
101 /// A PermuteDescriptor for the PermuteLayer.
102 struct PermuteDescriptor
103 {
104  PermuteDescriptor()
105  : m_DimMappings{}
106  {}
107 
108  PermuteDescriptor(const PermutationVector& dimMappings)
109  : m_DimMappings(dimMappings)
110  {}
111 
112  bool operator ==(const PermuteDescriptor &rhs) const
113  {
114  return m_DimMappings.IsEqual(rhs.m_DimMappings);
115  }
116 
117  /// @brief Indicates how to translate tensor elements from a given source into the target destination, when
118  /// source and target potentially have different memory layouts e.g. {0U, 3U, 1U, 2U}.
119  PermutationVector m_DimMappings;
120 };
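
// Usage sketch (editorial addition): a PermuteDescriptor that rearranges an NCHW tensor into
// NHWC. Each entry of the mapping gives the destination position of the corresponding source
// dimension, so {0, 3, 1, 2} sends C (source dim 1) to destination dim 3. The helper name is illustrative.
inline PermuteDescriptor MakeNchwToNhwcPermuteDescriptor()
{
    return PermuteDescriptor(PermutationVector({ 0U, 3U, 1U, 2U }));
}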
121 
122 /// A SoftmaxDescriptor for the SoftmaxLayer.
123 struct SoftmaxDescriptor
124 {
125  SoftmaxDescriptor()
126  : m_Beta(1.0f)
127  , m_Axis(-1)
128  {}
129 
130  bool operator ==(const SoftmaxDescriptor& rhs) const
131  {
132  return m_Beta == rhs.m_Beta && m_Axis == rhs.m_Axis;
133  }
134 
135  /// Exponentiation value.
136  float m_Beta;
137  /// Scalar, defaulted to the last index (-1), specifying the dimension the activation will be performed on.
138  int m_Axis;
139 };
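
// Usage sketch (editorial addition): a standard softmax over the last dimension. m_Beta scales
// the inputs before exponentiation; 1.0 gives the usual softmax. The helper name is illustrative.
inline SoftmaxDescriptor MakeSoftmaxDescriptor()
{
    SoftmaxDescriptor softmax;
    softmax.m_Beta = 1.0f;
    softmax.m_Axis = -1; // normalise over the last dimension
    return softmax;
}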
140 
141 /// A LogSoftmaxDescriptor for the LogSoftmaxLayer.
142 using LogSoftmaxDescriptor = SoftmaxDescriptor;
143 
144 /// @brief An OriginsDescriptor for the ConcatLayer.
145 /// Descriptor to configure the concatenation process. Number of views must be equal to the number of inputs, and
146 /// their order must match - e.g. first view corresponds to the first input, second view to the second input, etc.
147 struct OriginsDescriptor
148 {
149  OriginsDescriptor();
150  OriginsDescriptor(uint32_t numViews, uint32_t numDimensions = 4);
151  OriginsDescriptor(const OriginsDescriptor& other);
152  OriginsDescriptor(OriginsDescriptor&& other);
153 
154  ~OriginsDescriptor();
155 
156  OriginsDescriptor& operator=(OriginsDescriptor rhs);
157 
158  bool operator ==(const OriginsDescriptor& rhs) const;
159 
160  /// @brief Set the view origin coordinates. The arguments are: view, dimension, value.
161  /// If the view is greater than or equal to GetNumViews(), then the view argument is out of range.
162  /// If the coord is greater than or equal to GetNumDimensions(), then the coord argument is out of range.
163  Status SetViewOriginCoord(uint32_t view, uint32_t coord, uint32_t value);
164  /// Get the number of views.
165  uint32_t GetNumViews() const;
166  /// Get the number of dimensions.
167  uint32_t GetNumDimensions() const;
168  /// Return the view origin at the int value idx.
169  const uint32_t* GetViewOrigin(uint32_t idx) const;
170  /// @brief Reorders the viewOrigins in accordance with the indices presented in newOrdering array.
171  /// The number of views must match number of elements in the new ordering array.
172  void ReorderOrigins(unsigned int* newOrdering, unsigned int numNewOrdering);
173  /// Swap the ViewsDescriptor values first and second.
174  friend void swap(OriginsDescriptor& first, OriginsDescriptor& second);
175  /// Set the concatenation axis value.
176  void SetConcatAxis(unsigned int concatAxis);
177  /// Get the concatenation axis value.
178  unsigned int GetConcatAxis() const;
179 
180 private:
181  unsigned int m_ConcatAxis;
182  uint32_t m_NumViews;
183  uint32_t m_NumDimensions;
184  uint32_t** m_ViewOrigins;
185 };
186 
187 /// @brief A ViewsDescriptor for the SplitterLayer.
188 /// Descriptor to configure the splitting process. Number of Views must be equal to the number of outputs, and
189 /// their order must match - e.g. first view corresponds to the first output, second view to the second output, etc.
190 struct ViewsDescriptor
191 {
192  ViewsDescriptor(uint32_t numViews, uint32_t numDimensions = 4);
193  ViewsDescriptor(const ViewsDescriptor& other);
194  ViewsDescriptor();
195  ViewsDescriptor(ViewsDescriptor&& other);
196 
197  ~ViewsDescriptor();
198 
199  ViewsDescriptor& operator=(ViewsDescriptor rhs);
200 
201  bool operator ==(const ViewsDescriptor& rhs) const;
202 
203  /// @brief Set the view origin coordinates. The arguments are: view, dimension, value.
204  /// If the view is greater than or equal to GetNumViews(), then the view argument is out of range.
205  /// If the coord is greater than or equal to GetNumDimensions(), then the coord argument is out of range.
206  Status SetViewOriginCoord(uint32_t view, uint32_t coord, uint32_t value);
207  /// @brief Set the size of the views. The arguments are: view, dimension, value.
208  /// If the view is greater than or equal to GetNumViews(), then the view argument is out of range.
209  /// If the coord is greater than or equal to GetNumDimensions(), then the coord argument is out of range.
210  Status SetViewSize(uint32_t view, uint32_t coord, uint32_t value);
211 
212  /// Get the number of views.
213  uint32_t GetNumViews() const;
214  /// Get the number of dimensions.
215  uint32_t GetNumDimensions() const;
216  /// Get the view origin at the int value idx.
217  const uint32_t* GetViewOrigin(uint32_t idx) const;
218  /// Get the view sizes at the int value idx.
219  const uint32_t* GetViewSizes(uint32_t idx) const;
220  /// Get the View Origins
221  const OriginsDescriptor& GetOrigins() const;
222 
223  /// Swap the ViewsDescriptor value first and second.
224  friend void swap(ViewsDescriptor& first, ViewsDescriptor& second);
225 private:
226  OriginsDescriptor m_Origins;
227  uint32_t** m_ViewSizes;
228 };
229 
230 template <typename TensorShapeIt>
231 ARMNN_DEPRECATED_MSG("Use CreateDescriptorForConcatenation instead")
232 OriginsDescriptor CreateMergerDescriptorForConcatenation(TensorShapeIt first,
233  TensorShapeIt last,
234  unsigned int concatenationDimension)
235 {
236  return CreateDescriptorForConcatenation(first, last, concatenationDimension);
237 }
238 
239 /// @brief Convenience template to create an OriginsDescriptor to use when creating a ConcatLayer for performing
240 /// concatenation of a number of input tensors.
241 template <typename TensorShapeIt>
242 OriginsDescriptor CreateDescriptorForConcatenation(TensorShapeIt first,
243  TensorShapeIt last,
244  unsigned int concatenationDimension)
245 {
246  auto numInputs = std::distance(first, last);
247 
248  if (numInputs < 2)
249  {
250  throw InvalidArgumentException("Concatenation requires at least 2 inputs");
251  }
252 
253  const auto& firstInputShape = *first;
254 
255  const unsigned int numDimensions = firstInputShape.GetNumDimensions();
256  for (auto it = first + 1; it != last; ++it)
257  {
258  if (it->GetNumDimensions() != numDimensions)
259  {
260  throw InvalidArgumentException("All inputs to concatenation must have the same number of dimensions");
261  }
262  }
263 
264  if (concatenationDimension >= numDimensions)
265  {
266  throw InvalidArgumentException("concatenationDimension must be between 0 and the number of dimensions.");
267  }
268 
269  for (auto it = first; it != last; ++it)
270  {
271  for (unsigned int d = 0; d < numDimensions; ++d)
272  {
273  const bool dimSizeOk = (d == concatenationDimension) || (firstInputShape[d] == (*it)[d]);
274  if (!dimSizeOk)
275  {
276  throw InvalidArgumentException("All inputs to concatenation must be the same size along all dimensions "
277  " except the concatenation dimension");
278  }
279  }
280  }
281 
282  OriginsDescriptor viewsDescriptor(static_cast<uint32_t>(numInputs), numDimensions);
283  viewsDescriptor.SetConcatAxis(concatenationDimension);
284 
285  uint32_t viewIndex = 0u;
286  uint32_t coordAlongConcatDim = 0u;
287  for (auto it = first; it != last; ++it)
288  {
289  const auto& inputShape = *it;
290 
291  for (unsigned int i = 0; i < concatenationDimension; ++i)
292  {
293  viewsDescriptor.SetViewOriginCoord(viewIndex, i, 0);
294  }
295 
296  viewsDescriptor.SetViewOriginCoord(viewIndex, concatenationDimension, coordAlongConcatDim);
297  unsigned int dimSize = inputShape[concatenationDimension];
298  coordAlongConcatDim += dimSize;
299 
300 
301  for (unsigned int i = concatenationDimension + 1; i < numDimensions; ++i)
302  {
303  viewsDescriptor.SetViewOriginCoord(viewIndex, i, 0);
304  }
305 
306  ++viewIndex;
307  }
308 
309  return viewsDescriptor;
310 }
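
// Usage sketch (editorial addition): building an OriginsDescriptor for concatenating two NHWC
// tensors along the channel dimension (dimension 3). The shapes and the helper name are illustrative.
inline OriginsDescriptor MakeChannelConcatDescriptor()
{
    const TensorShape inputShapes[] = { TensorShape({ 1, 8, 8, 16 }),
                                        TensorShape({ 1, 8, 8, 32 }) };
    // The second view starts at channel offset 16; the concatenated output is { 1, 8, 8, 48 }.
    return CreateDescriptorForConcatenation(inputShapes, inputShapes + 2, 3);
}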
311 
312 /// A Pooling2dDescriptor for the Pooling2dLayer.
313 struct Pooling2dDescriptor
314 {
315  Pooling2dDescriptor()
316  : m_PoolType(PoolingAlgorithm::Max)
317  , m_PadLeft(0)
318  , m_PadRight(0)
319  , m_PadTop(0)
320  , m_PadBottom(0)
321  , m_PoolWidth(0)
322  , m_PoolHeight(0)
323  , m_StrideX(0)
324  , m_StrideY(0)
325  , m_OutputShapeRounding(OutputShapeRounding::Floor)
326  , m_PaddingMethod(PaddingMethod::Exclude)
327  , m_DataLayout(DataLayout::NCHW)
328  {}
329 
330  bool operator ==(const Pooling2dDescriptor& rhs) const
331  {
332  return m_PoolType == rhs.m_PoolType &&
333  m_PadLeft == rhs.m_PadLeft &&
334  m_PadRight == rhs.m_PadRight &&
335  m_PadTop == rhs.m_PadTop &&
336  m_PadBottom == rhs.m_PadBottom &&
337  m_PoolWidth == rhs.m_PoolWidth &&
338  m_PoolHeight == rhs.m_PoolHeight &&
339  m_StrideX == rhs.m_StrideX &&
340  m_StrideY == rhs.m_StrideY &&
341  m_OutputShapeRounding == rhs.m_OutputShapeRounding &&
342  m_PaddingMethod == rhs.m_PaddingMethod &&
343  m_DataLayout == rhs.m_DataLayout;
344  }
345 
346  /// The pooling algorithm to use (Max, Average, L2).
347  PoolingAlgorithm m_PoolType;
348  /// Padding left value in the width dimension.
349  uint32_t m_PadLeft;
350  /// Padding right value in the width dimension.
351  uint32_t m_PadRight;
352  /// Padding top value in the height dimension.
353  uint32_t m_PadTop;
354  /// Padding bottom value in the height dimension.
355  uint32_t m_PadBottom;
356  /// Pooling width value.
357  uint32_t m_PoolWidth;
358  /// Pooling height value.
359  uint32_t m_PoolHeight;
360  /// Stride value when proceeding through input for the width dimension.
361  uint32_t m_StrideX;
362  /// Stride value when proceeding through input for the height dimension.
363  uint32_t m_StrideY;
364  /// The rounding method for the output shape. (Floor, Ceiling).
365  OutputShapeRounding m_OutputShapeRounding;
366  /// The padding method to be used. (Exclude, IgnoreValue).
367  PaddingMethod m_PaddingMethod;
368  /// The data layout to be used (NCHW, NHWC).
369  DataLayout m_DataLayout;
370 };
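
// Usage sketch (editorial addition): a 2x2 max pooling with stride 2 on NHWC data, i.e. the
// common "halve the spatial resolution" pooling. The helper name is illustrative.
inline Pooling2dDescriptor MakeMaxPool2x2Descriptor()
{
    Pooling2dDescriptor pooling;
    pooling.m_PoolType   = PoolingAlgorithm::Max;
    pooling.m_PoolWidth  = 2;
    pooling.m_PoolHeight = 2;
    pooling.m_StrideX    = 2;
    pooling.m_StrideY    = 2;
    pooling.m_DataLayout = DataLayout::NHWC;
    return pooling;
}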
371 
372 /// A FullyConnectedDescriptor for the FullyConnectedLayer.
373 struct FullyConnectedDescriptor
374 {
375  FullyConnectedDescriptor()
376  : m_BiasEnabled(false)
377  , m_TransposeWeightMatrix(false)
378  {}
379 
380  bool operator ==(const FullyConnectedDescriptor& rhs) const
381  {
382  return m_BiasEnabled == rhs.m_BiasEnabled && m_TransposeWeightMatrix == rhs.m_TransposeWeightMatrix;
383  }
384 
385  /// Enable/disable bias.
386  bool m_BiasEnabled;
387  /// Enable/disable transpose weight matrix.
388  bool m_TransposeWeightMatrix;
389 };
390 
391 /// A Convolution2dDescriptor for the Convolution2dLayer.
392 struct Convolution2dDescriptor
393 {
394  Convolution2dDescriptor()
395  : m_PadLeft(0)
396  , m_PadRight(0)
397  , m_PadTop(0)
398  , m_PadBottom(0)
399  , m_StrideX(0)
400  , m_StrideY(0)
401  , m_DilationX(1)
402  , m_DilationY(1)
403  , m_BiasEnabled(false)
404  , m_DataLayout(DataLayout::NCHW)
405  {}
406 
407  bool operator ==(const Convolution2dDescriptor& rhs) const
408  {
409  return m_PadLeft == rhs.m_PadLeft &&
410  m_PadRight == rhs.m_PadRight &&
411  m_PadTop == rhs.m_PadTop &&
412  m_PadBottom == rhs.m_PadBottom &&
413  m_StrideX == rhs.m_StrideX &&
414  m_StrideY == rhs.m_StrideY &&
415  m_DilationX == rhs.m_DilationX &&
416  m_DilationY == rhs.m_DilationY &&
417  m_BiasEnabled == rhs.m_BiasEnabled &&
418  m_DataLayout == rhs.m_DataLayout;
419  }
420 
421  /// Padding left value in the width dimension.
422  uint32_t m_PadLeft;
423  /// Padding right value in the width dimension.
424  uint32_t m_PadRight;
425  /// Padding top value in the height dimension.
426  uint32_t m_PadTop;
427  /// Padding bottom value in the height dimension.
428  uint32_t m_PadBottom;
429  /// Stride value when proceeding through input for the width dimension.
430  uint32_t m_StrideX;
431  /// Stride value when proceeding through input for the height dimension.
432  uint32_t m_StrideY;
433  /// Dilation along x axis
434  uint32_t m_DilationX;
435  /// Dilation along y axis
436  uint32_t m_DilationY;
437  /// Enable/disable bias.
438  bool m_BiasEnabled;
439  /// The data layout to be used (NCHW, NHWC).
440  DataLayout m_DataLayout;
441 };
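
// Usage sketch (editorial addition): a 3x3 convolution with stride 1 and one pixel of padding on
// every edge ("same" padding for a 3x3 kernel). The kernel size itself comes from the weights
// tensor passed to the layer, not from the descriptor. The helper name is illustrative.
inline Convolution2dDescriptor MakeSame3x3ConvDescriptor()
{
    Convolution2dDescriptor conv;
    conv.m_PadLeft     = 1;
    conv.m_PadRight    = 1;
    conv.m_PadTop      = 1;
    conv.m_PadBottom   = 1;
    conv.m_StrideX     = 1;
    conv.m_StrideY     = 1;
    conv.m_BiasEnabled = true;
    conv.m_DataLayout  = DataLayout::NHWC;
    return conv;
}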
442 
443 /// A DepthwiseConvolution2dDescriptor for the DepthwiseConvolution2dLayer.
444 struct DepthwiseConvolution2dDescriptor
445 {
446  DepthwiseConvolution2dDescriptor()
447  : m_PadLeft(0)
448  , m_PadRight(0)
449  , m_PadTop(0)
450  , m_PadBottom(0)
451  , m_StrideX(0)
452  , m_StrideY(0)
453  , m_DilationX(1)
454  , m_DilationY(1)
455  , m_BiasEnabled(false)
456  , m_DataLayout(DataLayout::NCHW)
457  {}
458 
459  bool operator ==(const DepthwiseConvolution2dDescriptor& rhs) const
460  {
461  return m_PadLeft == rhs.m_PadLeft &&
462  m_PadRight == rhs.m_PadRight &&
463  m_PadTop == rhs.m_PadTop &&
464  m_PadBottom == rhs.m_PadBottom &&
465  m_StrideX == rhs.m_StrideX &&
466  m_StrideY == rhs.m_StrideY &&
467  m_DilationX == rhs.m_DilationX &&
468  m_DilationY == rhs.m_DilationY &&
469  m_BiasEnabled == rhs.m_BiasEnabled &&
470  m_DataLayout == rhs.m_DataLayout;
471  }
472 
473  /// Padding left value in the width dimension.
474  uint32_t m_PadLeft;
475  /// Padding right value in the width dimension.
476  uint32_t m_PadRight;
477  /// Padding top value in the height dimension.
478  uint32_t m_PadTop;
479  /// Padding bottom value in the height dimension.
480  uint32_t m_PadBottom;
481  /// Stride value when proceeding through input for the width dimension.
482  uint32_t m_StrideX;
483  /// Stride value when proceeding through input for the height dimension.
484  uint32_t m_StrideY;
485  /// Dilation factor value for width dimension.
486  uint32_t m_DilationX;
487  /// Dilation factor value for height dimension.
488  uint32_t m_DilationY;
489  /// Enable/disable bias.
490  bool m_BiasEnabled;
491  /// The data layout to be used (NCHW, NHWC).
492  DataLayout m_DataLayout;
493 };
494 
495 struct DetectionPostProcessDescriptor
496 {
497  DetectionPostProcessDescriptor()
498  : m_MaxDetections(0)
499  , m_MaxClassesPerDetection(1)
500  , m_DetectionsPerClass(1)
501  , m_NmsScoreThreshold(0)
502  , m_NmsIouThreshold(0)
503  , m_NumClasses(0)
504  , m_UseRegularNms(false)
505  , m_ScaleX(0)
506  , m_ScaleY(0)
507  , m_ScaleW(0)
508  , m_ScaleH(0)
509  {}
510 
511  bool operator ==(const DetectionPostProcessDescriptor& rhs) const
512  {
513  return m_MaxDetections == rhs.m_MaxDetections &&
514  m_MaxClassesPerDetection == rhs.m_MaxClassesPerDetection &&
515  m_DetectionsPerClass == rhs.m_DetectionsPerClass &&
516  m_NmsScoreThreshold == rhs.m_NmsScoreThreshold &&
517  m_NmsIouThreshold == rhs.m_NmsIouThreshold &&
518  m_NumClasses == rhs.m_NumClasses &&
519  m_UseRegularNms == rhs.m_UseRegularNms &&
520  m_ScaleX == rhs.m_ScaleX &&
521  m_ScaleY == rhs.m_ScaleY &&
522  m_ScaleW == rhs.m_ScaleW &&
523  m_ScaleH == rhs.m_ScaleH;
524  }
525 
526  /// Maximum number of detections.
527  uint32_t m_MaxDetections;
528  /// Maximum number of classes per detection, used in Fast NMS.
529  uint32_t m_MaxClassesPerDetection;
530  /// Detections per class, used in Regular NMS.
531  uint32_t m_DetectionsPerClass;
532  /// NMS score threshold.
533  float m_NmsScoreThreshold;
534  /// Intersection over union threshold.
535  float m_NmsIouThreshold;
536  /// Number of classes.
537  uint32_t m_NumClasses;
538  /// Use Regular NMS.
539  bool m_UseRegularNms;
540  /// Center size encoding scale x.
541  float m_ScaleX;
542  /// Center size encoding scale y.
543  float m_ScaleY;
544  /// Center size encoding scale weight.
545  float m_ScaleW;
546  /// Center size encoding scale height.
547  float m_ScaleH;
548 };
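
// Usage sketch (editorial addition): a configuration in the style of an SSD-MobileNet
// post-processing stage. All values here are illustrative, not defaults taken from the library.
inline DetectionPostProcessDescriptor MakeSsdPostProcessDescriptor()
{
    DetectionPostProcessDescriptor postProcess;
    postProcess.m_MaxDetections     = 10;
    postProcess.m_NumClasses        = 90;
    postProcess.m_NmsScoreThreshold = 0.5f;
    postProcess.m_NmsIouThreshold   = 0.6f;
    postProcess.m_UseRegularNms     = false; // use Fast NMS
    postProcess.m_ScaleX = 10.0f;            // centre-size box decoding scales
    postProcess.m_ScaleY = 10.0f;
    postProcess.m_ScaleW = 5.0f;
    postProcess.m_ScaleH = 5.0f;
    return postProcess;
}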
549 
550 /// A NormalizationDescriptor for the NormalizationLayer.
551 struct NormalizationDescriptor
552 {
553  NormalizationDescriptor()
554  : m_NormChannelType(NormalizationAlgorithmChannel::Across)
555  , m_NormMethodType(NormalizationAlgorithmMethod::LocalBrightness)
556  , m_NormSize(0)
557  , m_Alpha(0.f)
558  , m_Beta(0.f)
559  , m_K(0.f)
560  , m_DataLayout(DataLayout::NCHW)
561  {}
562 
563  bool operator ==(const NormalizationDescriptor& rhs) const
564  {
565  return m_NormChannelType == rhs.m_NormChannelType &&
566  m_NormMethodType == rhs.m_NormMethodType &&
567  m_NormSize == rhs.m_NormSize &&
568  m_Alpha == rhs.m_Alpha &&
569  m_Beta == rhs.m_Beta &&
570  m_K == rhs.m_K &&
571  m_DataLayout == rhs.m_DataLayout;
572  }
573 
574  /// Normalization channel algorithm to use (Across, Within).
575  NormalizationAlgorithmChannel m_NormChannelType;
576  /// Normalization method algorithm to use (LocalBrightness, LocalContrast).
577  NormalizationAlgorithmMethod m_NormMethodType;
578  /// Depth radius value.
579  uint32_t m_NormSize;
580  /// Alpha value for the normalization equation.
581  float m_Alpha;
582  /// Beta value for the normalization equation.
583  float m_Beta;
584  /// Kappa value used for the across channel normalization equation.
585  float m_K;
586  /// The data layout to be used (NCHW, NHWC).
587  DataLayout m_DataLayout;
588 };
589 
590 /// An L2NormalizationDescriptor for the L2NormalizationLayer.
591 struct L2NormalizationDescriptor
592 {
593  L2NormalizationDescriptor()
594  : m_Eps(1e-12f)
595  , m_DataLayout(DataLayout::NCHW)
596  {}
597 
598  bool operator ==(const L2NormalizationDescriptor& rhs) const
599  {
600  return m_Eps == rhs.m_Eps && m_DataLayout == rhs.m_DataLayout;
601  }
602 
603  /// Used to avoid dividing by zero.
604  float m_Eps;
605  /// The data layout to be used (NCHW, NHWC).
606  DataLayout m_DataLayout;
607 };
608 
609 /// A BatchNormalizationDescriptor for the BatchNormalizationLayer.
610 struct BatchNormalizationDescriptor
611 {
612  BatchNormalizationDescriptor()
613  : m_Eps(0.0001f)
614  , m_DataLayout(DataLayout::NCHW)
615  {}
616 
617  bool operator ==(const BatchNormalizationDescriptor& rhs) const
618  {
619  return m_Eps == rhs.m_Eps && m_DataLayout == rhs.m_DataLayout;
620  }
621 
622  /// Value to add to the variance. Used to avoid dividing by zero.
623  float m_Eps;
624  /// The data layout to be used (NCHW, NHWC).
625  DataLayout m_DataLayout;
626 };
627 
628 /// An InstanceNormalizationDescriptor for InstanceNormalizationLayer
629 struct InstanceNormalizationDescriptor
630 {
631  InstanceNormalizationDescriptor()
632  : m_Gamma(1.0f)
633  , m_Beta(0.0f)
634  , m_Eps(1e-12f)
635  , m_DataLayout(DataLayout::NCHW)
636  {}
637 
638  bool operator ==(const InstanceNormalizationDescriptor& rhs) const
639  {
640  return m_Gamma == rhs.m_Gamma &&
641  m_Beta == rhs.m_Beta &&
642  m_Eps == rhs.m_Eps &&
643  m_DataLayout == rhs.m_DataLayout;
644  }
645 
646  /// Gamma, the scale scalar value applied for the normalized tensor. Defaults to 1.0.
647  float m_Gamma;
648  /// Beta, the offset scalar value applied for the normalized tensor. Defaults to 0.0.
649  float m_Beta;
650  /// Epsilon, small scalar value added to variance to avoid dividing by zero. Defaults to 1e-12f.
651  float m_Eps;
652  /// The data layout to be used (NCHW, NHWC).
653  DataLayout m_DataLayout;
654 };
655 
656 /// A BatchToSpaceNdDescriptor for the BatchToSpaceNdLayer.
657 struct BatchToSpaceNdDescriptor
658 {
659  BatchToSpaceNdDescriptor()
660  : m_BlockShape({1, 1})
661  , m_Crops({{0, 0}, {0, 0}})
662  , m_DataLayout(DataLayout::NCHW)
663  {}
664 
665  BatchToSpaceNdDescriptor(std::vector<unsigned int> blockShape,
666  std::vector<std::pair<unsigned int, unsigned int>> crops)
667  : m_BlockShape(blockShape)
668  , m_Crops(crops)
669  , m_DataLayout(DataLayout::NCHW)
670  {}
671 
672  bool operator ==(const BatchToSpaceNdDescriptor& rhs) const
673  {
674  return m_BlockShape == rhs.m_BlockShape &&
675  m_Crops == rhs.m_Crops &&
676  m_DataLayout == rhs.m_DataLayout;
677  }
678 
679  /// Block shape values.
680  std::vector<unsigned int> m_BlockShape;
681  /// The values to crop from the input dimension.
682  std::vector<std::pair<unsigned int, unsigned int>> m_Crops;
683  /// The data layout to be used (NCHW, NHWC).
684  DataLayout m_DataLayout;
685 };
686 
687 /// A FakeQuantizationDescriptor for the FakeQuantizationLayer.
688 struct FakeQuantizationDescriptor
689 {
690  FakeQuantizationDescriptor()
691  : m_Min(-6.0f)
692  , m_Max(6.0f)
693  {}
694 
695  bool operator ==(const FakeQuantizationDescriptor& rhs) const
696  {
697  return m_Min == rhs.m_Min && m_Max == rhs.m_Max;
698  }
699 
700  /// Minimum value.
701  float m_Min;
702  /// Maximum value.
703  float m_Max;
704 };
705 
706 /// A ResizeBilinearDescriptor for the ResizeBilinearLayer.
707 struct ResizeBilinearDescriptor
708 {
709  ResizeBilinearDescriptor()
710  : m_TargetWidth(0)
711  , m_TargetHeight(0)
712  , m_DataLayout(DataLayout::NCHW)
713  {}
714 
715  /// Target width value.
716  uint32_t m_TargetWidth;
717  /// Target height value.
718  uint32_t m_TargetHeight;
719  /// The data layout to be used (NCHW, NHWC).
720  DataLayout m_DataLayout;
721 };
722 
723 /// A ResizeDescriptor for the ResizeLayer.
724 struct ResizeDescriptor
725 {
726  ResizeDescriptor()
727  : m_TargetWidth(0)
728  , m_TargetHeight(0)
729  , m_Method(ResizeMethod::NearestNeighbor)
730  , m_DataLayout(DataLayout::NCHW)
731  , m_BilinearAlignCorners(false)
732  {}
733 
734  bool operator ==(const ResizeDescriptor& rhs) const
735  {
736  return m_TargetWidth == rhs.m_TargetWidth &&
737  m_TargetHeight == rhs.m_TargetHeight &&
738  m_Method == rhs.m_Method &&
739  m_DataLayout == rhs.m_DataLayout &&
740  m_BilinearAlignCorners == rhs.m_BilinearAlignCorners;
741  }
742 
743  /// Target width value.
744  uint32_t m_TargetWidth;
745  /// Target height value.
746  uint32_t m_TargetHeight;
747  /// The Interpolation method to use
748  /// (Bilinear, NearestNeighbor).
749  ResizeMethod m_Method;
750  /// The data layout to be used (NCHW, NHWC).
751  DataLayout m_DataLayout;
752  /// Aligned corners for bilinear method.
753  bool m_BilinearAlignCorners;
754 };
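
// Usage sketch (editorial addition): a bilinear resize of NHWC data to 224x224. The helper name
// and target size are illustrative.
inline ResizeDescriptor MakeBilinearResizeDescriptor()
{
    ResizeDescriptor resize;
    resize.m_TargetWidth  = 224;
    resize.m_TargetHeight = 224;
    resize.m_Method       = ResizeMethod::Bilinear;
    resize.m_DataLayout   = DataLayout::NHWC;
    return resize;
}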
755 
756 
757 /// A ReshapeDescriptor for the ReshapeLayer.
758 struct ReshapeDescriptor
759 {
760  ReshapeDescriptor()
761  : m_TargetShape()
762  {}
763 
764  ReshapeDescriptor(const TensorShape& shape)
765  : m_TargetShape(shape)
766  {}
767 
768  bool operator ==(const ReshapeDescriptor& rhs) const
769  {
770  return m_TargetShape == rhs.m_TargetShape;
771  }
772 
773  /// Target shape value.
774  TensorShape m_TargetShape;
775 };
776 
777 /// A SpaceToBatchNdDescriptor for the SpaceToBatchNdLayer.
778 struct SpaceToBatchNdDescriptor
779 {
780  SpaceToBatchNdDescriptor()
781  : m_BlockShape({1, 1})
782  , m_PadList({{0, 0}, {0, 0}})
783  , m_DataLayout(DataLayout::NCHW)
784  {}
785 
786  SpaceToBatchNdDescriptor(const std::vector<unsigned int>& blockShape,
787  const std::vector<std::pair<unsigned int, unsigned int>>& padList)
788  : m_BlockShape(blockShape)
789  , m_PadList(padList)
790  , m_DataLayout(DataLayout::NCHW)
791  {}
792 
793  bool operator ==(const SpaceToBatchNdDescriptor& rhs) const
794  {
795  return m_BlockShape == rhs.m_BlockShape &&
796  m_PadList == rhs.m_PadList &&
797  m_DataLayout == rhs.m_DataLayout;
798  }
799 
800  /// Block shape value.
801  std::vector<unsigned int> m_BlockShape;
802  /// @brief Specifies the padding values for the input dimension:
803  /// heightPad{top, bottom} widthPad{left, right}.
804  std::vector<std::pair<unsigned int, unsigned int>> m_PadList;
805  /// The data layout to be used (NCHW, NHWC).
806  DataLayout m_DataLayout;
807 };
808 
809 /// A SpaceToDepthDescriptor for the SpaceToDepthLayer
810 struct SpaceToDepthDescriptor
811 {
812  SpaceToDepthDescriptor()
813  : SpaceToDepthDescriptor(1u, DataLayout::NHWC)
814  {}
815 
816  SpaceToDepthDescriptor(unsigned int blockSize, DataLayout dataLayout)
817  : m_BlockSize(blockSize)
818  , m_DataLayout(dataLayout)
819  {}
820 
821  bool operator ==(const SpaceToDepthDescriptor& rhs) const
822  {
823  return m_BlockSize == rhs.m_BlockSize && m_DataLayout == rhs.m_DataLayout;
824  }
825 
826  /// Scalar specifying the input block size. It must be >= 1
827  unsigned int m_BlockSize;
828 
829  /// The data layout to be used (NCHW, NHWC).
830  DataLayout m_DataLayout;
831 };
832 
833 /// A DepthToSpaceDescriptor for the DepthToSpaceLayer.
834 using DepthToSpaceDescriptor = SpaceToDepthDescriptor;
835 
836 /// An LstmDescriptor for the LstmLayer.
837 struct LstmDescriptor
838 {
839  LstmDescriptor()
840  : m_ActivationFunc(1) // 0: None, 1: Relu, 3: Relu6, 4: Tanh, 6: Sigmoid
841  , m_ClippingThresCell(0.0)
842  , m_ClippingThresProj(0.0)
843  , m_CifgEnabled(true)
844  , m_PeepholeEnabled(false)
845  , m_ProjectionEnabled(false)
846  , m_LayerNormEnabled(false)
847  {}
848 
849  bool operator ==(const LstmDescriptor& rhs) const
850  {
851  return m_ActivationFunc == rhs.m_ActivationFunc &&
852  m_ClippingThresCell == rhs.m_ClippingThresCell &&
853  m_ClippingThresProj == rhs.m_ClippingThresProj &&
854  m_CifgEnabled == rhs.m_CifgEnabled &&
855  m_PeepholeEnabled == rhs.m_PeepholeEnabled &&
856  m_LayerNormEnabled == rhs.m_LayerNormEnabled;
857  }
858 
859  /// @brief The activation function to use.
860  /// 0: None, 1: Relu, 3: Relu6, 4: Tanh, 6: Sigmoid.
861  uint32_t m_ActivationFunc;
862  /// Clipping threshold value for the cell state.
863  float m_ClippingThresCell;
864  /// Clipping threshold value for the projection.
865  float m_ClippingThresProj;
866  /// Enable/disable cifg (coupled input & forget gate).
867  bool m_CifgEnabled;
868  /// Enable/disable peephole.
869  bool m_PeepholeEnabled;
870  /// Enable/disable the projection layer.
871  bool m_ProjectionEnabled;
872  /// Enable/disable layer normalization.
873  bool m_LayerNormEnabled;
874 };
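
// Usage sketch (editorial addition): an LSTM configuration with separate input and forget gates
// (CIFG disabled), tanh as the cell activation (code 4 in the encoding documented above) and the
// cell state clipped to [-10, 10]. The helper name and values are illustrative.
inline LstmDescriptor MakeLstmDescriptor()
{
    LstmDescriptor lstm;
    lstm.m_ActivationFunc    = 4;     // 4: Tanh
    lstm.m_ClippingThresCell = 10.0f; // 0 would disable cell-state clipping
    lstm.m_CifgEnabled       = false;
    lstm.m_PeepholeEnabled   = false;
    lstm.m_ProjectionEnabled = false;
    lstm.m_LayerNormEnabled  = false;
    return lstm;
}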
875 
876 /// A MeanDescriptor for the MeanLayer.
877 struct MeanDescriptor
878 {
879  MeanDescriptor()
880  : m_Axis()
881  , m_KeepDims(false)
882  {}
883 
884  MeanDescriptor(const std::vector<unsigned int>& axis, bool keepDims)
885  : m_Axis(axis)
886  , m_KeepDims(keepDims)
887  {}
888 
889  bool operator ==(const MeanDescriptor& rhs) const
890  {
891  return m_Axis == rhs.m_Axis && m_KeepDims == rhs.m_KeepDims;
892  }
893 
894  /// Values for the dimensions to reduce.
895  std::vector<unsigned int> m_Axis;
896  /// Enable/disable keep dimensions. If true, then the reduced dimensions that are of length 1 are kept.
897  bool m_KeepDims;
898 };
899 
900 /// A PadDescriptor for the PadLayer.
901 struct PadDescriptor
902 {
903  PadDescriptor() : m_PadValue(0)
904  {}
905 
906  PadDescriptor(const std::vector<std::pair<unsigned int, unsigned int>>& padList, const float& padValue = 0)
907  : m_PadList(padList)
908  , m_PadValue(padValue)
909  {}
910 
911  bool operator ==(const PadDescriptor& rhs) const
912  {
913  return m_PadList == rhs.m_PadList && m_PadValue == rhs.m_PadValue;
914  }
915 
916  /// @brief Specifies the padding for input dimension.
917  /// First is the number of values to add before the tensor in the dimension.
918  /// Second is the number of values to add after the tensor in the dimension.
919  /// The number of pairs should match the number of dimensions in the input tensor.
920  std::vector<std::pair<unsigned int, unsigned int>> m_PadList;
921 
922  /// Optional value to use for padding, defaults to 0
923  float m_PadValue;
924 };
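
// Usage sketch (editorial addition): padding an N x H x W x C tensor with one element of zero
// padding on each spatial edge. One {before, after} pair is supplied per input dimension.
// The helper name is illustrative.
inline PadDescriptor MakeSpatialPadDescriptor()
{
    const std::vector<std::pair<unsigned int, unsigned int>> padList =
        { { 0, 0 }, { 1, 1 }, { 1, 1 }, { 0, 0 } };
    return PadDescriptor(padList, 0.0f);
}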
925 
926 /// A SliceDescriptor for the SliceLayer.
927 struct SliceDescriptor
928 {
929  SliceDescriptor(const std::vector<unsigned int>& begin, const std::vector<unsigned int>& size)
930  : m_Begin(begin)
931  , m_Size(size)
932  {}
933 
934  SliceDescriptor() : SliceDescriptor({}, {})
935  {}
936 
937  bool operator ==(const SliceDescriptor& rhs) const
938  {
939  return m_Begin == rhs.m_Begin && m_Size == rhs.m_Size;
940  }
941 
942  /// Beginning indices of the slice in each dimension.
943  std::vector<unsigned int> m_Begin;
944 
945  /// Size of the slice in each dimension.
946  std::vector<unsigned int> m_Size;
947 };
948 
949 /// A StackDescriptor for the StackLayer.
950 struct StackDescriptor
951 {
952  StackDescriptor()
953  : m_Axis(0)
954  , m_NumInputs(0)
955  , m_InputShape()
956  {}
957 
958  StackDescriptor(uint32_t axis, uint32_t numInputs, const TensorShape& inputShape)
959  : m_Axis(axis)
960  , m_NumInputs(numInputs)
961  , m_InputShape(inputShape)
962  {}
963 
964  bool operator ==(const StackDescriptor& rhs) const
965  {
966  return m_Axis == rhs.m_Axis &&
967  m_NumInputs == rhs.m_NumInputs &&
968  m_InputShape == rhs.m_InputShape;
969  }
970 
971  /// 0-based axis along which to stack the input tensors.
972  uint32_t m_Axis;
973  /// Number of input tensors.
974  uint32_t m_NumInputs;
975  /// Required shape of all input tensors.
976  TensorShape m_InputShape;
977 };
978 
979 /// A StandInDescriptor for the StandIn layer
980 struct StandInDescriptor
981 {
982  StandInDescriptor() {};
983 
984  StandInDescriptor(uint32_t numInputs, uint32_t numOutputs)
985  : m_NumInputs(numInputs)
986  , m_NumOutputs(numOutputs)
987  {}
988 
989  bool operator ==(const StandInDescriptor& rhs) const
990  {
991  return m_NumInputs == rhs.m_NumInputs &&
992  m_NumOutputs == rhs.m_NumOutputs;
993  }
994 
995  /// Number of input tensors
996  uint32_t m_NumInputs = 0;
997  /// Number of output tensors
998  uint32_t m_NumOutputs = 0;
999 };
1000 
1001 /// A StridedSliceDescriptor for the StridedSliceLayer.
1002 struct StridedSliceDescriptor
1003 {
1004  StridedSliceDescriptor(const std::vector<int>& begin,
1005  const std::vector<int>& end,
1006  const std::vector<int>& stride)
1007  : m_Begin(begin)
1008  , m_End(end)
1009  , m_Stride(stride)
1010  , m_BeginMask(0)
1011  , m_EndMask(0)
1012  , m_ShrinkAxisMask(0)
1013  , m_EllipsisMask(0)
1014  , m_NewAxisMask(0)
1015  , m_DataLayout(DataLayout::NCHW)
1016  {}
1017 
1018  StridedSliceDescriptor()
1019  : StridedSliceDescriptor({}, {}, {})
1020  {}
1021 
1022  bool operator ==(const StridedSliceDescriptor& rhs) const
1023  {
1024  return m_Begin == rhs.m_Begin &&
1025  m_End == rhs.m_End &&
1026  m_Stride == rhs.m_Stride &&
1027  m_BeginMask == rhs.m_BeginMask &&
1028  m_EndMask == rhs.m_EndMask &&
1029  m_ShrinkAxisMask == rhs.m_ShrinkAxisMask &&
1030  m_EllipsisMask == rhs.m_EllipsisMask &&
1031  m_NewAxisMask == rhs.m_NewAxisMask &&
1032  m_DataLayout == rhs.m_DataLayout;
1033  }
1034 
1035  int GetStartForAxis(const TensorShape& inputShape, unsigned int axis) const;
1036  int GetStopForAxis(const TensorShape& inputShape,
1037  unsigned int axis,
1038  int startForAxis) const;
1039 
1040  /// Begin values for the input that will be sliced.
1041  std::vector<int> m_Begin;
1042  /// End values for the input that will be sliced.
1043  std::vector<int> m_End;
1044  /// Stride values for the input that will be sliced.
1045  std::vector<int> m_Stride;
1046 
1047  /// @brief Begin mask value. If set, then the begin is disregarded and the fullest
1048  /// range is used for the dimension.
1049  int32_t m_BeginMask;
1050  /// @brief End mask value. If set, then the end is disregarded and the fullest range
1051  /// is used for the dimension.
1052  int32_t m_EndMask;
1053  /// Shrink axis mask value. If set, the nth specification shrinks the dimensionality by 1.
1054  int32_t m_ShrinkAxisMask;
1055  /// Ellipsis mask value.
1056  int32_t m_EllipsisMask;
1057  /// @brief New axis mask value. If set, the begin, end and stride is disregarded and
1058  /// a new 1 dimension is inserted to this location of the output tensor.
1059  int32_t m_NewAxisMask;
1060 
1061  /// The data layout to be used (NCHW, NHWC).
1062  DataLayout m_DataLayout;
1063 };
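
// Usage sketch (editorial addition): slicing elements [1, 3) of the last dimension of a 3-D
// tensor while keeping dimensions 0 and 1 whole. Setting a bit in m_BeginMask / m_EndMask makes
// the corresponding begin/end value be ignored and the full range used instead.
// The helper name is illustrative.
inline StridedSliceDescriptor MakeLastDimSliceDescriptor()
{
    StridedSliceDescriptor slice({ 0, 0, 1 }, { 0, 0, 3 }, { 1, 1, 1 });
    slice.m_BeginMask = (1 << 0) | (1 << 1); // ignore begin values for dims 0 and 1
    slice.m_EndMask   = (1 << 0) | (1 << 1); // ignore end values for dims 0 and 1
    return slice;
}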
1064 
1065 /// A PreCompiledDescriptor for the PreCompiledLayer.
1066 struct PreCompiledDescriptor
1067 {
1068  PreCompiledDescriptor(unsigned int numInputSlots = 1u, unsigned int numOutputSlots = 1u)
1069  : m_NumInputSlots(numInputSlots), m_NumOutputSlots(numOutputSlots)
1070  {}
1071 
1072  ~PreCompiledDescriptor() = default;
1073 
1074  unsigned int m_NumInputSlots;
1075  unsigned int m_NumOutputSlots;
1076 };
1077 
1078 /// A TransposeConvolution2dDescriptor for the TransposeConvolution2dLayer.
1079 struct TransposeConvolution2dDescriptor
1080 {
1081  TransposeConvolution2dDescriptor() :
1082  m_PadLeft(0),
1083  m_PadRight(0),
1084  m_PadTop(0),
1085  m_PadBottom(0),
1086  m_StrideX(0),
1087  m_StrideY(0),
1088  m_BiasEnabled(false),
1089  m_DataLayout(DataLayout::NCHW)
1090  {}
1091 
1092  bool operator ==(const TransposeConvolution2dDescriptor& rhs) const
1093  {
1094  return m_PadLeft == rhs.m_PadLeft &&
1095  m_PadRight == rhs.m_PadRight &&
1096  m_PadTop == rhs.m_PadTop &&
1097  m_PadBottom == rhs.m_PadBottom &&
1098  m_StrideX == rhs.m_StrideX &&
1099  m_StrideY == rhs.m_StrideY &&
1100  m_BiasEnabled == rhs.m_BiasEnabled &&
1101  m_DataLayout == rhs.m_DataLayout;
1102  }
1103 
1104  /// Padding left value in the width dimension.
1105  uint32_t m_PadLeft;
1106  /// Padding right value in the width dimension.
1107  uint32_t m_PadRight;
1108  /// Padding top value in the height dimension.
1109  uint32_t m_PadTop;
1110  /// Padding bottom value in the height dimension.
1111  uint32_t m_PadBottom;
1112  /// Stride value when proceeding through input for the width dimension.
1113  uint32_t m_StrideX;
1114  /// Stride value when proceeding through input for the height dimension.
1115  uint32_t m_StrideY;
1116  /// Enable/disable bias.
1117  bool m_BiasEnabled;
1118  /// The data layout to be used (NCHW, NHWC).
1119  DataLayout m_DataLayout;
1120 };
1121 
1122 /// A TransposeDescriptor for the TransposeLayer.
1123 struct TransposeDescriptor
1124 {
1125  TransposeDescriptor()
1126  : m_DimMappings{}
1127  {}
1128 
1129  TransposeDescriptor(const PermutationVector& dimMappings)
1130  : m_DimMappings(dimMappings)
1131  {}
1132 
1133  bool operator ==(const TransposeDescriptor &rhs) const
1134  {
1135  return m_DimMappings.IsEqual(rhs.m_DimMappings);
1136  }
1137 
1138  /// @brief Indicates how to translate tensor elements from a given source into the target destination, when
1139  /// source and target potentially have different memory layouts e.g. {0U, 3U, 1U, 2U}.
1140  PermutationVector m_DimMappings;
1141 };
1142 
1143 } // namespace armnn