ArmNN 21.02
Descriptors.hpp
1 //
2 // Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 #pragma once
6 
7 #include "Deprecated.hpp"
8 #include "DescriptorsFwd.hpp"
9 
10 #include <cstdint>
11 #include <initializer_list>
12 #include <iostream>
13 #include <sstream>
14 
15 #include "Tensor.hpp"
16 #include "Types.hpp"
17 
18 namespace armnn
19 {
20 
21 /// Base class for all descriptors.
22 struct BaseDescriptor {};
23 
24 /// An ActivationDescriptor for the ActivationLayer.
25 struct ActivationDescriptor : BaseDescriptor
26 {
27  ActivationDescriptor()
28  : m_Function(ActivationFunction::Sigmoid)
29  , m_A(0)
30  , m_B(0)
31  {}
32 
33  ActivationDescriptor(armnn::ActivationFunction activation,
34  float a = 0,
35  float b = 0)
36  : m_Function(activation)
37  , m_A(a)
38  , m_B(b)
39  {}
40 
41  bool operator ==(const ActivationDescriptor &rhs) const
42  {
43  return m_Function == rhs.m_Function && m_A == rhs.m_A && m_B == rhs.m_B;
44  }
45 
46  /// @brief The activation function to use
47  /// (Sigmoid, TanH, Linear, ReLu, BoundedReLu, SoftReLu, LeakyReLu, Abs, Sqrt, Square, Elu).
48  ActivationFunction m_Function;
49  /// Alpha upper bound value used by the activation functions. (BoundedReLu, Linear, TanH, Elu).
50  float m_A;
51  /// Beta lower bound value used by the activation functions. (BoundedReLu, Linear, TanH).
52  float m_B;
53 };
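
Editor's note: a brief usage sketch, not part of the original header; it assumes the usual armnn/ public include prefix. For a BoundedReLu activation clamped to [0, 6], m_A carries the upper bound and m_B the lower bound:

#include <armnn/Descriptors.hpp>
#include <cassert>

int main()
{
    // m_A = upper bound, m_B = lower bound for BoundedReLu.
    armnn::ActivationDescriptor relu6(armnn::ActivationFunction::BoundedReLu,
                                      /*a=*/6.0f,
                                      /*b=*/0.0f);
    assert(relu6.m_Function == armnn::ActivationFunction::BoundedReLu);
    assert(relu6 == relu6); // operator== compares m_Function, m_A and m_B
    return 0;
}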
54 
55 /// An ArgMinMaxDescriptor for ArgMinMaxLayer
56 struct ArgMinMaxDescriptor : BaseDescriptor
57 {
58  ArgMinMaxDescriptor()
59  : m_Function(ArgMinMaxFunction::Min)
60  , m_Axis(-1)
61  , m_Output_Type(armnn::DataType::Signed32)
62  {}
63 
64  bool operator ==(const ArgMinMaxDescriptor &rhs) const
65  {
66  return m_Function == rhs.m_Function && m_Axis == rhs.m_Axis && m_Output_Type == rhs.m_Output_Type;
67  }
68 
69  /// Specify if the function is to find Min or Max.
70  ArgMinMaxFunction m_Function;
71  /// Axis to reduce across the input tensor.
72  int m_Axis;
73  /// Output tensor data type (Signed32 or Signed64). Defaults to Signed32.
74  armnn::DataType m_Output_Type;
75 };
76 
77 /// A ComparisonDescriptor for the ComparisonLayer
78 struct ComparisonDescriptor : BaseDescriptor
79 {
80  ComparisonDescriptor()
81  : m_Operation(ComparisonOperation::Equal)
82  {}
83 
84  ComparisonDescriptor(ComparisonOperation operation)
85  : m_Operation(operation)
86  {}
87 
88  bool operator ==(const ComparisonDescriptor &rhs) const
89  {
90  return m_Operation == rhs.m_Operation;
91  }
92 
93  /// Specifies the comparison operation to execute.
94  ComparisonOperation m_Operation;
95 };
96 
97 /// An ElementwiseUnaryDescriptor for the ElementwiseUnaryLayer.
98 struct ElementwiseUnaryDescriptor : BaseDescriptor
99 {
100  ElementwiseUnaryDescriptor()
101  : m_Operation(UnaryOperation::Abs)
102  {}
103 
104  ElementwiseUnaryDescriptor(UnaryOperation operation)
105  : m_Operation(operation)
106  {}
107 
108  bool operator ==(const ElementwiseUnaryDescriptor &rhs) const
109  {
110  return m_Operation == rhs.m_Operation;
111  }
112 
113  /// Specifies the elementwiseUnary operation to execute.
114  UnaryOperation m_Operation;
115 };
116 
117 /// A PermuteDescriptor for the PermuteLayer.
118 struct PermuteDescriptor : BaseDescriptor
119 {
120  PermuteDescriptor()
121  : m_DimMappings{}
122  {}
123 
124  PermuteDescriptor(const PermutationVector& dimMappings)
125  : m_DimMappings(dimMappings)
126  {}
127 
128  bool operator ==(const PermuteDescriptor &rhs) const
129  {
130  return m_DimMappings.IsEqual(rhs.m_DimMappings);
131  }
132 
133  /// @brief Indicates how to translate tensor elements from a given source into the target destination, when
134  /// source and target potentially have different memory layouts e.g. {0U, 3U, 1U, 2U}.
135  PermutationVector m_DimMappings;
136 };
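
Editor's note: an illustrative sketch, not from the ArmNN documentation. It assumes PermutationVector's initializer-list constructor from Types.hpp; the mapping is the {0U, 3U, 1U, 2U} example quoted in the comment above.

#include <armnn/Descriptors.hpp>
#include <armnn/Types.hpp>

int main()
{
    // Per the header comment, m_DimMappings describes how source elements are
    // translated into the destination layout, e.g. {0U, 3U, 1U, 2U}.
    armnn::PermutationVector mappings({ 0U, 3U, 1U, 2U });
    armnn::PermuteDescriptor permuteDesc(mappings);
    return permuteDesc == armnn::PermuteDescriptor(mappings) ? 0 : 1;
}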
137 
138 /// A SoftmaxDescriptor for the SoftmaxLayer.
139 struct SoftmaxDescriptor : BaseDescriptor
140 {
141  SoftmaxDescriptor()
142  : m_Beta(1.0f)
143  , m_Axis(-1)
144  {}
145 
146  bool operator ==(const SoftmaxDescriptor& rhs) const
147  {
148  return m_Beta == rhs.m_Beta && m_Axis == rhs.m_Axis;
149  }
150 
151  /// Exponentiation value.
152  float m_Beta;
153  /// Scalar, defaulted to the last index (-1), specifying the dimension the activation will be performed on.
154  int m_Axis;
155 };
156 
157 /// A LogSoftmaxDescriptor for the LogSoftmaxLayer.
158 using LogSoftmaxDescriptor = SoftmaxDescriptor;
159 
160 /// @brief An OriginsDescriptor for the ConcatLayer.
161 /// Descriptor to configure the concatenation process. Number of views must be equal to the number of inputs, and
162 /// their order must match - e.g. first view corresponds to the first input, second view to the second input, etc.
163 struct OriginsDescriptor : BaseDescriptor
164 {
165  OriginsDescriptor();
166  OriginsDescriptor(uint32_t numViews, uint32_t numDimensions = 4);
167  OriginsDescriptor(const OriginsDescriptor& other);
168  OriginsDescriptor(OriginsDescriptor&& other);
169 
170  ~OriginsDescriptor();
171 
172  OriginsDescriptor& operator=(OriginsDescriptor rhs);
173 
174  bool operator ==(const OriginsDescriptor& rhs) const;
175 
176  /// @brief Set the view origin coordinates. The arguments are: view, dimension, value.
177  /// If the view is greater than or equal to GetNumViews(), then the view argument is out of range.
178  /// If the coord is greater than or equal to GetNumDimensions(), then the coord argument is out of range.
179  Status SetViewOriginCoord(uint32_t view, uint32_t coord, uint32_t value);
180  /// Get the number of views.
181  uint32_t GetNumViews() const;
182  /// Get the number of dimensions.
183  uint32_t GetNumDimensions() const;
184  /// Return the view origin at index idx.
185  const uint32_t* GetViewOrigin(uint32_t idx) const;
186  /// @brief Reorders the viewOrigins in accordance with the indices presented in newOrdering array.
187  /// The number of views must match number of elements in the new ordering array.
188  void ReorderOrigins(unsigned int* newOrdering, unsigned int numNewOrdering);
189  /// Swap the OriginsDescriptor values first and second.
190  friend void swap(OriginsDescriptor& first, OriginsDescriptor& second);
191  /// Set the concatenation axis value.
192  void SetConcatAxis(unsigned int concatAxis);
193  /// Get the concatenation axis value.
194  unsigned int GetConcatAxis() const;
195 
196 private:
197  unsigned int m_ConcatAxis;
198  uint32_t m_NumViews;
199  uint32_t m_NumDimensions;
200  uint32_t** m_ViewOrigins;
201 };
202 
203 /// @brief A ViewsDescriptor for the SplitterLayer.
204 /// Descriptor to configure the splitting process. Number of Views must be equal to the number of outputs, and
205 /// their order must match - e.g. first view corresponds to the first output, second view to the second output, etc.
206 struct ViewsDescriptor : BaseDescriptor
207 {
208  ViewsDescriptor(uint32_t numViews, uint32_t numDimensions = 4);
209  ViewsDescriptor(const ViewsDescriptor& other);
210  ViewsDescriptor();
211  ViewsDescriptor(ViewsDescriptor&& other);
212 
213  ~ViewsDescriptor();
214 
215  ViewsDescriptor& operator=(ViewsDescriptor rhs);
216 
217  bool operator ==(const ViewsDescriptor& rhs) const;
218 
219  /// @brief Set the view origin coordinates. The arguments are: view, dimension, value.
220  /// If the view is greater than or equal to GetNumViews(), then the view argument is out of range.
221  /// If the coord is greater than or equal to GetNumDimensions(), then the coord argument is out of range.
222  Status SetViewOriginCoord(uint32_t view, uint32_t coord, uint32_t value);
223  /// @brief Set the size of the views. The arguments are: view, dimension, value.
224  /// If the view is greater than or equal to GetNumViews(), then the view argument is out of range.
225  /// If the coord is greater than or equal to GetNumDimensions(), then the coord argument is out of range.
226  Status SetViewSize(uint32_t view, uint32_t coord, uint32_t value);
227 
228  /// Get the number of views.
229  uint32_t GetNumViews() const;
230  /// Get the number of dimensions.
231  uint32_t GetNumDimensions() const;
232  /// Get the view origin at index idx.
233  const uint32_t* GetViewOrigin(uint32_t idx) const;
234  /// Get the view sizes at index idx.
235  const uint32_t* GetViewSizes(uint32_t idx) const;
236  /// Get the view origins.
237  const OriginsDescriptor& GetOrigins() const;
238 
239  /// Swap the ViewsDescriptor value first and second.
240  friend void swap(ViewsDescriptor& first, ViewsDescriptor& second);
241 private:
242  OriginsDescriptor m_Origins;
243  uint32_t** m_ViewSizes;
244 };
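
Editor's note: a hedged sketch of how the setters compose, not taken from the ArmNN documentation. Splitting a {1, 4, 2, 2} tensor into two {1, 2, 2, 2} views along dimension 1 could be configured as:

#include <armnn/Descriptors.hpp>
#include <cstdint>

// Split a {1, 4, 2, 2} tensor into two {1, 2, 2, 2} views along dimension 1.
armnn::ViewsDescriptor MakeSplitViews()
{
    armnn::ViewsDescriptor views(/*numViews=*/2, /*numDimensions=*/4);
    const uint32_t viewShape[4] = { 1, 2, 2, 2 };
    for (uint32_t view = 0; view < 2; ++view)
    {
        for (uint32_t dim = 0; dim < 4; ++dim)
        {
            // Views are offset only along the split dimension (dim 1).
            views.SetViewOriginCoord(view, dim, dim == 1 ? view * 2 : 0);
            views.SetViewSize(view, dim, viewShape[dim]);
        }
    }
    return views;
}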
245 
246 template <typename TensorShapeIt>
247 ARMNN_DEPRECATED_MSG("Use CreateDescriptorForConcatenation instead")
248 OriginsDescriptor CreateMergerDescriptorForConcatenation(TensorShapeIt first,
249  TensorShapeIt last,
250  unsigned int concatenationDimension)
251 {
252  return CreateDescriptorForConcatenation(first, last, concatenationDimension);
253 }
254 
255 /// @brief Convenience template to create an OriginsDescriptor to use when creating a ConcatLayer for performing
256 /// concatenation of a number of input tensors.
257 template <typename TensorShapeIt>
258 OriginsDescriptor CreateDescriptorForConcatenation(TensorShapeIt first,
259  TensorShapeIt last,
260  unsigned int concatenationDimension)
261 {
262  auto numInputs = std::distance(first, last);
263 
264  if (numInputs < 2)
265  {
266  throw InvalidArgumentException("Concatenation requires at least 2 inputs");
267  }
268 
269  const auto& firstInputShape = *first;
270 
271  const unsigned int numDimensions = firstInputShape.GetNumDimensions();
272  for (auto it = first + 1; it != last; ++it)
273  {
274  if (it->GetNumDimensions() != numDimensions)
275  {
276  throw InvalidArgumentException("All inputs to concatenation must have the same number of dimensions");
277  }
278  }
279 
280  if (concatenationDimension >= numDimensions)
281  {
282  throw InvalidArgumentException("concatenationDimension must be less than the number of dimensions.");
283  }
284 
285  for (auto it = first; it != last; ++it)
286  {
287  for (unsigned int d = 0; d < numDimensions; ++d)
288  {
289  const bool dimSizeOk = (d == concatenationDimension) || (firstInputShape[d] == (*it)[d]);
290  if (!dimSizeOk)
291  {
292  throw InvalidArgumentException("All inputs to concatenation must be the same size along all dimensions "
293  "except the concatenation dimension");
294  }
295  }
296  }
297 
298  OriginsDescriptor viewsDescriptor(static_cast<uint32_t>(numInputs), numDimensions);
299  viewsDescriptor.SetConcatAxis(concatenationDimension);
300 
301  uint32_t viewIndex = 0u;
302  uint32_t coordAlongConcatDim = 0u;
303  for (auto it = first; it != last; ++it)
304  {
305  const auto& inputShape = *it;
306 
307  for (unsigned int i = 0; i < concatenationDimension; ++i)
308  {
309  viewsDescriptor.SetViewOriginCoord(viewIndex, i, 0);
310  }
311 
312  viewsDescriptor.SetViewOriginCoord(viewIndex, concatenationDimension, coordAlongConcatDim);
313  unsigned int dimSize = inputShape[concatenationDimension];
314  coordAlongConcatDim += dimSize;
315 
316 
317  for (unsigned int i = concatenationDimension + 1; i < numDimensions; ++i)
318  {
319  viewsDescriptor.SetViewOriginCoord(viewIndex, i, 0);
320  }
321 
322  ++viewIndex;
323  }
324 
325  return viewsDescriptor;
326 }
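
Editor's note: a short usage sketch, not from the original file; it assumes TensorShape's initializer-list constructor from Tensor.hpp. Concatenating two {2, 3, 4} tensors along dimension 0 gives view origins {0, 0, 0} and {2, 0, 0}:

#include <armnn/Descriptors.hpp>
#include <armnn/Tensor.hpp>
#include <vector>

int main()
{
    std::vector<armnn::TensorShape> shapes = { armnn::TensorShape({ 2, 3, 4 }),
                                               armnn::TensorShape({ 2, 3, 4 }) };
    armnn::OriginsDescriptor concatDesc =
        armnn::CreateDescriptorForConcatenation(shapes.begin(), shapes.end(),
                                                /*concatenationDimension=*/0);
    // The second view starts at offset 2 along the concatenation dimension.
    return concatDesc.GetViewOrigin(1)[0] == 2 ? 0 : 1;
}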
327 
328 /// A Pooling2dDescriptor for the Pooling2dLayer.
329 struct Pooling2dDescriptor : BaseDescriptor
330 {
331  Pooling2dDescriptor()
332  : m_PoolType(PoolingAlgorithm::Max)
333  , m_PadLeft(0)
334  , m_PadRight(0)
335  , m_PadTop(0)
336  , m_PadBottom(0)
337  , m_PoolWidth(0)
338  , m_PoolHeight(0)
339  , m_StrideX(0)
340  , m_StrideY(0)
341  , m_OutputShapeRounding(OutputShapeRounding::Floor)
342  , m_PaddingMethod(PaddingMethod::Exclude)
343  , m_DataLayout(DataLayout::NCHW)
344  {}
345 
346  bool operator ==(const Pooling2dDescriptor& rhs) const
347  {
348  return m_PoolType == rhs.m_PoolType &&
349  m_PadLeft == rhs.m_PadLeft &&
350  m_PadRight == rhs.m_PadRight &&
351  m_PadTop == rhs.m_PadTop &&
352  m_PadBottom == rhs.m_PadBottom &&
353  m_PoolWidth == rhs.m_PoolWidth &&
354  m_PoolHeight == rhs.m_PoolHeight &&
355  m_StrideX == rhs.m_StrideX &&
356  m_StrideY == rhs.m_StrideY &&
357  m_OutputShapeRounding == rhs.m_OutputShapeRounding &&
358  m_PaddingMethod == rhs.m_PaddingMethod &&
359  m_DataLayout == rhs.m_DataLayout;
360  }
361 
362  /// The pooling algorithm to use (Max, Average, L2).
363  PoolingAlgorithm m_PoolType;
364  /// Padding left value in the width dimension.
365  uint32_t m_PadLeft;
366  /// Padding right value in the width dimension.
367  uint32_t m_PadRight;
368  /// Padding top value in the height dimension.
369  uint32_t m_PadTop;
370  /// Padding bottom value in the height dimension.
371  uint32_t m_PadBottom;
372  /// Pooling width value.
373  uint32_t m_PoolWidth;
374  /// Pooling height value.
375  uint32_t m_PoolHeight;
376  /// Stride value when proceeding through input for the width dimension.
377  uint32_t m_StrideX;
378  /// Stride value when proceeding through input for the height dimension.
379  uint32_t m_StrideY;
380  /// The rounding method for the output shape. (Floor, Ceiling).
381  OutputShapeRounding m_OutputShapeRounding;
382  /// The padding method to be used. (Exclude, IgnoreValue).
383  PaddingMethod m_PaddingMethod;
384  /// The data layout to be used (NCHW, NHWC).
385  DataLayout m_DataLayout;
386 };
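
Editor's note: an illustrative configuration sketch, not part of the header. A 2x2 max pool with stride 2 and no padding over an NHWC tensor:

#include <armnn/Descriptors.hpp>

armnn::Pooling2dDescriptor MakeMaxPool2x2()
{
    armnn::Pooling2dDescriptor pool;
    pool.m_PoolType   = armnn::PoolingAlgorithm::Max;
    pool.m_PoolWidth  = 2;
    pool.m_PoolHeight = 2;
    pool.m_StrideX    = 2;
    pool.m_StrideY    = 2;  // padding fields keep their default of 0
    pool.m_DataLayout = armnn::DataLayout::NHWC;
    return pool;
}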
387 
388 /// A FullyConnectedDescriptor for the FullyConnectedLayer.
389 struct FullyConnectedDescriptor : BaseDescriptor
390 {
391  FullyConnectedDescriptor()
392  : m_BiasEnabled(false)
393  , m_TransposeWeightMatrix(false)
394  {}
395 
396  bool operator ==(const FullyConnectedDescriptor& rhs) const
397  {
398  return m_BiasEnabled == rhs.m_BiasEnabled && m_TransposeWeightMatrix == rhs.m_TransposeWeightMatrix;
399  }
400 
401  /// Enable/disable bias.
402  bool m_BiasEnabled;
403  /// Enable/disable transpose weight matrix.
404  bool m_TransposeWeightMatrix;
405 };
406 
407 /// A Convolution2dDescriptor for the Convolution2dLayer.
408 struct Convolution2dDescriptor : BaseDescriptor
409 {
410  Convolution2dDescriptor()
411  : m_PadLeft(0)
412  , m_PadRight(0)
413  , m_PadTop(0)
414  , m_PadBottom(0)
415  , m_StrideX(1)
416  , m_StrideY(1)
417  , m_DilationX(1)
418  , m_DilationY(1)
419  , m_BiasEnabled(false)
420  , m_DataLayout(DataLayout::NCHW)
421  {}
422 
423  bool operator ==(const Convolution2dDescriptor& rhs) const
424  {
425  return m_PadLeft == rhs.m_PadLeft &&
426  m_PadRight == rhs.m_PadRight &&
427  m_PadTop == rhs.m_PadTop &&
428  m_PadBottom == rhs.m_PadBottom &&
429  m_StrideX == rhs.m_StrideX &&
430  m_StrideY == rhs.m_StrideY &&
431  m_DilationX == rhs.m_DilationX &&
432  m_DilationY == rhs.m_DilationY &&
433  m_BiasEnabled == rhs.m_BiasEnabled &&
434  m_DataLayout == rhs.m_DataLayout;
435  }
436 
437  /// Padding left value in the width dimension.
438  uint32_t m_PadLeft;
439  /// Padding right value in the width dimension.
440  uint32_t m_PadRight;
441  /// Padding top value in the height dimension.
442  uint32_t m_PadTop;
443  /// Padding bottom value in the height dimension.
444  uint32_t m_PadBottom;
445  /// Stride value when proceeding through input for the width dimension.
446  uint32_t m_StrideX;
447  /// Stride value when proceeding through input for the height dimension.
448  uint32_t m_StrideY;
449  /// Dilation along the x axis.
450  uint32_t m_DilationX;
451  /// Dilation along the y axis.
452  uint32_t m_DilationY;
453  /// Enable/disable bias.
454  bool m_BiasEnabled;
455  /// The data layout to be used (NCHW, NHWC).
456  DataLayout m_DataLayout;
457 };
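
Editor's note: the padding, stride and dilation fields combine in the standard convolution output-size arithmetic; the helper below is an editor-written sketch of that formula, not a function exported by ArmNN.

#include <cstdint>

// Output size of one spatial dimension: the kernel is dilated to
// (kernel - 1) * dilation + 1, padding is added on both sides, and the
// stride determines how many positions fit.
uint32_t ConvOutputSize(uint32_t inputSize, uint32_t kernelSize,
                        uint32_t padBefore, uint32_t padAfter,
                        uint32_t stride, uint32_t dilation)
{
    uint32_t dilatedKernel = (kernelSize - 1) * dilation + 1;
    return (inputSize + padBefore + padAfter - dilatedKernel) / stride + 1;
}
// e.g. inputSize = 224, kernelSize = 3, pad = 1/1, stride = 2, dilation = 1 -> 112.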
458 
459 /// A DepthwiseConvolution2dDescriptor for the DepthwiseConvolution2dLayer.
460 struct DepthwiseConvolution2dDescriptor : BaseDescriptor
461 {
462  DepthwiseConvolution2dDescriptor()
463  : m_PadLeft(0)
464  , m_PadRight(0)
465  , m_PadTop(0)
466  , m_PadBottom(0)
467  , m_StrideX(1)
468  , m_StrideY(1)
469  , m_DilationX(1)
470  , m_DilationY(1)
471  , m_BiasEnabled(false)
472  , m_DataLayout(DataLayout::NCHW)
473  {}
474 
475  bool operator ==(const DepthwiseConvolution2dDescriptor& rhs) const
476  {
477  return m_PadLeft == rhs.m_PadLeft &&
478  m_PadRight == rhs.m_PadRight &&
479  m_PadTop == rhs.m_PadTop &&
480  m_PadBottom == rhs.m_PadBottom &&
481  m_StrideX == rhs.m_StrideX &&
482  m_StrideY == rhs.m_StrideY &&
483  m_DilationX == rhs.m_DilationX &&
484  m_DilationY == rhs.m_DilationY &&
485  m_BiasEnabled == rhs.m_BiasEnabled &&
486  m_DataLayout == rhs.m_DataLayout;
487  }
488 
489  /// Padding left value in the width dimension.
490  uint32_t m_PadLeft;
491  /// Padding right value in the width dimension.
492  uint32_t m_PadRight;
493  /// Padding top value in the height dimension.
494  uint32_t m_PadTop;
495  /// Padding bottom value in the height dimension.
496  uint32_t m_PadBottom;
497  /// Stride value when proceeding through input for the width dimension.
498  uint32_t m_StrideX;
499  /// Stride value when proceeding through input for the height dimension.
500  uint32_t m_StrideY;
501  /// Dilation factor value for width dimension.
502  uint32_t m_DilationX;
503  /// Dilation factor value for height dimension.
504  uint32_t m_DilationY;
505  /// Enable/disable bias.
506  bool m_BiasEnabled;
507  /// The data layout to be used (NCHW, NHWC).
508  DataLayout m_DataLayout;
509 };
510 
511 struct DetectionPostProcessDescriptor : BaseDescriptor
512 {
513  DetectionPostProcessDescriptor()
514  : m_MaxDetections(0)
515  , m_MaxClassesPerDetection(1)
516  , m_DetectionsPerClass(1)
517  , m_NmsScoreThreshold(0)
518  , m_NmsIouThreshold(0)
519  , m_NumClasses(0)
520  , m_UseRegularNms(false)
521  , m_ScaleX(0)
522  , m_ScaleY(0)
523  , m_ScaleW(0)
524  , m_ScaleH(0)
525  {}
526 
527  bool operator ==(const DetectionPostProcessDescriptor& rhs) const
528  {
529  return m_MaxDetections == rhs.m_MaxDetections &&
530  m_MaxClassesPerDetection == rhs.m_MaxClassesPerDetection &&
531  m_DetectionsPerClass == rhs.m_DetectionsPerClass &&
532  m_NmsScoreThreshold == rhs.m_NmsScoreThreshold &&
533  m_NmsIouThreshold == rhs.m_NmsIouThreshold &&
534  m_NumClasses == rhs.m_NumClasses &&
535  m_UseRegularNms == rhs.m_UseRegularNms &&
536  m_ScaleX == rhs.m_ScaleX &&
537  m_ScaleY == rhs.m_ScaleY &&
538  m_ScaleW == rhs.m_ScaleW &&
539  m_ScaleH == rhs.m_ScaleH;
540  }
541 
542  /// Maximum numbers of detections.
543  uint32_t m_MaxDetections;
544  /// Maximum numbers of classes per detection, used in Fast NMS.
545  uint32_t m_MaxClassesPerDetection;
546  /// Detections per classes, used in Regular NMS.
547  uint32_t m_DetectionsPerClass;
548  /// NMS score threshold.
549  float m_NmsScoreThreshold;
550  /// Intersection over union threshold.
551  float m_NmsIouThreshold;
552  /// Number of classes.
553  uint32_t m_NumClasses;
554  /// Use Regular NMS.
555  bool m_UseRegularNms;
556  /// Center size encoding scale x.
557  float m_ScaleX;
558  /// Center size encoding scale y.
559  float m_ScaleY;
560  /// Center size encoding scale weight.
561  float m_ScaleW;
562  /// Center size encoding scale height.
563  float m_ScaleH;
564 };
565 
566 /// A NormalizationDescriptor for the NormalizationLayer.
567 struct NormalizationDescriptor : BaseDescriptor
568 {
569  NormalizationDescriptor()
570  : m_NormChannelType(NormalizationAlgorithmChannel::Across)
571  , m_NormMethodType(NormalizationAlgorithmMethod::LocalBrightness)
572  , m_NormSize(0)
573  , m_Alpha(0.f)
574  , m_Beta(0.f)
575  , m_K(0.f)
576  , m_DataLayout(DataLayout::NCHW)
577  {}
578 
579  bool operator ==(const NormalizationDescriptor& rhs) const
580  {
581  return m_NormChannelType == rhs.m_NormChannelType &&
582  m_NormMethodType == rhs.m_NormMethodType &&
583  m_NormSize == rhs.m_NormSize &&
584  m_Alpha == rhs.m_Alpha &&
585  m_Beta == rhs.m_Beta &&
586  m_K == rhs.m_K &&
587  m_DataLayout == rhs.m_DataLayout;
588  }
589 
590  /// Normalization channel algorithm to use (Across, Within).
591  NormalizationAlgorithmChannel m_NormChannelType;
592  /// Normalization method algorithm to use (LocalBrightness, LocalContrast).
593  NormalizationAlgorithmMethod m_NormMethodType;
594  /// Depth radius value.
595  uint32_t m_NormSize;
596  /// Alpha value for the normalization equation.
597  float m_Alpha;
598  /// Beta value for the normalization equation.
599  float m_Beta;
600  /// Kappa value used for the across channel normalization equation.
601  float m_K;
602  /// The data layout to be used (NCHW, NHWC).
603  DataLayout m_DataLayout;
604 };
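
Editor's note: for orientation, a sketch of the usual AlexNet-style across-channel response normalization that these fields parameterize; the exact backend formula is not restated in this header, so treat this as an approximation.

#include <algorithm>
#include <cmath>
#include <cstddef>
#include <cstdint>
#include <vector>

// out[c] = in[c] / pow(k + alpha * sum of in[c']^2 over a window of
// normSize channels centred on c, beta)
float LocalResponseNormalize(const std::vector<float>& channels, size_t c,
                             uint32_t normSize, float alpha, float beta, float k)
{
    size_t half  = normSize / 2;
    size_t begin = c > half ? c - half : 0;
    size_t end   = std::min(channels.size(), c + half + 1);
    float sumSq  = 0.0f;
    for (size_t i = begin; i < end; ++i)
    {
        sumSq += channels[i] * channels[i];
    }
    return channels[c] / std::pow(k + alpha * sumSq, beta);
}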
605 
606 /// A L2NormalizationDescriptor for the L2NormalizationLayer.
607 struct L2NormalizationDescriptor : BaseDescriptor
608 {
609  L2NormalizationDescriptor()
610  : m_Eps(1e-12f)
611  , m_DataLayout(DataLayout::NCHW)
612  {}
613 
614  bool operator ==(const L2NormalizationDescriptor& rhs) const
615  {
616  return m_Eps == rhs.m_Eps && m_DataLayout == rhs.m_DataLayout;
617  }
618 
619  /// Used to avoid dividing by zero.
620  float m_Eps;
621  /// The data layout to be used (NCHW, NHWC).
622  DataLayout m_DataLayout;
623 };
624 
625 /// A BatchNormalizationDescriptor for the BatchNormalizationLayer.
626 struct BatchNormalizationDescriptor : BaseDescriptor
627 {
628  BatchNormalizationDescriptor()
629  : m_Eps(0.0001f)
630  , m_DataLayout(DataLayout::NCHW)
631  {}
632 
633  bool operator ==(const BatchNormalizationDescriptor& rhs) const
634  {
635  return m_Eps == rhs.m_Eps && m_DataLayout == rhs.m_DataLayout;
636  }
637 
638  /// Value to add to the variance. Used to avoid dividing by zero.
639  float m_Eps;
640  /// The data layout to be used (NCHW, NHWC).
641  DataLayout m_DataLayout;
642 };
643 
644 /// An InstanceNormalizationDescriptor for InstanceNormalizationLayer
645 struct InstanceNormalizationDescriptor : BaseDescriptor
646 {
647  InstanceNormalizationDescriptor()
648  : m_Gamma(1.0f)
649  , m_Beta(0.0f)
650  , m_Eps(1e-12f)
651  , m_DataLayout(DataLayout::NCHW)
652  {}
653 
654  bool operator ==(const InstanceNormalizationDescriptor& rhs) const
655  {
656  return m_Gamma == rhs.m_Gamma &&
657  m_Beta == rhs.m_Beta &&
658  m_Eps == rhs.m_Eps &&
659  m_DataLayout == rhs.m_DataLayout;
660  }
661 
662  /// Gamma, the scale scalar value applied for the normalized tensor. Defaults to 1.0.
663  float m_Gamma;
664  /// Beta, the offset scalar value applied for the normalized tensor. Defaults to 0.0.
665  float m_Beta;
666  /// Epsilon, small scalar value added to variance to avoid dividing by zero. Defaults to 1e-12f.
667  float m_Eps;
668  /// The data layout to be used (NCHW, NHWC).
669  DataLayout m_DataLayout;
670 };
671 
672 /// A BatchToSpaceNdDescriptor for the BatchToSpaceNdLayer.
673 struct BatchToSpaceNdDescriptor : BaseDescriptor
674 {
675  BatchToSpaceNdDescriptor()
676  : m_BlockShape({1, 1})
677  , m_Crops({{0, 0}, {0, 0}})
678  , m_DataLayout(DataLayout::NCHW)
679  {}
680 
681  BatchToSpaceNdDescriptor(std::vector<unsigned int> blockShape,
682  std::vector<std::pair<unsigned int, unsigned int>> crops)
683  : m_BlockShape(blockShape)
684  , m_Crops(crops)
685  , m_DataLayout(DataLayout::NCHW)
686  {}
687 
688  bool operator ==(const BatchToSpaceNdDescriptor& rhs) const
689  {
690  return m_BlockShape == rhs.m_BlockShape &&
691  m_Crops == rhs.m_Crops &&
692  m_DataLayout == rhs.m_DataLayout;
693  }
694 
695  /// Block shape values.
696  std::vector<unsigned int> m_BlockShape;
697  /// The values to crop from the input dimension.
698  std::vector<std::pair<unsigned int, unsigned int>> m_Crops;
699  /// The data layout to be used (NCHW, NHWC).
700  DataLayout m_DataLayout;
701 };
702 
703 /// A FakeQuantizationDescriptor for the FakeQuantizationLayer.
704 struct FakeQuantizationDescriptor : BaseDescriptor
705 {
706  FakeQuantizationDescriptor()
707  : m_Min(-6.0f)
708  , m_Max(6.0f)
709  {}
710 
711  bool operator ==(const FakeQuantizationDescriptor& rhs) const
712  {
713  return m_Min == rhs.m_Min && m_Max == rhs.m_Max;
714  }
715 
716  /// Minimum value.
717  float m_Min;
718  /// Maximum value.
719  float m_Max;
720 };
721 
722 /// A FillDescriptor for the FillLayer
723 struct FillDescriptor : BaseDescriptor
724 {
725  FillDescriptor()
726  : m_Value(0)
727  {}
728 
729  FillDescriptor(const float& value)
730  : m_Value(value)
731  {}
732 
733  bool operator ==(const FillDescriptor& rhs) const
734  {
735  return m_Value == rhs.m_Value;
736  }
737 
738  float m_Value;
739 };
740 
741 /// A GatherDescriptor for the GatherLayer.
742 struct GatherDescriptor : BaseDescriptor
743 {
744  GatherDescriptor()
745  : m_Axis(0)
746  {}
747 
748  GatherDescriptor(int32_t axis)
749  : m_Axis(axis)
750  {}
751 
752  bool operator ==(const GatherDescriptor& rhs) const
753  {
754  return m_Axis == rhs.m_Axis;
755  }
756 
757  /// The axis in params to gather indices from
758  int32_t m_Axis;
759 };
760 
761 /// A ResizeBilinearDescriptor for the ResizeBilinearLayer.
762 struct ResizeBilinearDescriptor : BaseDescriptor
763 {
764  ResizeBilinearDescriptor()
765  : m_TargetWidth(0)
766  , m_TargetHeight(0)
767  , m_DataLayout(DataLayout::NCHW)
768  , m_AlignCorners(false)
769  , m_HalfPixelCenters(false)
770  {}
771 
772  bool operator ==(const ResizeBilinearDescriptor& rhs) const
773  {
774  return m_TargetWidth == rhs.m_TargetWidth &&
775  m_TargetHeight == rhs.m_TargetHeight &&
776  m_DataLayout == rhs.m_DataLayout &&
777  m_AlignCorners == rhs.m_AlignCorners &&
778  m_HalfPixelCenters == rhs.m_HalfPixelCenters;
779  }
780 
781  /// Target width value.
782  uint32_t m_TargetWidth;
783  /// Target height value.
784  uint32_t m_TargetHeight;
785  /// The data layout to be used (NCHW, NHWC).
786  DataLayout m_DataLayout;
787  /// Aligned corners.
788  bool m_AlignCorners;
789  /// Half pixel centers.
790  bool m_HalfPixelCenters;
791 };
792 
793 /// A ResizeDescriptor for the ResizeLayer.
794 struct ResizeDescriptor : BaseDescriptor
795 {
796  ResizeDescriptor()
797  : m_TargetWidth(0)
798  , m_TargetHeight(0)
799  , m_Method(ResizeMethod::NearestNeighbor)
800  , m_DataLayout(DataLayout::NCHW)
801  , m_AlignCorners(false)
802  , m_HalfPixelCenters(false)
803  {}
804 
805  bool operator ==(const ResizeDescriptor& rhs) const
806  {
807  return m_TargetWidth == rhs.m_TargetWidth &&
808  m_TargetHeight == rhs.m_TargetHeight &&
809  m_Method == rhs.m_Method &&
810  m_DataLayout == rhs.m_DataLayout &&
811  m_AlignCorners == rhs.m_AlignCorners &&
812  m_HalfPixelCenters == rhs.m_HalfPixelCenters;
813  }
814 
815  /// Target width value.
816  uint32_t m_TargetWidth;
817  /// Target height value.
818  uint32_t m_TargetHeight;
819  /// The interpolation method to use
820  /// (Bilinear, NearestNeighbor).
821  ResizeMethod m_Method;
822  /// The data layout to be used (NCHW, NHWC).
823  DataLayout m_DataLayout;
824  /// Aligned corners.
825  bool m_AlignCorners;
826  /// Half pixel centers.
827  bool m_HalfPixelCenters;
828 };
829 
830 
831 /// A ReshapeDescriptor for the ReshapeLayer.
832 struct ReshapeDescriptor : BaseDescriptor
833 {
834  ReshapeDescriptor()
835  : m_TargetShape()
836  {}
837 
838  ReshapeDescriptor(const TensorShape& shape)
839  : m_TargetShape(shape)
840  {}
841 
842  bool operator ==(const ReshapeDescriptor& rhs) const
843  {
844  return m_TargetShape == rhs.m_TargetShape;
845  }
846 
847  /// Target shape value.
848  TensorShape m_TargetShape;
849 };
850 
851 /// A SpaceToBatchNdDescriptor for the SpaceToBatchNdLayer.
852 struct SpaceToBatchNdDescriptor : BaseDescriptor
853 {
854  SpaceToBatchNdDescriptor()
855  : m_BlockShape({1, 1})
856  , m_PadList({{0, 0}, {0, 0}})
857  , m_DataLayout(DataLayout::NCHW)
858  {}
859 
860  SpaceToBatchNdDescriptor(const std::vector<unsigned int>& blockShape,
861  const std::vector<std::pair<unsigned int, unsigned int>>& padList)
862  : m_BlockShape(blockShape)
863  , m_PadList(padList)
864  , m_DataLayout(DataLayout::NCHW)
865  {}
866 
867  bool operator ==(const SpaceToBatchNdDescriptor& rhs) const
868  {
869  return m_BlockShape == rhs.m_BlockShape &&
870  m_PadList == rhs.m_PadList &&
871  m_DataLayout == rhs.m_DataLayout;
872  }
873 
874  /// Block shape value.
875  std::vector<unsigned int> m_BlockShape;
876  /// @brief Specifies the padding values for the input dimension:
877  /// heightPad{top, bottom} widthPad{left, right}.
878  std::vector<std::pair<unsigned int, unsigned int>> m_PadList;
879  /// The data layout to be used (NCHW, NHWC).
880  DataLayout m_DataLayout;
881 };
882 
883 /// A SpaceToDepthDescriptor for the SpaceToDepthLayer
884 struct SpaceToDepthDescriptor : BaseDescriptor
885 {
886  SpaceToDepthDescriptor()
887  : SpaceToDepthDescriptor(1u, DataLayout::NHWC)
888  {}
889 
890  SpaceToDepthDescriptor(unsigned int blockSize, DataLayout dataLayout)
891  : m_BlockSize(blockSize)
892  , m_DataLayout(dataLayout)
893  {}
894 
895  bool operator ==(const SpaceToDepthDescriptor& rhs) const
896  {
897  return m_BlockSize == rhs.m_BlockSize && m_DataLayout == rhs.m_DataLayout;
898  }
899 
900  /// Scalar specifying the input block size. It must be >= 1.
901  unsigned int m_BlockSize;
902 
903  /// The data layout to be used (NCHW, NHWC).
904  DataLayout m_DataLayout;
905 };
906 
907 /// A DepthToSpaceDescriptor for the DepthToSpaceLayer.
908 using DepthToSpaceDescriptor = SpaceToDepthDescriptor;
909 
910 /// An LstmDescriptor for the LstmLayer.
911 struct LstmDescriptor : BaseDescriptor
912 {
913  LstmDescriptor()
914  : m_ActivationFunc(1) // 0: None, 1: Relu, 3: Relu6, 4: Tanh, 6: Sigmoid
915  , m_ClippingThresCell(0.0)
916  , m_ClippingThresProj(0.0)
917  , m_CifgEnabled(true)
918  , m_PeepholeEnabled(false)
919  , m_ProjectionEnabled(false)
920  , m_LayerNormEnabled(false)
921  {}
922 
923  bool operator ==(const LstmDescriptor& rhs) const
924  {
925  return m_ActivationFunc == rhs.m_ActivationFunc &&
926  m_ClippingThresCell == rhs.m_ClippingThresCell &&
927  m_ClippingThresProj == rhs.m_ClippingThresProj &&
928  m_CifgEnabled == rhs.m_CifgEnabled &&
929  m_PeepholeEnabled == rhs.m_PeepholeEnabled &&
930  m_LayerNormEnabled == rhs.m_LayerNormEnabled;
931  }
932 
933  /// @brief The activation function to use.
934  /// 0: None, 1: Relu, 3: Relu6, 4: Tanh, 6: Sigmoid.
935  uint32_t m_ActivationFunc;
936  /// Clipping threshold value for the cell state.
937  float m_ClippingThresCell;
938  /// Clipping threshold value for the projection.
939  float m_ClippingThresProj;
940  /// Enable/disable CIFG (coupled input & forget gate).
941  bool m_CifgEnabled;
942  /// Enable/disable peephole.
943  bool m_PeepholeEnabled;
944  /// Enable/disable the projection layer.
945  bool m_ProjectionEnabled;
946  /// Enable/disable layer normalization.
947  bool m_LayerNormEnabled;
948 };
949 
950 /// A MeanDescriptor for the MeanLayer.
951 struct MeanDescriptor : BaseDescriptor
952 {
953  MeanDescriptor()
954  : m_Axis()
955  , m_KeepDims(false)
956  {}
957 
958  MeanDescriptor(const std::vector<unsigned int>& axis, bool keepDims)
959  : m_Axis(axis)
960  , m_KeepDims(keepDims)
961  {}
962 
963  bool operator ==(const MeanDescriptor& rhs) const
964  {
965  return m_Axis == rhs.m_Axis && m_KeepDims == rhs.m_KeepDims;
966  }
967 
968  /// Values for the dimensions to reduce.
969  std::vector<unsigned int> m_Axis;
970  /// Enable/disable keep dimensions. If true, then the reduced dimensions that are of length 1 are kept.
971  bool m_KeepDims;
972 };
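
Editor's note: a small configuration sketch, editor-supplied. Averaging over the spatial dimensions of an NHWC tensor while keeping them as size-1 dimensions:

#include <armnn/Descriptors.hpp>

armnn::MeanDescriptor MakeSpatialMean()
{
    // Reduce over dimensions 1 and 2 (H and W in NHWC) and keep them as
    // size-1 dimensions in the output.
    return armnn::MeanDescriptor({ 1, 2 }, /*keepDims=*/true);
}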
973 
974 /// A PadDescriptor for the PadLayer.
975 struct PadDescriptor : BaseDescriptor
976 {
977  PadDescriptor() : m_PadValue(0)
978  {}
979 
980  PadDescriptor(const std::vector<std::pair<unsigned int, unsigned int>>& padList, const float& padValue = 0)
981  : m_PadList(padList)
982  , m_PadValue(padValue)
983  {}
984 
985  bool operator ==(const PadDescriptor& rhs) const
986  {
987  return m_PadList == rhs.m_PadList && m_PadValue == rhs.m_PadValue;
988  }
989 
990  /// @brief Specifies the padding for input dimension.
991  /// First is the number of values to add before the tensor in the dimension.
992  /// Second is the number of values to add after the tensor in the dimension.
993  /// The number of pairs should match the number of dimensions in the input tensor.
994  std::vector<std::pair<unsigned int, unsigned int>> m_PadList;
995 
996  /// Optional value to use for padding, defaults to 0
997  float m_PadValue;
998 };
999 
1000 /// A SliceDescriptor for the SliceLayer.
1001 struct SliceDescriptor : BaseDescriptor
1002 {
1003  SliceDescriptor(const std::vector<unsigned int>& begin, const std::vector<unsigned int>& size)
1004  : m_Begin(begin)
1005  , m_Size(size)
1006  {}
1007 
1008  SliceDescriptor() : SliceDescriptor({}, {})
1009  {}
1010 
1011  bool operator ==(const SliceDescriptor& rhs) const
1012  {
1013  return m_Begin == rhs.m_Begin && m_Size == rhs.m_Size;
1014  }
1015 
1016  /// Beginning indices of the slice in each dimension.
1017  std::vector<unsigned int> m_Begin;
1018 
1019  /// Size of the slice in each dimension.
1020  std::vector<unsigned int> m_Size;
1021 };
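
Editor's note: an illustrative sketch, not from the original file. Reading a 1x2x2x3 block that starts at index (0, 1, 1, 0) of a 4D tensor:

#include <armnn/Descriptors.hpp>

armnn::SliceDescriptor MakeCentrePatchSlice()
{
    // m_Begin gives the starting index per dimension and m_Size the extent
    // taken from that index.
    return armnn::SliceDescriptor(/*begin=*/{ 0, 1, 1, 0 },
                                  /*size=*/ { 1, 2, 2, 3 });
}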
1022 
1023 /// A StackDescriptor for the StackLayer.
1024 struct StackDescriptor : BaseDescriptor
1025 {
1026  StackDescriptor()
1027  : m_Axis(0)
1028  , m_NumInputs(0)
1029  , m_InputShape()
1030  {}
1031 
1032  StackDescriptor(uint32_t axis, uint32_t numInputs, const TensorShape& inputShape)
1033  : m_Axis(axis)
1034  , m_NumInputs(numInputs)
1035  , m_InputShape(inputShape)
1036  {}
1037 
1038  bool operator ==(const StackDescriptor& rhs) const
1039  {
1040  return m_Axis == rhs.m_Axis &&
1041  m_NumInputs == rhs.m_NumInputs &&
1042  m_InputShape == rhs.m_InputShape;
1043  }
1044 
1045  /// 0-based axis along which to stack the input tensors.
1046  uint32_t m_Axis;
1047  /// Number of input tensors.
1048  uint32_t m_NumInputs;
1049  /// Required shape of all input tensors.
1050  TensorShape m_InputShape;
1051 };
1052 
1053 /// A StandInDescriptor for the StandIn layer
1054 struct StandInDescriptor : BaseDescriptor
1055 {
1056  StandInDescriptor() {};
1057 
1058  StandInDescriptor(uint32_t numInputs, uint32_t numOutputs)
1059  : m_NumInputs(numInputs)
1060  , m_NumOutputs(numOutputs)
1061  {}
1062 
1063  bool operator ==(const StandInDescriptor& rhs) const
1064  {
1065  return m_NumInputs == rhs.m_NumInputs &&
1066  m_NumOutputs == rhs.m_NumOutputs;
1067  }
1068 
1069  /// Number of input tensors
1070  uint32_t m_NumInputs = 0;
1071  /// Number of output tensors
1072  uint32_t m_NumOutputs = 0;
1073 };
1074 
1075 /// A StridedSliceDescriptor for the StridedSliceLayer.
1076 struct StridedSliceDescriptor : BaseDescriptor
1077 {
1078  StridedSliceDescriptor(const std::vector<int>& begin,
1079  const std::vector<int>& end,
1080  const std::vector<int>& stride)
1081  : m_Begin(begin)
1082  , m_End(end)
1083  , m_Stride(stride)
1084  , m_BeginMask(0)
1085  , m_EndMask(0)
1086  , m_ShrinkAxisMask(0)
1087  , m_EllipsisMask(0)
1088  , m_NewAxisMask(0)
1089  , m_DataLayout(DataLayout::NCHW)
1090  {}
1091 
1092  StridedSliceDescriptor()
1093  : StridedSliceDescriptor({}, {}, {})
1094  {}
1095 
1096  bool operator ==(const StridedSliceDescriptor& rhs) const
1097  {
1098  return m_Begin == rhs.m_Begin &&
1099  m_End == rhs.m_End &&
1100  m_Stride == rhs.m_Stride &&
1101  m_BeginMask == rhs.m_BeginMask &&
1102  m_EndMask == rhs.m_EndMask &&
1103  m_ShrinkAxisMask == rhs.m_ShrinkAxisMask &&
1104  m_EllipsisMask == rhs.m_EllipsisMask &&
1105  m_NewAxisMask == rhs.m_NewAxisMask &&
1106  m_DataLayout == rhs.m_DataLayout;
1107  }
1108 
1109  int GetStartForAxis(const TensorShape& inputShape, unsigned int axis) const;
1110  int GetStopForAxis(const TensorShape& inputShape,
1111  unsigned int axis,
1112  int startForAxis) const;
1113 
1114  /// Begin values for the input that will be sliced.
1115  std::vector<int> m_Begin;
1116  /// End values for the input that will be sliced.
1117  std::vector<int> m_End;
1118  /// Stride values for the input that will be sliced.
1119  std::vector<int> m_Stride;
1120 
1121  /// @brief Begin mask value. If set, then the begin is disregarded and the fullest
1122  /// range is used for the dimension.
1123  int32_t m_BeginMask;
1124  /// @brief End mask value. If set, then the end is disregarded and the fullest range
1125  /// is used for the dimension.
1126  int32_t m_EndMask;
1127  /// Shrink axis mask value. If set, the nth specification shrinks the dimensionality by 1.
1128  int32_t m_ShrinkAxisMask;
1129  /// Ellipsis mask value.
1130  int32_t m_EllipsisMask;
1131  /// @brief New axis mask value. If set, the begin, end and stride is disregarded and
1132  /// a new 1 dimension is inserted to this location of the output tensor.
1133  int32_t m_NewAxisMask;
1134 
1135  /// The data layout to be used (NCHW, NHWC).
1136  DataLayout m_DataLayout;
1137 };
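
Editor's note: a hedged illustration of the mask semantics described above, not an excerpt from ArmNN's tests; the per-dimension bit layout follows the usual strided-slice convention. The descriptor below takes every second element of dimension 0 and, via m_BeginMask, starts dimension 1 from its beginning regardless of m_Begin:

#include <armnn/Descriptors.hpp>

armnn::StridedSliceDescriptor MakeEveryOtherRow()
{
    armnn::StridedSliceDescriptor desc(/*begin=*/ { 0, 3 },
                                       /*end=*/   { 4, 4 },
                                       /*stride=*/{ 2, 1 });
    // Setting bit 1 of m_BeginMask makes the begin value for dimension 1 be
    // disregarded, so that dimension starts from its fullest range.
    desc.m_BeginMask = 1 << 1;
    return desc;
}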
1138 
1139 /// A PreCompiledDescriptor for the PreCompiledLayer.
1140 struct PreCompiledDescriptor : BaseDescriptor
1141 {
1142  PreCompiledDescriptor(unsigned int numInputSlots = 1u, unsigned int numOutputSlots = 1u)
1143  : m_NumInputSlots(numInputSlots), m_NumOutputSlots(numOutputSlots)
1144  {}
1145 
1146  ~PreCompiledDescriptor() = default;
1147 
1148  unsigned int m_NumInputSlots;
1149  unsigned int m_NumOutputSlots;
1150 };
1151 
1152 /// A QLstmDescriptor for the QLstmLayer.
1153 struct QLstmDescriptor : BaseDescriptor
1154 {
1155  QLstmDescriptor()
1156  : m_CellClip(0.0)
1157  , m_ProjectionClip(0.0)
1158  , m_CifgEnabled(true)
1159  , m_PeepholeEnabled(false)
1160  , m_ProjectionEnabled(false)
1161  , m_LayerNormEnabled(false)
1162  , m_InputIntermediateScale(0.0)
1163  , m_ForgetIntermediateScale(0.0)
1164  , m_CellIntermediateScale(0.0)
1165  , m_OutputIntermediateScale(0.0)
1166  , m_HiddenStateZeroPoint(0)
1167  , m_HiddenStateScale(0.0)
1168  {}
1169 
1170  bool operator ==(const QLstmDescriptor& rhs) const
1171  {
1172  return m_CellClip == rhs.m_CellClip &&
1173  m_ProjectionClip == rhs.m_ProjectionClip &&
1174  m_CifgEnabled == rhs.m_CifgEnabled &&
1175  m_PeepholeEnabled == rhs.m_PeepholeEnabled &&
1176  m_ProjectionEnabled == rhs.m_ProjectionEnabled &&
1177  m_LayerNormEnabled == rhs.m_LayerNormEnabled &&
1178  m_InputIntermediateScale == rhs.m_InputIntermediateScale &&
1179  m_ForgetIntermediateScale == rhs.m_ForgetIntermediateScale &&
1180  m_CellIntermediateScale == rhs.m_CellIntermediateScale &&
1181  m_OutputIntermediateScale == rhs.m_OutputIntermediateScale &&
1182  m_HiddenStateZeroPoint == rhs.m_HiddenStateZeroPoint &&
1183  m_HiddenStateScale == rhs.m_HiddenStateScale;
1184  }
1185 
1186  /// Clipping threshold value for the cell state.
1187  float m_CellClip;
1188  /// Clipping threshold value for the projection.
1189  float m_ProjectionClip;
1190  /// Enable/disable CIFG (coupled input & forget gate).
1191  bool m_CifgEnabled;
1192  /// Enable/disable peephole.
1193  bool m_PeepholeEnabled;
1194  /// Enable/disable the projection layer.
1195  bool m_ProjectionEnabled;
1196  /// Enable/disable layer normalization.
1197  bool m_LayerNormEnabled;
1198  /// Input intermediate quantization scale.
1199  float m_InputIntermediateScale;
1200  /// Forget intermediate quantization scale.
1201  float m_ForgetIntermediateScale;
1202  /// Cell intermediate quantization scale.
1203  float m_CellIntermediateScale;
1204  /// Output intermediate quantization scale.
1205  float m_OutputIntermediateScale;
1206  /// Hidden State zero point.
1207  int32_t m_HiddenStateZeroPoint;
1208  /// Hidden State quantization scale.
1209  float m_HiddenStateScale;
1210 };
1211 
1212 /// A TransposeConvolution2dDescriptor for the TransposeConvolution2dLayer.
1213 struct TransposeConvolution2dDescriptor : BaseDescriptor
1214 {
1215  TransposeConvolution2dDescriptor() :
1216  m_PadLeft(0),
1217  m_PadRight(0),
1218  m_PadTop(0),
1219  m_PadBottom(0),
1220  m_StrideX(0),
1221  m_StrideY(0),
1222  m_BiasEnabled(false),
1223  m_DataLayout(DataLayout::NCHW),
1224  m_OutputShapeEnabled(false)
1225  {}
1226 
1227  bool operator ==(const TransposeConvolution2dDescriptor& rhs) const
1228  {
1229  return m_PadLeft == rhs.m_PadLeft &&
1230  m_PadRight == rhs.m_PadRight &&
1231  m_PadTop == rhs.m_PadTop &&
1232  m_PadBottom == rhs.m_PadBottom &&
1233  m_StrideX == rhs.m_StrideX &&
1234  m_StrideY == rhs.m_StrideY &&
1235  m_BiasEnabled == rhs.m_BiasEnabled &&
1236  m_DataLayout == rhs.m_DataLayout &&
1237  m_OutputShapeEnabled == rhs.m_OutputShapeEnabled &&
1238  m_OutputShape == rhs.m_OutputShape;
1239  }
1240 
1241  /// Padding left value in the width dimension.
1242  uint32_t m_PadLeft;
1243  /// Padding right value in the width dimension.
1244  uint32_t m_PadRight;
1245  /// Padding top value in the height dimension.
1246  uint32_t m_PadTop;
1247  /// Padding bottom value in the height dimension.
1248  uint32_t m_PadBottom;
1249  /// Stride value when proceeding through input for the width dimension.
1250  uint32_t m_StrideX;
1251  /// Stride value when proceeding through input for the height dimension.
1252  uint32_t m_StrideY;
1253  /// Enable/disable bias.
1254  bool m_BiasEnabled;
1255  /// The data layout to be used (NCHW, NHWC).
1256  DataLayout m_DataLayout;
1257  /// Output shape if it has been specified.
1258  bool m_OutputShapeEnabled;
1259  std::vector<unsigned int> m_OutputShape;
1260 };
1261 
1262 /// A TransposeDescriptor for the TransposeLayer.
1263 struct TransposeDescriptor : BaseDescriptor
1264 {
1265  TransposeDescriptor()
1266  : m_DimMappings{}
1267  {}
1268 
1269  TransposeDescriptor(const PermutationVector& dimMappings)
1270  : m_DimMappings(dimMappings)
1271  {}
1272 
1273  bool operator ==(const TransposeDescriptor &rhs) const
1274  {
1275  return m_DimMappings.IsEqual(rhs.m_DimMappings);
1276  }
1277 
1278  /// @brief Indicates how to translate tensor elements from a given source into the target destination, when
1279  /// source and target potentially have different memory layouts e.g. {0U, 3U, 1U, 2U}.
1280  PermutationVector m_DimMappings;
1281 };
1282 
1283 /// A LogicalBinaryDescriptor for the LogicalBinaryLayer
1284 struct LogicalBinaryDescriptor : BaseDescriptor
1285 {
1286  LogicalBinaryDescriptor()
1287  : m_Operation(LogicalBinaryOperation::LogicalAnd)
1288  {}
1289 
1290  LogicalBinaryDescriptor(LogicalBinaryOperation operation)
1291  : m_Operation(operation)
1292  {}
1293 
1294  bool operator ==(const LogicalBinaryDescriptor &rhs) const
1295  {
1296  return m_Operation == rhs.m_Operation;
1297  }
1298 
1299  /// Specifies the logical operation to execute.
1300  LogicalBinaryOperation m_Operation;
1301 };
1302 
1303 /// A ReduceDescriptor for the REDUCE operators.
1304 struct ReduceDescriptor : BaseDescriptor
1305 {
1306  ReduceDescriptor()
1307  : m_KeepDims(false)
1308  , m_vAxis()
1309  , m_ReduceOperation(ReduceOperation::Sum)
1310  {}
1311 
1312  bool operator ==(const ReduceDescriptor& rhs) const
1313  {
1314  return m_KeepDims == rhs.m_KeepDims &&
1315  m_vAxis == rhs.m_vAxis &&
1316  m_ReduceOperation == rhs.m_ReduceOperation;
1317  }
1318 
1319  /// If true, the reduced dimensions are kept as size 1, so the output retains the input rank.
1320  bool m_KeepDims;
1321  /// The indices of the dimensions to reduce.
1322  std::vector<uint32_t> m_vAxis;
1323  /// Specifies the reduction operation to execute.
1324  ReduceOperation m_ReduceOperation;
1325 };
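
Editor's note: a closing sketch, editor-supplied. A sum reduction over a single axis that drops the reduced dimension from the output shape:

#include <armnn/Descriptors.hpp>
#include <cstdint>

armnn::ReduceDescriptor MakeSumOverAxis(uint32_t axis)
{
    armnn::ReduceDescriptor reduce;
    reduce.m_vAxis           = { axis }; // dimensions to reduce
    reduce.m_KeepDims        = false;    // drop the reduced dimension
    reduce.m_ReduceOperation = armnn::ReduceOperation::Sum;
    return reduce;
}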
1326 
1327 } // namespace armnn