ArmNN
 22.02
ArmComputeUtils.hpp
Go to the documentation of this file.
1 //
2 // Copyright © 2017 Arm Ltd. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 #pragma once
6 
7 #include <armnn/Descriptors.hpp>
8 #include <armnn/Tensor.hpp>
12 
13 #include <arm_compute/core/Types.h>
14 #include <arm_compute/runtime/FunctionDescriptors.h>
15 
16 #if defined(ARMCOMPUTENEON_ENABLED)
18 #endif
19 
20 #if defined(ARMCOMPUTECL_ENABLED)
22 #endif
23 
24 namespace armnn
25 {
26 
27 inline arm_compute::NormalizationLayerInfo
29  armnn::DataLayout dataLayout)
30 {
31  unsigned int depthDimension = dataLayout == armnn::DataLayout::NCHW ? 1 : 3;
32  const unsigned int depth = tensorInfo.GetShape()[depthDimension];
33 
34  // At the time of writing, {CL|Neon}L2Normalization performs the reduction only along dimension 0. This version of
35  // L2 Normalization always performs the reduction along the depth axis, though. Thus, we repurpose
36  // {CL|Neon}NormalizationLayers to act as depthwise L2 normalizations by carefully chosing the normalization
37  // parameters.
38  //
39  // Please refer to both the reference implementation of the normalization layer and the implementation of
40  // {CL|Neon}NormalizationLayer when checking the derivations for the parameter values below.
41 
42  // Make sure normalization covers the entire depth range. ACL requires the normalization size to be odd.
43  // CL: This does not result in extra kernel threads not doing any work: See usage of the RADIUS parameter in
44  // ACL's normalization_layer_cross_map() CL function.
45  const uint32_t normSize = depth * 2u + 1u;
46 
47  // See ACL's NormalizationLayerInfo::scale_coeff() definition.
48  // For the reference implementation, to make alpha_ become 1, we'd have to use alpha = normSize instead.
49  const float alpha = 1.0f;
50 
51  // Don't offset the reduction.
52  const float kappa = 0.0f;
53 
54  // pow(reduction, -0.5) = 1 / sqrt(reduction)
55  const float beta = 0.5f;
56 
57  return arm_compute::NormalizationLayerInfo(arm_compute::NormType::CROSS_MAP, normSize, alpha, beta, kappa, false);
58 }
59 
62 {
63  using AclActivationFunction = arm_compute::ActivationLayerInfo::ActivationFunction;
64 
65  switch (armnnFunction)
66  {
67  case ActivationFunction::Linear: return AclActivationFunction::LINEAR;
68  // Arm compute's 'logistic' function is non-parameterized, so it is exactly a sigmoid function.
69  case ActivationFunction::Sigmoid: return AclActivationFunction::LOGISTIC;
70  case ActivationFunction::ReLu: return AclActivationFunction::RELU;
71  case ActivationFunction::BoundedReLu: return AclActivationFunction::LU_BOUNDED_RELU;
72  case ActivationFunction::SoftReLu: return AclActivationFunction::SOFT_RELU;
73  case ActivationFunction::LeakyReLu: return AclActivationFunction::LEAKY_RELU;
74  case ActivationFunction::Abs: return AclActivationFunction::ABS;
75  case ActivationFunction::Sqrt: return AclActivationFunction::SQRT;
76  case ActivationFunction::Square: return AclActivationFunction::SQUARE;
77  case ActivationFunction::TanH: return AclActivationFunction::TANH;
78  case ActivationFunction::Elu: return AclActivationFunction::ELU;
79  case ActivationFunction::HardSwish: return AclActivationFunction::HARD_SWISH;
80  default: throw InvalidArgumentException("Unsupported activation function");
81  }
82 }
83 
84 inline arm_compute::ActivationLayerInfo
86 {
87  return arm_compute::ActivationLayerInfo(ConvertActivationFunctionToAclActivationFunction(actDesc.m_Function),
88  actDesc.m_A, actDesc.m_B);
89 }
90 
91 inline arm_compute::ActivationLayerInfo
93 {
94  if (activationDescPtr != nullptr)
95  {
96  return ConvertActivationDescriptorToAclActivationLayerInfo(static_cast<ActivationDescriptor>(
97  *activationDescPtr));
98  }
99  return arm_compute::ActivationLayerInfo();
100 }
101 
102 inline arm_compute::ActivationLayerInfo
104 {
105  const ActivationDescriptor* activationDescPtr = queueDescriptor.GetAdditionalInformation<ActivationDescriptor>();
106 
107  if (activationDescPtr != nullptr)
108  {
109  return ConvertActivationDescriptorToAclActivationLayerInfo(static_cast<ActivationDescriptor>(
110  *activationDescPtr));
111  }
112  return arm_compute::ActivationLayerInfo();
113 }
114 
116 {
117  switch (descriptor.m_Operation)
118  {
119  case ComparisonOperation::Greater: return arm_compute::ComparisonOperation::Greater;
120  case ComparisonOperation::GreaterOrEqual: return arm_compute::ComparisonOperation::GreaterEqual;
121  case ComparisonOperation::Less: return arm_compute::ComparisonOperation::Less;
122  case ComparisonOperation::LessOrEqual: return arm_compute::ComparisonOperation::LessEqual;
123  case ComparisonOperation::Equal: return arm_compute::ComparisonOperation::Equal;
124  case ComparisonOperation::NotEqual: return arm_compute::ComparisonOperation::NotEqual;
125  default: throw InvalidArgumentException("Unsupported comparison function");
126  }
127 }
128 
129 inline arm_compute::PoolingType ConvertPoolingAlgorithmToAclPoolingType(PoolingAlgorithm poolingAlgorithm)
130 {
131  using arm_compute::PoolingType;
132 
133  switch (poolingAlgorithm)
134  {
135  case PoolingAlgorithm::Max: return PoolingType::MAX;
136  case PoolingAlgorithm::Average: return PoolingType::AVG;
138  default: throw InvalidArgumentException("Unsupported pooling algorithm");
139  }
140 }
141 
143  rounding)
144 {
145  using arm_compute::DimensionRoundingType;
146 
147  switch (rounding)
148  {
149  case OutputShapeRounding::Ceiling: return DimensionRoundingType::CEIL;
150  case OutputShapeRounding::Floor: return DimensionRoundingType::FLOOR;
151  default: throw InvalidArgumentException("Unsupported Output Shape Rounding type");
152  }
153 }
154 
155 inline arm_compute::NormType
157 {
158  using arm_compute::NormType;
159  switch (channelType)
160  {
161  case NormalizationAlgorithmChannel::Across: return NormType::CROSS_MAP;
162  case NormalizationAlgorithmChannel::Within: return NormType::IN_MAP_2D;
163  default: throw InvalidArgumentException("Unsupported normalization algorithm channel type");
164  }
165 }
166 
167 inline arm_compute::FullyConnectedLayerInfo
169  const ActivationDescriptor* activationDesc)
170 {
171  arm_compute::FullyConnectedLayerInfo fc_info;
172  fc_info.transpose_weights = fullyConnectedDesc.m_TransposeWeightMatrix;
173  fc_info.activation_info = ConvertActivationDescriptorToAclActivationLayerInfo(activationDesc);
174  return fc_info;
175 }
176 
177 inline arm_compute::FullyConnectedLayerInfo
179  arm_compute::ActivationLayerInfo activationLayerInfo)
180 {
181  arm_compute::FullyConnectedLayerInfo fc_info;
182  fc_info.transpose_weights = fullyConnectedDesc.m_TransposeWeightMatrix;
183  fc_info.activation_info = activationLayerInfo;
184  return fc_info;
185 }
186 
187 inline arm_compute::InterpolationPolicy ConvertResizeMethodToAclInterpolationPolicy(ResizeMethod resizeMethod)
188 {
189  switch (resizeMethod)
190  {
192  return arm_compute::InterpolationPolicy::BILINEAR;
194  return arm_compute::InterpolationPolicy::NEAREST_NEIGHBOR;
195  default:
196  throw InvalidArgumentException("Unsupported resize method");
197  }
198 }
199 
200 template<typename T>
201 inline T ComputeSoftmaxAclAxis(const SoftmaxDescriptor& softmaxDesc, const armnn::TensorInfo& tensor)
202 {
203  // Detect the Android default value of -1 and return the ACL default value of 0.
204  if (softmaxDesc.m_Axis == -1)
205  {
206  return 0;
207  }
208 
209  unsigned int dim = tensor.GetNumDimensions();
210 
211  ARMNN_ASSERT(dim != 0);
212 
213  // Currently ArmNN support axis 1.
214  auto aclAxis = (static_cast<T>(dim) - 1);
215  aclAxis = aclAxis > 0 ? aclAxis -1 : aclAxis;
216 
217  return aclAxis;
218 }
219 
220 inline std::set<unsigned int> ComputeSplitAxis(const armnn::SplitterDescriptor& desc, const TensorShape& input)
221 {
222  unsigned int numSplit = desc.GetNumViews();
223  unsigned int numDimensions = desc.GetNumDimensions();
224  std::set<unsigned int> splitAxis;
225 
226  for (unsigned int i = 0; i < numSplit; ++i)
227  {
228  for (unsigned int dimIdx = 0; dimIdx < numDimensions; ++dimIdx)
229  {
230  if (desc.GetViewSizes(i)[dimIdx] != input[dimIdx])
231  {
232  splitAxis.insert(dimIdx);
233  }
234  }
235  }
236  return splitAxis;
237 }
238 
239 /// Function to convert ArmNN axis (left to right) to ACL axis (right to left) ranging from [-rank, rank)
240 inline int ComputeAclAxis(const int& armnnAxis, const armnn::TensorInfo& tensor)
241 {
242  int rank = static_cast<int>(tensor.GetNumDimensions());
243 
244  ARMNN_ASSERT(rank != 0);
245  ARMNN_ASSERT((-1 * rank) <= armnnAxis);
246  ARMNN_ASSERT(armnnAxis < rank);
247 
248  int sign = (armnnAxis < 0) ? -1 : 1;
249  int aclAxis = sign * rank - 1 - armnnAxis;
250 
251  return aclAxis;
252 }
253 
254 /// Function to convert axis to its positive equivalent value.
255 /// [-rank, rank) --> [0, rank)
256 inline unsigned int ComputePositiveAxis(const int& axis, const armnn::TensorInfo& tensor)
257 {
258  int rank = static_cast<int>(tensor.GetNumDimensions());
259 
260  ARMNN_ASSERT(rank != 0);
261  ARMNN_ASSERT((-1 * rank) <= axis);
262  ARMNN_ASSERT(axis < rank);
263 
264  int positiveAxis = (axis < 0) ? rank + axis : axis;
265  return static_cast<unsigned int>(positiveAxis);
266 }
267 
268 /// Utility function used to setup an arm_compute::Conv3dInfo object from convolution3d descriptor.
269 inline arm_compute::Conv3dInfo ComputeConv3DInfo(const armnn::Convolution3dDescriptor descriptor,
270  bool isFastMathEnabled,
271  const ActivationDescriptor* activationDescriptor)
272 {
273  const arm_compute::Size3D stride{descriptor.m_StrideX, descriptor.m_StrideY, descriptor.m_StrideZ};
274  const arm_compute::Padding3D padding{descriptor.m_PadLeft, descriptor.m_PadRight,
275  descriptor.m_PadTop, descriptor.m_PadBottom,
276  descriptor.m_PadFront, descriptor.m_PadBack};
277  const arm_compute::Size3D dilation{descriptor.m_DilationX, descriptor.m_DilationY, descriptor.m_DilationZ};
278 
279  const arm_compute::ActivationLayerInfo activationInfo =
281  const auto roundType = arm_compute::DimensionRoundingType::FLOOR;
282 
283  return arm_compute::Conv3dInfo{stride, padding, activationInfo, dilation, roundType, isFastMathEnabled};
284 }
285 
286 inline arm_compute::Conv3dInfo ComputeConv3DInfo(const armnn::Convolution3dQueueDescriptor queueDescriptor,
287  bool isFastMathEnabled)
288 {
289  auto descriptor = queueDescriptor.m_Parameters;
290  const arm_compute::Size3D stride{descriptor.m_StrideX, descriptor.m_StrideY, descriptor.m_StrideZ};
291  const arm_compute::Padding3D padding{descriptor.m_PadLeft, descriptor.m_PadRight,
292  descriptor.m_PadTop, descriptor.m_PadBottom,
293  descriptor.m_PadFront, descriptor.m_PadBack};
294  const arm_compute::Size3D dilation{descriptor.m_DilationX, descriptor.m_DilationY, descriptor.m_DilationZ};
295 
296  const arm_compute::ActivationLayerInfo activationInfo =
298  const auto roundType = arm_compute::DimensionRoundingType::FLOOR;
299 
300  return arm_compute::Conv3dInfo{stride, padding, activationInfo, dilation, roundType, isFastMathEnabled};
301 }
302 
304 {
305  switch (paddingMode)
306  {
307  case PaddingMode::Constant: return arm_compute::PaddingMode::CONSTANT;
308  case PaddingMode::Reflect: return arm_compute::PaddingMode::REFLECT;
309  case PaddingMode::Symmetric: return arm_compute::PaddingMode::SYMMETRIC;
310  default: throw InvalidArgumentException("Unsupported Padding Mode");
311  }
312 }
313 
314 inline arm_compute::ReductionOperation ConvertReductionOperationToAcl(const ReduceDescriptor& descriptor)
315 {
316  switch (descriptor.m_ReduceOperation)
317  {
318  case ReduceOperation::Sum: return arm_compute::ReductionOperation::SUM;
319  case ReduceOperation::Mean: return arm_compute::ReductionOperation::MEAN_SUM;
320  case ReduceOperation::Max: return arm_compute::ReductionOperation::MAX;
321  case ReduceOperation::Min: return arm_compute::ReductionOperation::MIN;
322  case ReduceOperation::Prod: return arm_compute::ReductionOperation::PROD;
323  default: throw InvalidArgumentException("Unsupported Reduction operation");
324  }
325 }
326 
327 /// Function to compute the output tensor shape based on the axes and if keepDims is set.
329  const std::vector<uint32_t>& vAxis,
330  const bool keepDims)
331 {
332  auto reducedTensorInfo = input;
333  unsigned int rank = reducedTensorInfo.GetNumDimensions();
334  unsigned int outputRank = 0;
335  // Calculate output dimension
336  if (keepDims)
337  {
338  outputRank = rank;
339  }
340  else if (vAxis.empty())
341  {
342  outputRank = 1;
343  }
344  else if (vAxis.size() > reducedTensorInfo.GetNumDimensions())
345  {
346  throw LayerValidationException("ReduceLayer: Dimensions to reduce can not be bigger than input dimensions");
347  }
348  else
349  {
350  outputRank = reducedTensorInfo.GetNumDimensions() - armnn::numeric_cast<unsigned int>(vAxis.size());
351  if (outputRank == 0)
352  {
353  outputRank = 1;
354  }
355  }
356  std::vector<unsigned int> dimSizes(outputRank, 1);
357  if (!vAxis.empty())
358  {
359  // Skip the dimension that has been reduced unless keepDims is true.
360  unsigned int outputIndex = 0;
361  for (unsigned int i = 0; i < reducedTensorInfo.GetNumDimensions(); ++i)
362  {
363  if (std::find(vAxis.begin(), vAxis.end(), i) == vAxis.end())
364  {
365  dimSizes[outputIndex] = armnn::numeric_cast<unsigned int>(reducedTensorInfo.GetShape()[i]);
366  ++outputIndex;
367  }
368  else if (keepDims)
369  {
370  dimSizes[outputIndex] = 1;
371  ++outputIndex;
372  }
373  }
374  }
375  const TensorShape inferredShape = TensorShape(outputRank, dimSizes.data());
376  reducedTensorInfo.SetShape(inferredShape);
377  return reducedTensorInfo;
378 }
379 
/// Macro function check if layer with multiple axes is supported on each backend
///
/// Expands to a loop that validates a multi-axis reduction one axis at a time, since backends
/// may only support single-axis reduction. Each iteration builds a single-axis copy of 'desc',
/// asks 'func' (a backend IsSupported query taking input info, output info and descriptor)
/// whether that step is supported, and writes the answer into 'status'; the loop stops early on
/// the first unsupported step. Intermediate output shapes are recomputed with
/// ComputeReductionTensorShape, and 'recalulatedAxis' compensates for dimensions removed when
/// m_KeepDims is false (each earlier reduction shifts later axis indices down by one).
/// NOTE(review): no `//` comments appear inside the macro body on purpose — a comment before a
/// trailing backslash would swallow the continuation.
#define IS_MULTI_AXES_REDUCE_SUPPORTED(func, input, desc, status) \
    armnn::TensorInfo inputTensorInfo = input; \
    unsigned int recalulatedAxis = 0; \
    std::vector<uint32_t> axes; \
\
    for (unsigned int i = 0; i != desc.m_vAxis.size(); ++i) \
    { \
        axes.emplace_back(desc.m_vAxis[i]); \
\
        const armnn::TensorInfo& reducedTensorInfo = \
            ComputeReductionTensorShape(input, axes, desc.m_KeepDims); \
\
        std::vector<uint32_t> singleAxis(1, desc.m_vAxis[i] - recalulatedAxis); \
\
        armnn::ReduceDescriptor newReduceDescriptor = desc; \
        newReduceDescriptor.m_vAxis.assign(singleAxis.begin(), singleAxis.end()); \
\
        status = func(inputTensorInfo, reducedTensorInfo, newReduceDescriptor); \
        if (!status) \
        { \
            break; \
        } \
\
        if (!desc.m_KeepDims) \
        { \
            recalulatedAxis++; \
        } \
\
        inputTensorInfo = reducedTensorInfo; \
    }
411 
412 } // namespace armnn
A ViewsDescriptor for the SplitterLayer.
arm_compute::InterpolationPolicy ConvertResizeMethodToAclInterpolationPolicy(ResizeMethod resizeMethod)
arm_compute::Conv3dInfo ComputeConv3DInfo(const armnn::Convolution3dDescriptor descriptor, bool isFastMathEnabled, const ActivationDescriptor *activationDescriptor)
Utility function used to setup an arm_compute::Conv3dInfo object from convolution3d descriptor...
int ComputeAclAxis(const int &armnnAxis, const armnn::TensorInfo &tensor)
Function to convert ArmNN axis (left to right) to ACL axis (right to left) ranging from [-rank...
DataLayout
Definition: Types.hpp:49
int m_Axis
Scalar, defaulted to the last index (-1), specifying the dimension the activation will be performed o...
const TensorShape & GetShape() const
Definition: Tensor.hpp:191
arm_compute::PoolingType ConvertPoolingAlgorithmToAclPoolingType(PoolingAlgorithm poolingAlgorithm)
uint32_t m_PadBack
Padding back value in the depth dimension.
uint32_t GetNumDimensions() const
Get the number of dimensions.
A ComparisonDescriptor for the ComparisonLayer.
Definition: Descriptors.hpp:89
arm_compute::NormalizationLayerInfo CreateAclNormalizationLayerInfoForL2Normalization(const armnn::TensorInfo &tensorInfo, armnn::DataLayout dataLayout)
bool m_TransposeWeightMatrix
Enable/disable transpose weight matrix.
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
arm_compute::ActivationLayerInfo ConvertAdditionalInfoToAclActivationLayerInfo(const QueueDescriptor &queueDescriptor)
NormalizationAlgorithmChannel
Definition: Types.hpp:180
arm_compute::NormType ConvertNormalizationAlgorithmChannelToAclNormType(NormalizationAlgorithmChannel channelType)
uint32_t m_PadBottom
Padding bottom value in the height dimension.
uint32_t GetNumViews() const
Get the number of views.
ReduceOperation m_ReduceOperation
Specifies the reduction operation to execute.
Copyright (c) 2021 ARM Limited and Contributors.
std::set< unsigned int > ComputeSplitAxis(const armnn::SplitterDescriptor &desc, const TensorShape &input)
PoolingAlgorithm
Definition: Types.hpp:123
uint32_t m_DilationX
Dilation along x axis.
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
unsigned int ComputePositiveAxis(const int &axis, const armnn::TensorInfo &tensor)
Function to convert axis to its positive equivalent value.
arm_compute::ActivationLayerInfo::ActivationFunction ConvertActivationFunctionToAclActivationFunction(ActivationFunction armnnFunction)
arm_compute::ReductionOperation ConvertReductionOperationToAcl(const ReduceDescriptor &descriptor)
ComparisonOperation
Definition: Types.hpp:95
arm_compute::DimensionRoundingType ConvertOutputShapeRoundingToAclDimensionRoundingType(OutputShapeRounding rounding)
A ReduceDescriptor for the REDUCE operators.
A FullyConnectedDescriptor for the FullyConnectedLayer.
arm_compute::FullyConnectedLayerInfo ConvertFullyConnectedDescriptorToAclFullyConnectedLayerInfo(const FullyConnectedDescriptor &fullyConnectedDesc, const ActivationDescriptor *activationDesc)
#define ARMNN_ASSERT(COND)
Definition: Assert.hpp:14
uint32_t m_PadFront
Padding front value in the depth dimension.
PaddingMode
The padding mode controls whether the padding should be filled with constant values (Constant)...
Definition: Types.hpp:173
An ActivationDescriptor for the ActivationLayer.
Definition: Descriptors.hpp:36
arm_compute::PaddingMode ConvertPaddingModeToAcl(const PaddingMode &paddingMode)
min(a, max(b, input)) ReLu1 & ReLu6.
uint32_t m_PadLeft
Padding left value in the width dimension.
A Convolution3dDescriptor for the Convolution3dLayer.
uint32_t m_PadRight
Padding right value in the width dimension.
ComparisonOperation m_Operation
Specifies the comparison operation to execute.
OutputShapeRounding
Definition: Types.hpp:194
const uint32_t * GetViewSizes(uint32_t idx) const
Get the view sizes at the int value idx.
float m_A
Alpha upper bound value used by the activation functions. (BoundedReLu, Linear, TanH, Elu).
Definition: Descriptors.hpp:61
const T * GetAdditionalInformation() const
uint32_t m_PadTop
Padding top value in the height dimension.
ResizeMethod
Definition: Types.hpp:139
std::enable_if_t< std::is_unsigned< Source >::value &&std::is_unsigned< Dest >::value, Dest > numeric_cast(Source source)
Definition: NumericCast.hpp:35
arm_compute::ComparisonOperation ConvertComparisonOperationToAcl(const ComparisonDescriptor &descriptor)
unsigned int GetNumDimensions() const
Definition: Tensor.hpp:195
const TensorInfo ComputeReductionTensorShape(const armnn::TensorInfo &input, const std::vector< uint32_t > &vAxis, const bool keepDims)
Function to compute the output tensor shape based on the axes and if keepDims is set.
T ComputeSoftmaxAclAxis(const SoftmaxDescriptor &softmaxDesc, const armnn::TensorInfo &tensor)
uint32_t m_DilationZ
Dilation along z axis.
float m_B
Beta lower bound value used by the activation functions. (BoundedReLu, Linear, TanH).
Definition: Descriptors.hpp:63
A SoftmaxDescriptor for the SoftmaxLayer.
uint32_t m_StrideZ
Stride value when proceeding through input for the depth dimension.
ActivationFunction m_Function
The activation function to use (Sigmoid, TanH, Linear, ReLu, BoundedReLu, SoftReLu, LeakyReLu, Abs, Sqrt, Square, Elu).
Definition: Descriptors.hpp:59
uint32_t m_DilationY
Dilation along y axis.
arm_compute::ActivationLayerInfo ConvertActivationDescriptorToAclActivationLayerInfo(const ActivationDescriptor &actDesc)
ActivationFunction
Definition: Types.hpp:73