ArmNN
 21.02
ArmComputeUtils.hpp
Go to the documentation of this file.
1 //
2 // Copyright © 2017 Arm Ltd. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 #pragma once
6 
7 #include <armnn/Descriptors.hpp>
8 #include <armnn/Tensor.hpp>
11 
12 #include <arm_compute/core/Types.h>
13 
14 namespace armnn
15 {
16 
17 inline arm_compute::NormalizationLayerInfo
19  armnn::DataLayout dataLayout)
20 {
21  unsigned int depthDimension = dataLayout == armnn::DataLayout::NCHW ? 1 : 3;
22  const unsigned int depth = tensorInfo.GetShape()[depthDimension];
23 
24  // At the time of writing, {CL|Neon}L2Normalization performs the reduction only along dimension 0. This version of
25  // L2 Normalization always performs the reduction along the depth axis, though. Thus, we repurpose
26  // {CL|Neon}NormalizationLayers to act as depthwise L2 normalizations by carefully chosing the normalization
27  // parameters.
28  //
29  // Please refer to both the reference implementation of the normalization layer and the implementation of
30  // {CL|Neon}NormalizationLayer when checking the derivations for the parameter values below.
31 
32  // Make sure normalization covers the entire depth range. ACL requires the normalization size to be odd.
33  // CL: This does not result in extra kernel threads not doing any work: See usage of the RADIUS parameter in
34  // ACL's normalization_layer_cross_map() CL function.
35  const uint32_t normSize = depth * 2u + 1u;
36 
37  // See ACL's NormalizationLayerInfo::scale_coeff() definition.
38  // For the reference implementation, to make alpha_ become 1, we'd have to use alpha = normSize instead.
39  const float alpha = 1.0f;
40 
41  // Don't offset the reduction.
42  const float kappa = 0.0f;
43 
44  // pow(reduction, -0.5) = 1 / sqrt(reduction)
45  const float beta = 0.5f;
46 
47  return arm_compute::NormalizationLayerInfo(arm_compute::NormType::CROSS_MAP, normSize, alpha, beta, kappa, false);
48 }
49 
52 {
53  using AclActivationFunction = arm_compute::ActivationLayerInfo::ActivationFunction;
54 
55  switch (armnnFunction)
56  {
57  case ActivationFunction::Linear: return AclActivationFunction::LINEAR;
58  // Arm compute's 'logistic' function is non-parameterized, so it is exactly a sigmoid function.
59  case ActivationFunction::Sigmoid: return AclActivationFunction::LOGISTIC;
60  case ActivationFunction::ReLu: return AclActivationFunction::RELU;
61  case ActivationFunction::BoundedReLu: return AclActivationFunction::LU_BOUNDED_RELU;
62  case ActivationFunction::SoftReLu: return AclActivationFunction::SOFT_RELU;
63  case ActivationFunction::LeakyReLu: return AclActivationFunction::LEAKY_RELU;
64  case ActivationFunction::Abs: return AclActivationFunction::ABS;
65  case ActivationFunction::Sqrt: return AclActivationFunction::SQRT;
66  case ActivationFunction::Square: return AclActivationFunction::SQUARE;
67  case ActivationFunction::TanH: return AclActivationFunction::TANH;
68  case ActivationFunction::Elu: return AclActivationFunction::ELU;
69  case ActivationFunction::HardSwish: return AclActivationFunction::HARD_SWISH;
70  default: throw InvalidArgumentException("Unsupported activation function");
71  }
72 }
73 
74 inline arm_compute::ActivationLayerInfo
76 {
77  return arm_compute::ActivationLayerInfo(ConvertActivationFunctionToAclActivationFunction(actDesc.m_Function),
78  actDesc.m_A, actDesc.m_B);
79 }
80 
81 inline arm_compute::ActivationLayerInfo
83 {
84  if (activationDescPtr != nullptr)
85  {
86  return ConvertActivationDescriptorToAclActivationLayerInfo(static_cast<ActivationDescriptor>(
87  *activationDescPtr));
88  }
89  return arm_compute::ActivationLayerInfo();
90 }
91 
92 inline arm_compute::ActivationLayerInfo
94 {
95  const ActivationDescriptor* activationDescPtr = queueDescriptor.GetAdditionalInformation<ActivationDescriptor>();
96 
97  if (activationDescPtr != nullptr)
98  {
99  return ConvertActivationDescriptorToAclActivationLayerInfo(static_cast<ActivationDescriptor>(
100  *activationDescPtr));
101  }
102  return arm_compute::ActivationLayerInfo();
103 }
104 
106 {
107  switch (descriptor.m_Operation)
108  {
109  case ComparisonOperation::Greater: return arm_compute::ComparisonOperation::Greater;
110  case ComparisonOperation::GreaterOrEqual: return arm_compute::ComparisonOperation::GreaterEqual;
111  case ComparisonOperation::Less: return arm_compute::ComparisonOperation::Less;
112  case ComparisonOperation::LessOrEqual: return arm_compute::ComparisonOperation::LessEqual;
113  case ComparisonOperation::Equal: return arm_compute::ComparisonOperation::Equal;
114  case ComparisonOperation::NotEqual: return arm_compute::ComparisonOperation::NotEqual;
115  default: throw InvalidArgumentException("Unsupported comparison function");
116  }
117 }
118 
119 inline arm_compute::PoolingType ConvertPoolingAlgorithmToAclPoolingType(PoolingAlgorithm poolingAlgorithm)
120 {
121  using arm_compute::PoolingType;
122 
123  switch (poolingAlgorithm)
124  {
125  case PoolingAlgorithm::Max: return PoolingType::MAX;
126  case PoolingAlgorithm::Average: return PoolingType::AVG;
128  default: throw InvalidArgumentException("Unsupported pooling algorithm");
129  }
130 }
131 
133  rounding)
134 {
135  using arm_compute::DimensionRoundingType;
136 
137  switch (rounding)
138  {
139  case OutputShapeRounding::Ceiling: return DimensionRoundingType::CEIL;
140  case OutputShapeRounding::Floor: return DimensionRoundingType::FLOOR;
141  default: throw InvalidArgumentException("Unsupported Output Shape Rounding type");
142  }
143 }
144 
145 inline arm_compute::NormType
147 {
148  using arm_compute::NormType;
149  switch (channelType)
150  {
151  case NormalizationAlgorithmChannel::Across: return NormType::CROSS_MAP;
152  case NormalizationAlgorithmChannel::Within: return NormType::IN_MAP_2D;
153  default: throw InvalidArgumentException("Unsupported normalization algorithm channel type");
154  }
155 }
156 
157 inline arm_compute::FullyConnectedLayerInfo
159  const ActivationDescriptor* activationDesc)
160 {
161  arm_compute::FullyConnectedLayerInfo fc_info;
162  fc_info.transpose_weights = fullyConnectedDesc.m_TransposeWeightMatrix;
163  fc_info.activation_info = ConvertActivationDescriptorToAclActivationLayerInfo(activationDesc);
164  return fc_info;
165 }
166 
167 inline arm_compute::FullyConnectedLayerInfo
169  arm_compute::ActivationLayerInfo activationLayerInfo)
170 {
171  arm_compute::FullyConnectedLayerInfo fc_info;
172  fc_info.transpose_weights = fullyConnectedDesc.m_TransposeWeightMatrix;
173  fc_info.activation_info = activationLayerInfo;
174  return fc_info;
175 }
176 
177 inline arm_compute::InterpolationPolicy ConvertResizeMethodToAclInterpolationPolicy(ResizeMethod resizeMethod)
178 {
179  switch (resizeMethod)
180  {
182  return arm_compute::InterpolationPolicy::BILINEAR;
184  return arm_compute::InterpolationPolicy::NEAREST_NEIGHBOR;
185  default:
186  throw InvalidArgumentException("Unsupported resize method");
187  }
188 }
189 
190 template<typename T>
191 inline T ComputeSoftmaxAclAxis(const SoftmaxDescriptor& softmaxDesc, const armnn::TensorInfo& tensor)
192 {
193  // Detect the Android default value of -1 and return the ACL default value of 0.
194  if (softmaxDesc.m_Axis == -1)
195  {
196  return 0;
197  }
198 
199  unsigned int dim = tensor.GetNumDimensions();
200 
201  ARMNN_ASSERT(dim != 0);
202 
203  // Currently ArmNN support axis 1.
204  auto aclAxis = (static_cast<T>(dim) - 1);
205  aclAxis = aclAxis > 0 ? aclAxis -1 : aclAxis;
206 
207  return aclAxis;
208 }
209 
210 inline std::set<unsigned int> ComputeSplitAxis(const armnn::SplitterDescriptor& desc, const TensorShape& input)
211 {
212  unsigned int numSplit = desc.GetNumViews();
213  unsigned int numDimensions = desc.GetNumDimensions();
214  std::set<unsigned int> splitAxis;
215 
216  for (unsigned int i = 0; i < numSplit; ++i)
217  {
218  for (unsigned int dimIdx = 0; dimIdx < numDimensions; ++dimIdx)
219  {
220  if (desc.GetViewSizes(i)[dimIdx] != input[dimIdx])
221  {
222  splitAxis.insert(dimIdx);
223  }
224  }
225  }
226  return splitAxis;
227 }
228 
229 /// Function to convert ArmNN axis (left to right) to ACL axis (right to left) ranging from [-rank, rank)
230 inline int ComputeAclAxis(const int& armnnAxis, const armnn::TensorInfo& tensor)
231 {
232  int rank = static_cast<int>(tensor.GetNumDimensions());
233 
234  ARMNN_ASSERT(rank != 0);
235  ARMNN_ASSERT((-1 * rank) <= armnnAxis);
236  ARMNN_ASSERT(armnnAxis < rank);
237 
238  int sign = (armnnAxis < 0) ? -1 : 1;
239  int aclAxis = sign * rank - 1 - armnnAxis;
240 
241  return aclAxis;
242 }
243 
244 /// Function to convert axis to its positive equivalent value.
245 /// [-rank, rank) --> [0, rank)
246 inline unsigned int ComputePositiveAxis(const int& axis, const armnn::TensorInfo& tensor)
247 {
248  int rank = static_cast<int>(tensor.GetNumDimensions());
249 
250  ARMNN_ASSERT(rank != 0);
251  ARMNN_ASSERT((-1 * rank) <= axis);
252  ARMNN_ASSERT(axis < rank);
253 
254  int positiveAxis = (axis < 0) ? rank + axis : axis;
255  return static_cast<unsigned int>(positiveAxis);
256 }
257 
258 inline arm_compute::ReductionOperation ConvertReductionOperationToAcl(const ReduceDescriptor& descriptor)
259 {
260  switch (descriptor.m_ReduceOperation)
261  {
262  case ReduceOperation::Sum: return arm_compute::ReductionOperation::SUM;
263  case ReduceOperation::Mean: return arm_compute::ReductionOperation::MEAN_SUM;
264  case ReduceOperation::Max: return arm_compute::ReductionOperation::MAX;
265  case ReduceOperation::Min: return arm_compute::ReductionOperation::MIN;
266  default: throw InvalidArgumentException("Unsupported Reduction operation");
267  }
268 }
269 
270 } // namespace armnn
A ViewsDescriptor for the SplitterLayer.
arm_compute::InterpolationPolicy ConvertResizeMethodToAclInterpolationPolicy(ResizeMethod resizeMethod)
int ComputeAclAxis(const int &armnnAxis, const armnn::TensorInfo &tensor)
Function to convert ArmNN axis (left to right) to ACL axis (right to left) ranging from [-rank, rank).
DataLayout
Definition: Types.hpp:50
int m_Axis
Scalar, defaulted to the last index (-1), specifying the dimension the activation will be performed on.
const TensorShape & GetShape() const
Definition: Tensor.hpp:187
arm_compute::PoolingType ConvertPoolingAlgorithmToAclPoolingType(PoolingAlgorithm poolingAlgorithm)
uint32_t GetNumDimensions() const
Get the number of dimensions.
A ComparisonDescriptor for the ComparisonLayer.
Definition: Descriptors.hpp:78
arm_compute::NormalizationLayerInfo CreateAclNormalizationLayerInfoForL2Normalization(const armnn::TensorInfo &tensorInfo, armnn::DataLayout dataLayout)
bool m_TransposeWeightMatrix
Enable/disable transpose weight matrix.
arm_compute::ActivationLayerInfo ConvertAdditionalInfoToAclActivationLayerInfo(const QueueDescriptor &queueDescriptor)
NormalizationAlgorithmChannel
Definition: Types.hpp:149
arm_compute::NormType ConvertNormalizationAlgorithmChannelToAclNormType(NormalizationAlgorithmChannel channelType)
uint32_t GetNumViews() const
Get the number of views.
ReduceOperation m_ReduceOperation
Specifies the reduction operation to execute.
Copyright (c) 2021 ARM Limited and Contributors.
std::set< unsigned int > ComputeSplitAxis(const armnn::SplitterDescriptor &desc, const TensorShape &input)
PoolingAlgorithm
Definition: Types.hpp:104
unsigned int ComputePositiveAxis(const int &axis, const armnn::TensorInfo &tensor)
Function to convert axis to its positive equivalent value.
arm_compute::ActivationLayerInfo::ActivationFunction ConvertActivationFunctionToAclActivationFunction(ActivationFunction armnnFunction)
arm_compute::ReductionOperation ConvertReductionOperationToAcl(const ReduceDescriptor &descriptor)
ComparisonOperation
Definition: Types.hpp:78
arm_compute::DimensionRoundingType ConvertOutputShapeRoundingToAclDimensionRoundingType(OutputShapeRounding rounding)
A ReduceDescriptor for the REDUCE operators.
A FullyConnectedDescriptor for the FullyConnectedLayer.
arm_compute::FullyConnectedLayerInfo ConvertFullyConnectedDescriptorToAclFullyConnectedLayerInfo(const FullyConnectedDescriptor &fullyConnectedDesc, const ActivationDescriptor *activationDesc)
#define ARMNN_ASSERT(COND)
Definition: Assert.hpp:14
An ActivationDescriptor for the ActivationLayer.
Definition: Descriptors.hpp:25
min(a, max(b, input)) ReLu1 & ReLu6.
ComparisonOperation m_Operation
Specifies the comparison operation to execute.
Definition: Descriptors.hpp:94
OutputShapeRounding
Definition: Types.hpp:163
const uint32_t * GetViewSizes(uint32_t idx) const
Get the view sizes at index idx.
float m_A
Alpha upper bound value used by the activation functions. (BoundedReLu, Linear, TanH, Elu).
Definition: Descriptors.hpp:50
const T * GetAdditionalInformation() const
ResizeMethod
Definition: Types.hpp:119
arm_compute::ComparisonOperation ConvertComparisonOperationToAcl(const ComparisonDescriptor &descriptor)
unsigned int GetNumDimensions() const
Definition: Tensor.hpp:191
T ComputeSoftmaxAclAxis(const SoftmaxDescriptor &softmaxDesc, const armnn::TensorInfo &tensor)
float m_B
Beta lower bound value used by the activation functions. (BoundedReLu, Linear, TanH).
Definition: Descriptors.hpp:52
A SoftmaxDescriptor for the SoftmaxLayer.
ActivationFunction m_Function
The activation function to use (Sigmoid, TanH, Linear, ReLu, BoundedReLu, SoftReLu, LeakyReLu, Abs, Sqrt, Square, Elu).
Definition: Descriptors.hpp:48
arm_compute::ActivationLayerInfo ConvertActivationDescriptorToAclActivationLayerInfo(const ActivationDescriptor &actDesc)
ActivationFunction
Definition: Types.hpp:56