ArmNN 20.05
ArmComputeTensorUtils.cpp
//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include <aclCommon/ArmComputeTensorUtils.hpp>
#include <aclCommon/ArmComputeUtils.hpp>

#include "armnn/Exceptions.hpp"
#include <armnn/Descriptors.hpp>

namespace armnn
{
namespace armcomputetensorutils
{

arm_compute::DataType GetArmComputeDataType(armnn::DataType dataType, bool multiScales)
{
    switch(dataType)
    {
        case armnn::DataType::BFloat16:
            return arm_compute::DataType::BFLOAT16;
        case armnn::DataType::Boolean:
            return arm_compute::DataType::U8;
        case armnn::DataType::Float16:
            return arm_compute::DataType::F16;
        case armnn::DataType::Float32:
            return arm_compute::DataType::F32;
        case armnn::DataType::QAsymmS8:
            return arm_compute::DataType::QASYMM8_SIGNED;
        case armnn::DataType::QAsymmU8:
            return arm_compute::DataType::QASYMM8;
        case armnn::DataType::QSymmS16:
            return arm_compute::DataType::QSYMM16;
        case armnn::DataType::QSymmS8:
        {
            return multiScales ? arm_compute::DataType::QSYMM8_PER_CHANNEL : arm_compute::DataType::QSYMM8;
        }
        ARMNN_NO_DEPRECATE_WARN_BEGIN
        case armnn::DataType::QuantizedSymm8PerAxis:
            return arm_compute::DataType::QSYMM8_PER_CHANNEL;
        ARMNN_NO_DEPRECATE_WARN_END
        case armnn::DataType::Signed32:
            return arm_compute::DataType::S32;
        default:
            ARMNN_ASSERT_MSG(false, "Unknown data type");
            return arm_compute::DataType::UNKNOWN;
    }
}

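// Illustrative usage sketch: the multiScales argument only changes the result for signed
// symmetric 8-bit data, where it selects the per-channel variant.
//
//     GetArmComputeDataType(armnn::DataType::Float32, false); // -> arm_compute::DataType::F32
//     GetArmComputeDataType(armnn::DataType::QSymmS8, false); // -> arm_compute::DataType::QSYMM8
//     GetArmComputeDataType(armnn::DataType::QSymmS8, true);  // -> arm_compute::DataType::QSYMM8_PER_CHANNEL
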
arm_compute::Coordinates BuildArmComputeReductionCoordinates(size_t inputDimensions,
                                                             unsigned int originalInputRank,
                                                             const std::vector<unsigned int>& armnnAxes)
{
    arm_compute::Coordinates outAclCoords;

    if (armnnAxes.empty())
    {
        // If no reduction axes were provided, then the input must be reduced along all dimensions.
        // Since Compute Library does not accept an empty vector as the reduction dimensions, we then
        // manually create a vector including all the input dimensions (in reversed order) as:
        //
        // { inputDimensions - 1, inputDimensions - 2, ..., 1, 0 }
        //
        outAclCoords.set_num_dimensions(inputDimensions);
        std::generate(outAclCoords.begin(), outAclCoords.end(), [d = inputDimensions - 1] () mutable { return d--; });
    }
    else
    {
        // Create a vector of reduction dimensions (in reversed order) with the given reduction axes.
        //
        // Adjust the given reduction axes according to the original rank of the input tensor (before ACL applied any
        // dimension correction).
        // For example, if the input tensor originally had 4 dimensions, and one of the reduction axes was 2, then the
        // new value for that reduction axis should be 1.
        //
        // Example:
        // ArmNN input shape = { 1, 1, 3, 2 } -> ACL input shape = { 2, 3 }
        // ArmNN reduction axis = { 2 }       -> ACL reduction axis = { 1 }
        // ArmNN reduction axis = { 3 }       -> ACL reduction axis = { 0 }
        //
        // The transformation: ACL reduction axis index = original rank - ArmNN reduction axis index - 1
        //
        outAclCoords.set_num_dimensions(armnnAxes.size());
        std::transform(armnnAxes.begin(), armnnAxes.end(),
                       outAclCoords.begin(),
                       [originalInputRank](unsigned int i){ return originalInputRank - i - 1; });
    }

    return outAclCoords;
}

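// Illustrative usage sketch, applying the transformation described above to the documented
// example shapes:
//
//     // ArmNN input { 1, 1, 3, 2 } with original rank 4 and reduction axes { 2, 3 }.
//     std::vector<unsigned int> axes = { 2, 3 };
//     arm_compute::Coordinates coords = BuildArmComputeReductionCoordinates(2, 4, axes);
//     // coords now holds { 1, 0 }, i.e. the ACL axes for the dimension-corrected 2-D input.
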
arm_compute::TensorShape BuildArmComputeTensorShape(const armnn::TensorShape& tensorShape)
{
    arm_compute::TensorShape shape;

    // armnn tensors are (batch, channels, height, width).
    // arm_compute tensors are (width, height, channels, batch).
    for (unsigned int i = 0; i < tensorShape.GetNumDimensions(); i++)
    {
        // Note that our dimensions are stored in the opposite order to ACL's.
        shape.set(tensorShape.GetNumDimensions() - i - 1, tensorShape[i], false);

        // TensorShape::set() flattens leading ones, so that batch size 1 cannot happen.
        // arm_compute tensors expect this.
    }

    // Prevent an arm_compute issue where the tensor would be flattened to nothing.
    if (shape.num_dimensions() == 0)
    {
        shape.set_num_dimensions(1);
    }

    return shape;
}

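// Illustrative usage sketch showing the dimension reversal performed above for an NCHW ArmNN shape:
//
//     // ArmNN shape (batch, channels, height, width) = { 2, 3, 4, 5 }
//     armnn::TensorShape armnnShape({ 2, 3, 4, 5 });
//     arm_compute::TensorShape aclShape = BuildArmComputeTensorShape(armnnShape);
//     // aclShape holds the same extents in reversed order: (width, height, channels, batch) = (5, 4, 3, 2).
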
// Utility function used to build a TensorInfo object that can be used to initialise
// ARM Compute Tensor and CLTensor allocators.
arm_compute::TensorInfo BuildArmComputeTensorInfo(const armnn::TensorInfo& tensorInfo)
{
    bool multiScales = tensorInfo.HasMultipleQuantizationScales();
    const arm_compute::TensorShape aclTensorShape = BuildArmComputeTensorShape(tensorInfo.GetShape());
    const arm_compute::DataType aclDataType = GetArmComputeDataType(tensorInfo.GetDataType(), multiScales);

    const arm_compute::QuantizationInfo aclQuantizationInfo = multiScales ?
        arm_compute::QuantizationInfo(tensorInfo.GetQuantizationScales()) :
        arm_compute::QuantizationInfo(tensorInfo.GetQuantizationScale(), tensorInfo.GetQuantizationOffset());

    return arm_compute::TensorInfo(aclTensorShape, 1, aclDataType, aclQuantizationInfo);
}

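// Illustrative usage sketch (assuming the armnn::TensorInfo::SetQuantizationScales setter):
// a tensor carrying more than one quantization scale takes the per-channel branch above,
// otherwise the single scale/offset constructor is used.
//
//     armnn::TensorInfo weightInfo(armnn::TensorShape({ 8, 3, 3, 3 }), armnn::DataType::QSymmS8);
//     weightInfo.SetQuantizationScales({ 0.1f, 0.2f /* ... one scale per output channel ... */ });
//     arm_compute::TensorInfo aclInfo = BuildArmComputeTensorInfo(weightInfo);
//     // aclInfo is QSYMM8_PER_CHANNEL and its quantization_info() carries the full scale vector.
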
arm_compute::TensorInfo BuildArmComputeTensorInfo(const armnn::TensorInfo& tensorInfo,
                                                  armnn::DataLayout dataLayout)
{
    arm_compute::TensorInfo aclTensorInfo = BuildArmComputeTensorInfo(tensorInfo);
    aclTensorInfo.set_data_layout(ConvertDataLayout(dataLayout));

    return aclTensorInfo;
}

arm_compute::DataLayout ConvertDataLayout(armnn::DataLayout dataLayout)
{
    switch(dataLayout)
    {
        case armnn::DataLayout::NHWC : return arm_compute::DataLayout::NHWC;

        case armnn::DataLayout::NCHW : return arm_compute::DataLayout::NCHW;

        default: throw InvalidArgumentException("Unknown armnn::DataLayout: [" +
                                                std::to_string(static_cast<int>(dataLayout)) + "]");
    }
}

arm_compute::PoolingLayerInfo BuildArmComputePoolingLayerInfo(const Pooling2dDescriptor& descriptor,
                                                              bool fpMixedPrecision)
{
    using arm_compute::PoolingType;
    using arm_compute::DimensionRoundingType;
    using arm_compute::PadStrideInfo;
    using arm_compute::PoolingLayerInfo;
    using arm_compute::Size2D;
    using arm_compute::DataLayout;

    // Resolve ARM Compute layer parameters.
    const PoolingType poolingType = ConvertPoolingAlgorithmToAclPoolingType(descriptor.m_PoolType);

    const DataLayout dataLayout = ConvertDataLayout(descriptor.m_DataLayout);

    bool isGlobalPooling = (descriptor.m_StrideX==0 && descriptor.m_StrideY==0);
    // Use the specific constructor if this is global pooling.
    if(isGlobalPooling)
    {
        return arm_compute::PoolingLayerInfo(poolingType, dataLayout);
    }

    const DimensionRoundingType rounding = ConvertOutputShapeRoundingToAclDimensionRoundingType(
                                                                              descriptor.m_OutputShapeRounding);
    const PadStrideInfo padStrideInfo(descriptor.m_StrideX,
                                      descriptor.m_StrideY,
                                      descriptor.m_PadLeft,
                                      descriptor.m_PadRight,
                                      descriptor.m_PadTop,
                                      descriptor.m_PadBottom,
                                      rounding);

    const bool excludePadding = (descriptor.m_PaddingMethod == PaddingMethod::Exclude);

    const Size2D poolSize(descriptor.m_PoolWidth, descriptor.m_PoolHeight);

    return arm_compute::PoolingLayerInfo(poolingType, poolSize, dataLayout, padStrideInfo, excludePadding,
                                         fpMixedPrecision);
}

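// Illustrative usage sketch: a descriptor with both strides set to 0 is treated as global
// pooling and takes the early-return path above; pool size and padding are then ignored.
//
//     armnn::Pooling2dDescriptor desc;
//     desc.m_PoolType   = armnn::PoolingAlgorithm::Average;
//     desc.m_StrideX    = 0;
//     desc.m_StrideY    = 0;
//     desc.m_DataLayout = armnn::DataLayout::NHWC;
//     arm_compute::PoolingLayerInfo info = BuildArmComputePoolingLayerInfo(desc, false);
//     // info describes global average pooling in NHWC.
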
arm_compute::NormalizationLayerInfo BuildArmComputeNormalizationLayerInfo(const NormalizationDescriptor& descriptor)
{
    const arm_compute::NormType normType =
        ConvertNormalizationAlgorithmChannelToAclNormType(descriptor.m_NormChannelType);
    return arm_compute::NormalizationLayerInfo(normType,
                                               descriptor.m_NormSize,
                                               descriptor.m_Alpha,
                                               descriptor.m_Beta,
                                               descriptor.m_K,
                                               false);
}

arm_compute::PermutationVector BuildArmComputePermutationVector(const armnn::PermutationVector& perm)
{
    arm_compute::PermutationVector aclPerm;

    // Skip any leading dimensions that map to themselves; ACL only needs the part of the
    // permutation that actually moves dimensions.
    unsigned int start = 0;
    while ((start < perm.GetSize()) && (start == perm[start]))
    {
        ++start;
    }

    // Rebase the remaining mappings so that they start from index 0.
    for (unsigned int i = start; i < perm.GetSize(); ++i)
    {
        aclPerm.set(i - start, perm[i] - start);
    }
    return aclPerm;
}

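// Illustrative usage sketch: a permutation whose first entries are the identity is reduced
// to the moving tail only.
//
//     // ArmNN mappings { 0, 1, 3, 2 }: dimensions 0 and 1 stay put, 2 and 3 swap.
//     armnn::PermutationVector armnnPerm({ 0, 1, 3, 2 });
//     arm_compute::PermutationVector aclPerm = BuildArmComputePermutationVector(armnnPerm);
//     // aclPerm holds { 1, 0 }, the swap rebased to start at index 0.
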
arm_compute::PermutationVector BuildArmComputeTransposeVector(const armnn::PermutationVector& perm)
{
    arm_compute::PermutationVector aclPerm;
    std::map<unsigned int, unsigned int> permuteMappings;

    // Invert the given mapping, since ArmNN's Transpose uses the opposite mapping convention
    // to Permute (see BuildArmComputePermutationVector above).
    for (unsigned int i = 0; i < perm.GetSize(); ++i)
    {
        permuteMappings[perm[i]] = i;
    }

    std::vector<unsigned int> permuteVector;
    for (unsigned int i = 0; i < perm.GetSize(); ++i)
    {
        permuteVector.push_back(permuteMappings.at(i));
    }

    // As above, drop any leading identity mappings and rebase the rest.
    unsigned int start = 0;
    while ((start < perm.GetSize()) && (start == permuteVector[start]))
    {
        ++start;
    }

    for (unsigned int i = start; i < perm.GetSize(); ++i)
    {
        aclPerm.set(i - start, permuteVector[i] - start);
    }
    return aclPerm;
}

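// Illustrative usage sketch: the same mapping produces the inverse permutation here compared
// to BuildArmComputePermutationVector.
//
//     armnn::PermutationVector armnnPerm({ 0, 2, 3, 1 });
//     arm_compute::PermutationVector aclPerm = BuildArmComputeTransposeVector(armnnPerm);
//     // The inverse of { 0, 2, 3, 1 } is { 0, 3, 1, 2 }; after dropping the leading identity
//     // entry and rebasing, aclPerm holds { 2, 0, 1 }.
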
arm_compute::Size2D BuildArmComputeSize2D(const unsigned int width, const unsigned int height)
{
    return arm_compute::Size2D(width, height);
}

arm_compute::PixelValue GetPixelValue(arm_compute::ITensor& input, float pixelValue)
{
    switch (input.info()->data_type())
    {
        case arm_compute::DataType::F16:
            return arm_compute::PixelValue(static_cast<Half>(pixelValue));
        case arm_compute::DataType::F32:
            return arm_compute::PixelValue(pixelValue);
        case arm_compute::DataType::QASYMM8:
            return arm_compute::PixelValue(static_cast<uint8_t>(pixelValue));
        case arm_compute::DataType::QSYMM16:
            return arm_compute::PixelValue(static_cast<int16_t>(pixelValue));
        case arm_compute::DataType::QASYMM8_SIGNED:
        case arm_compute::DataType::QSYMM8_PER_CHANNEL:
            return arm_compute::PixelValue(static_cast<int8_t>(pixelValue));
        default:
            throw InvalidArgumentException("Unsupported DataType: [" +
                                           std::to_string(static_cast<int>(input.info()->data_type())) + "]");
    }
}

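// Illustrative usage sketch: the float value is narrowed to the element type of the tensor it
// will be written into.
//
//     // Assuming 'tensor' is an arm_compute::ITensor configured with DataType::QASYMM8.
//     arm_compute::PixelValue pad = GetPixelValue(tensor, 0.0f);
//     // pad wraps a uint8_t zero, suitable for e.g. filling or padding that tensor.
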
} // namespace armcomputetensorutils
} // namespace armnn