ArmNN 20.02 - ArmComputeTensorUtils.cpp
//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include <aclCommon/ArmComputeTensorUtils.hpp>
#include <aclCommon/ArmComputeUtils.hpp>

#include "armnn/Exceptions.hpp"
#include <armnn/Descriptors.hpp>

namespace armnn
{
namespace armcomputetensorutils
{

arm_compute::DataType GetArmComputeDataType(armnn::DataType dataType, bool multiScales)
{
    switch(dataType)
    {
        case armnn::DataType::Boolean:
            return arm_compute::DataType::U8;
        case armnn::DataType::Float16:
            return arm_compute::DataType::F16;
        case armnn::DataType::Float32:
            return arm_compute::DataType::F32;
        case armnn::DataType::QAsymmS8:
            return arm_compute::DataType::QASYMM8_SIGNED;
        case armnn::DataType::QAsymmU8:
            return arm_compute::DataType::QASYMM8;
        case armnn::DataType::QSymmS16:
            return arm_compute::DataType::QSYMM16;
        case armnn::DataType::QSymmS8:
        {
            return multiScales ? arm_compute::DataType::QSYMM8_PER_CHANNEL : arm_compute::DataType::QSYMM8;
        }
        ARMNN_NO_DEPRECATE_WARN_BEGIN
        case armnn::DataType::QuantizedSymm8PerAxis:
            return arm_compute::DataType::QSYMM8_PER_CHANNEL;
        ARMNN_NO_DEPRECATE_WARN_END
        case armnn::DataType::Signed32:
            return arm_compute::DataType::S32;
        default:
            BOOST_ASSERT_MSG(false, "Unknown data type");
            return arm_compute::DataType::UNKNOWN;
    }
}
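
A minimal usage sketch of the mapping above, assuming the ArmNN 20.02 enum names shown in the reconstructed case labels; it only checks that QSymmS8 picks the per-channel ACL type when multiple quantization scales are present.

#include <aclCommon/ArmComputeTensorUtils.hpp>
#include <cassert>

void DataTypeMappingExample()
{
    using armnn::armcomputetensorutils::GetArmComputeDataType;
    // Per-tensor scale: plain QSYMM8; per-channel scales: QSYMM8_PER_CHANNEL.
    assert(GetArmComputeDataType(armnn::DataType::QSymmS8, false) == arm_compute::DataType::QSYMM8);
    assert(GetArmComputeDataType(armnn::DataType::QSymmS8, true)  == arm_compute::DataType::QSYMM8_PER_CHANNEL);
}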
47 
48 arm_compute::Coordinates BuildArmComputeReductionCoordinates(size_t inputDimensions,
49  unsigned int originalInputRank,
50  const std::vector<unsigned int>& armnnAxes)
51 {
52  arm_compute::Coordinates outAclCoords;
53 
54  if (armnnAxes.empty())
55  {
56  // If no reduction axes were provided, then the input must be reduced along all dimensions.
57  // Since Compute Library does not accept an empty vector as the reduction dimensions, we then
58  // manually create a vector including all the input dimensions (in reversed order) as:
59  //
60  // { inputDimensions - 1, inputDimensions - 2, ..., 1, 0 }
61  //
62  outAclCoords.set_num_dimensions(inputDimensions);
63  std::generate(outAclCoords.begin(), outAclCoords.end(), [d = inputDimensions - 1] () mutable { return d--; });
64  }
65  else
66  {
67  // Create a vector of reduction dimensions (in reversed order) with the given reduction axes.
68  //
69  // Adjust the given reduction axes according to the original rank of the input tensor (before ACL applied any
70  // dimension correction).
71  // For example, if the input tensor originally had 4 dimensions, and one of the reduction axes was 2, then the
72  // new value for that reduction axis should be 1.
73  //
74  // Example:
75  // ArmNN input shape = { 1, 1, 3, 2 } -> ACL input shape = { 2, 3 }
76  // ArmNN reduction axis = { 2 } -> ACL reduction axis = { 1 }
77  // ArmNN reduction axis = { 3 } -> ACL reduction axis = { 0 }
78  //
79  // The transformation: ACL reduction axis index = original rank - ArmNN reduction axis index - 1
80  //
81  outAclCoords.set_num_dimensions(armnnAxes.size());
82  std::transform(armnnAxes.begin(), armnnAxes.end(),
83  outAclCoords.begin(),
84  [originalInputRank](unsigned int i){ return originalInputRank - i - 1; });
85  }
86 
87  return outAclCoords;
88 }
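
As a standalone illustration of the axis remapping above (standard library only, independent of ACL), the worked example from the comment can be reproduced directly; the shape { 1, 1, 3, 2 } and axes { 2, 3 } are taken from that comment.

#include <algorithm>
#include <cassert>
#include <vector>

int main()
{
    const unsigned int originalInputRank = 4;             // ArmNN shape { 1, 1, 3, 2 }
    const std::vector<unsigned int> armnnAxes = { 2, 3 };

    std::vector<unsigned int> aclAxes(armnnAxes.size());
    std::transform(armnnAxes.begin(), armnnAxes.end(), aclAxes.begin(),
                   [originalInputRank](unsigned int i) { return originalInputRank - i - 1; });

    assert(aclAxes[0] == 1 && aclAxes[1] == 0);            // matches the example in the comment
}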
89 
90 arm_compute::TensorShape BuildArmComputeTensorShape(const armnn::TensorShape& tensorShape)
91 {
92  arm_compute::TensorShape shape;
93 
94  // armnn tensors are (batch, channels, height, width).
95  // arm_compute tensors are (width, height, channels, batch).
96  for (unsigned int i = 0; i < tensorShape.GetNumDimensions(); i++)
97  {
98  // Note that our dimensions are stored in the opposite order to ACL's.
99  shape.set(tensorShape.GetNumDimensions() - i - 1, tensorShape[i], false);
100 
101  // TensorShape::set() flattens leading ones, so that batch size 1 cannot happen.
102  // arm_compute tensors expect this.
103  }
104 
105  // prevent arm_compute issue where tensor is flattened to nothing
106  if (shape.num_dimensions() == 0)
107  {
108  shape.set_num_dimensions(1);
109  }
110 
111  return shape;
112 }
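
A small standard-library sketch of the dimension reversal performed here: an ArmNN (batch, channels, height, width) shape is written into the ACL shape back to front, so width lands at index 0 and batch at the highest index. The concrete shape below is only an example.

#include <cassert>
#include <cstddef>
#include <vector>

int main()
{
    const std::vector<unsigned int> armnnShape = { 1, 3, 224, 224 };   // N, C, H, W
    std::vector<unsigned int> aclShape(armnnShape.size());

    for (std::size_t i = 0; i < armnnShape.size(); ++i)
    {
        aclShape[armnnShape.size() - i - 1] = armnnShape[i];
    }

    assert(aclShape[0] == 224 && aclShape[3] == 1);   // width first, batch last
}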
113 
114 // Utility function used to build a TensorInfo object, that can be used to initialise
115 // ARM Compute Tensor and CLTensor allocators.
116 arm_compute::TensorInfo BuildArmComputeTensorInfo(const armnn::TensorInfo& tensorInfo)
117 {
118  bool multiScales = tensorInfo.HasMultipleQuantizationScales();
119  const arm_compute::TensorShape aclTensorShape = BuildArmComputeTensorShape(tensorInfo.GetShape());
120  const arm_compute::DataType aclDataType = GetArmComputeDataType(tensorInfo.GetDataType(), multiScales);
121 
122  const arm_compute::QuantizationInfo aclQuantizationInfo = multiScales ?
123  arm_compute::QuantizationInfo(tensorInfo.GetQuantizationScales()) :
124  arm_compute::QuantizationInfo(tensorInfo.GetQuantizationScale(), tensorInfo.GetQuantizationOffset());
125 
126  return arm_compute::TensorInfo(aclTensorShape, 1, aclDataType, aclQuantizationInfo);
127 }
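
A hedged usage sketch of how a helper like this is typically consumed: the resulting arm_compute::TensorInfo is passed to an ACL tensor allocator's init() before allocation. The Tensor/allocator calls are standard Compute Library API, but this exact call site is illustrative rather than taken from ArmNN.

#include <aclCommon/ArmComputeTensorUtils.hpp>
#include <arm_compute/runtime/Tensor.h>

void InitAclTensorExample(const armnn::TensorInfo& armnnInfo)
{
    // Build the ACL descriptor (shape, data type, quantization) from the ArmNN one,
    // then use it to initialise and allocate a CPU-side ACL tensor.
    arm_compute::Tensor aclTensor;
    aclTensor.allocator()->init(armnn::armcomputetensorutils::BuildArmComputeTensorInfo(armnnInfo));
    aclTensor.allocator()->allocate();
}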
128 
129 arm_compute::TensorInfo BuildArmComputeTensorInfo(const armnn::TensorInfo& tensorInfo,
130  armnn::DataLayout dataLayout)
131 {
132  arm_compute::TensorInfo aclTensorInfo = BuildArmComputeTensorInfo(tensorInfo);
133  aclTensorInfo.set_data_layout(ConvertDataLayout(dataLayout));
134 
135  return aclTensorInfo;
136 }
137 
138 arm_compute::DataLayout ConvertDataLayout(armnn::DataLayout dataLayout)
139 {
140  switch(dataLayout)
141  {
142  case armnn::DataLayout::NHWC : return arm_compute::DataLayout::NHWC;
143 
144  case armnn::DataLayout::NCHW : return arm_compute::DataLayout::NCHW;
145 
146  default: throw InvalidArgumentException("Unknown armnn::DataLayout: [" +
147  std::to_string(static_cast<int>(dataLayout)) + "]");
148  }
149 }
150 
151 arm_compute::PoolingLayerInfo BuildArmComputePoolingLayerInfo(const Pooling2dDescriptor& descriptor,
152  bool fpMixedPrecision)
153 {
154  using arm_compute::PoolingType;
155  using arm_compute::DimensionRoundingType;
156  using arm_compute::PadStrideInfo;
157  using arm_compute::PoolingLayerInfo;
158  using arm_compute::Size2D;
160 
161  // Resolve ARM Compute layer parameters.
162  const PoolingType poolingType = ConvertPoolingAlgorithmToAclPoolingType(descriptor.m_PoolType);
163 
164  const DataLayout dataLayout = ConvertDataLayout(descriptor.m_DataLayout);
165 
166  bool isGlobalPooling = (descriptor.m_StrideX==0 && descriptor.m_StrideY==0);
167  //use specific constructor if global pooling
168  if(isGlobalPooling)
169  {
170  return arm_compute::PoolingLayerInfo(poolingType, dataLayout);
171  }
172 
173  const DimensionRoundingType rounding = ConvertOutputShapeRoundingToAclDimensionRoundingType(
174  descriptor.m_OutputShapeRounding);
175  const PadStrideInfo padStrideInfo(descriptor.m_StrideX,
176  descriptor.m_StrideY,
177  descriptor.m_PadLeft,
178  descriptor.m_PadRight,
179  descriptor.m_PadTop,
180  descriptor.m_PadBottom,
181  rounding);
182 
183  const bool excludePadding = (descriptor.m_PaddingMethod == PaddingMethod::Exclude);
184 
185  const Size2D poolSize(descriptor.m_PoolWidth, descriptor.m_PoolHeight);
186 
187  return arm_compute::PoolingLayerInfo(poolingType, poolSize, dataLayout, padStrideInfo, excludePadding,
188  fpMixedPrecision);
189 }
190 
191 arm_compute::NormalizationLayerInfo BuildArmComputeNormalizationLayerInfo(const NormalizationDescriptor& descriptor)
192 {
193  const arm_compute::NormType normType =
194  ConvertNormalizationAlgorithmChannelToAclNormType(descriptor.m_NormChannelType);
195  return arm_compute::NormalizationLayerInfo(normType,
196  descriptor.m_NormSize,
197  descriptor.m_Alpha,
198  descriptor.m_Beta,
199  descriptor.m_K,
200  false);
201 }
202 
203 arm_compute::PermutationVector BuildArmComputePermutationVector(const armnn::PermutationVector& perm)
204 {
205  arm_compute::PermutationVector aclPerm;
206 
207  unsigned int start = 0;
208  while ((start < perm.GetSize()) && (start == perm[start]))
209  {
210  ++start;
211  }
212 
213  for (unsigned int i = start; i < perm.GetSize(); ++i)
214  {
215  aclPerm.set(i - start, perm[i] - start);
216  }
217  return aclPerm;
218 }
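
The loop above drops leading dimensions that map to themselves and shifts the remaining indices down by the number dropped; the standalone snippet below mirrors that logic with plain std::vector so the effect can be checked without ACL. The permutation { 0, 2, 1 } is just an example.

#include <cassert>
#include <vector>

int main()
{
    const std::vector<unsigned int> perm = { 0, 2, 1 };   // identity on dim 0, swap dims 1 and 2

    // Skip the leading identity part of the mapping.
    unsigned int start = 0;
    while (start < perm.size() && start == perm[start]) { ++start; }

    // Shift the remaining indices down by 'start'.
    std::vector<unsigned int> aclPerm;
    for (unsigned int i = start; i < perm.size(); ++i)
    {
        aclPerm.push_back(perm[i] - start);
    }

    assert(aclPerm.size() == 2 && aclPerm[0] == 1 && aclPerm[1] == 0);
}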
219 
220 arm_compute::PermutationVector BuildArmComputeTransposeVector(const armnn::PermutationVector& perm)
221 {
222  arm_compute::PermutationVector aclPerm;
223  std::map<unsigned int, unsigned int> permuteMappings;
224  for (unsigned int i = 0; i < perm.GetSize(); ++i)
225  {
226  permuteMappings[perm[i]] = i;
227  }
228 
229  std::vector<unsigned int> permuteVector;
230  for (unsigned int i = 0; i < perm.GetSize(); ++i)
231  {
232  permuteVector.push_back(permuteMappings.at(i));
233  }
234 
235  unsigned int start = 0;
236  while ((start < perm.GetSize()) && (start == permuteVector[start]))
237  {
238  ++start;
239  }
240 
241  for (unsigned int i = start; i < perm.GetSize(); ++i)
242  {
243  aclPerm.set(i - start, permuteVector[i] - start);
244  }
245  return aclPerm;
246 }
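
The difference from BuildArmComputePermutationVector is the initial inversion: permuteMappings records, for each destination dimension, the source it comes from, turning the ArmNN mapping into its inverse before the same trimming is applied. A standard-library sketch with an example permutation { 1, 2, 0 }:

#include <cassert>
#include <map>
#include <vector>

int main()
{
    const std::vector<unsigned int> perm = { 1, 2, 0 };

    // Invert the mapping: for each destination index, record the source index.
    std::map<unsigned int, unsigned int> inverse;
    for (unsigned int i = 0; i < perm.size(); ++i) { inverse[perm[i]] = i; }

    std::vector<unsigned int> permuteVector;
    for (unsigned int i = 0; i < perm.size(); ++i) { permuteVector.push_back(inverse.at(i)); }

    // { 1, 2, 0 } inverts to { 2, 0, 1 }.
    assert(permuteVector[0] == 2 && permuteVector[1] == 0 && permuteVector[2] == 1);
}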
247 
248 arm_compute::Size2D BuildArmComputeSize2D(const unsigned int width, const unsigned int height)
249 {
250  return arm_compute::Size2D(width, height);
251 }
252 
253 arm_compute::PixelValue GetPixelValue(arm_compute::ITensor& input, float pixelValue)
254 {
255  switch (input.info()->data_type())
256  {
257  case arm_compute::DataType::F16:
258  return arm_compute::PixelValue(static_cast<Half>(pixelValue));
259  case arm_compute::DataType::F32:
260  return arm_compute::PixelValue(pixelValue);
261  case arm_compute::DataType::QASYMM8:
262  return arm_compute::PixelValue(static_cast<uint8_t>(pixelValue));
263  case arm_compute::DataType::QSYMM16:
264  return arm_compute::PixelValue(static_cast<int16_t>(pixelValue));
265  case arm_compute::DataType::QSYMM8_PER_CHANNEL:
266  return arm_compute::PixelValue(static_cast<int8_t>(pixelValue));
267  default:
268  throw InvalidArgumentException("Unsupported DataType: [" +
269  std::to_string(static_cast<int>(input.info()->data_type())) + "]");
270  }
271 }

} // namespace armcomputetensorutils
} // namespace armnn