ArmNN 22.02
ArmComputeTensorUtils.hpp
//
// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once

#include <armnn/Tensor.hpp>
#include <armnn/DescriptorsFwd.hpp>

#include <armnn/utility/NumericCast.hpp>

#include <arm_compute/core/ITensor.h>
#include <arm_compute/core/TensorInfo.h>
#include <arm_compute/core/Types.h>

#include <Half.hpp>

#include <cstring>
#include <memory>
#include <vector>

namespace armnn
{
class ITensorHandle;

namespace armcomputetensorutils
{

/// Utility function to map an armnn::DataType to the corresponding arm_compute::DataType.
arm_compute::DataType GetArmComputeDataType(armnn::DataType dataType, bool multiScales);

/// Utility function used to set up an arm_compute::Coordinates object from a vector of ArmNN axes for reduction functions.
arm_compute::Coordinates BuildArmComputeReductionCoordinates(size_t inputDimensions,
                                                             unsigned int originalInputRank,
                                                             const std::vector<unsigned int>& armnnAxes);

/// Utility function used to set up an arm_compute::TensorShape object from an armnn::TensorShape.
arm_compute::TensorShape BuildArmComputeTensorShape(const armnn::TensorShape& tensorShape);

/// Utility function used to set up an arm_compute::TensorInfo object whose dimensions are based on the given
/// armnn::TensorInfo.
arm_compute::TensorInfo BuildArmComputeTensorInfo(const armnn::TensorInfo& tensorInfo);

/// Utility function used to set up an arm_compute::TensorInfo object whose dimensions are based on the given
/// armnn::TensorInfo and armnn::DataLayout.
arm_compute::TensorInfo BuildArmComputeTensorInfo(const armnn::TensorInfo& tensorInfo,
                                                  armnn::DataLayout dataLayout);

/// Utility function used to convert an armnn::DataLayout to the corresponding arm_compute::DataLayout.
arm_compute::DataLayout ConvertDataLayout(armnn::DataLayout dataLayout);

/// Utility function used to set up an arm_compute::PoolingLayerInfo object from a given
/// armnn::Pooling2dDescriptor and an optional fpMixedPrecision flag.
arm_compute::PoolingLayerInfo BuildArmComputePoolingLayerInfo(const Pooling2dDescriptor& descriptor,
                                                              bool fpMixedPrecision = false);
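
// Example (illustrative sketch): building pooling info from a descriptor configured for 2x2 max pooling.
//
//     armnn::Pooling2dDescriptor pool;
//     pool.m_PoolType   = armnn::PoolingAlgorithm::Max;
//     pool.m_PoolWidth  = 2;
//     pool.m_PoolHeight = 2;
//     pool.m_StrideX    = 2;
//     pool.m_StrideY    = 2;
//     arm_compute::PoolingLayerInfo poolInfo = BuildArmComputePoolingLayerInfo(pool);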

/// Utility function to set up an arm_compute::NormalizationLayerInfo object from an armnn::NormalizationDescriptor.
arm_compute::NormalizationLayerInfo BuildArmComputeNormalizationLayerInfo(const NormalizationDescriptor& desc);

/// Utility function used to set up an arm_compute::PermutationVector object from an armnn::PermutationVector.
arm_compute::PermutationVector BuildArmComputePermutationVector(const armnn::PermutationVector& vector);

/// Utility function used to set up an arm_compute::PermutationVector object from an armnn::PermutationVector,
/// for use with transpose functions.
arm_compute::PermutationVector BuildArmComputeTransposeVector(const armnn::PermutationVector& vector);

/// Utility function used to set up an arm_compute::Size2D object from width and height values.
arm_compute::Size2D BuildArmComputeSize2D(const unsigned int width, const unsigned int height);

/// Gets the appropriate PixelValue for the given arm_compute::ITensorInfo DataType.
arm_compute::PixelValue GetPixelValue(const arm_compute::ITensorInfo* tensorInfo, float pixelValue);

/// Utility function used to set up an arm_compute::PadStrideInfo object from an armnn layer descriptor.
template <typename Descriptor>
arm_compute::PadStrideInfo BuildArmComputePadStrideInfo(const Descriptor& descriptor)
{
    return arm_compute::PadStrideInfo(descriptor.m_StrideX,
                                      descriptor.m_StrideY,
                                      descriptor.m_PadLeft,
                                      descriptor.m_PadRight,
                                      descriptor.m_PadTop,
                                      descriptor.m_PadBottom,
                                      arm_compute::DimensionRoundingType::FLOOR);
}
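
// Example (illustrative sketch): any descriptor exposing m_StrideX/m_StrideY and
// m_PadLeft/m_PadRight/m_PadTop/m_PadBottom works here, e.g. armnn::Convolution2dDescriptor.
//
//     armnn::Convolution2dDescriptor conv;
//     conv.m_StrideX = 1;
//     conv.m_StrideY = 1;
//     conv.m_PadLeft = conv.m_PadRight = conv.m_PadTop = conv.m_PadBottom = 1;
//     arm_compute::PadStrideInfo padStride = BuildArmComputePadStrideInfo(conv);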

/// Sets up the given ArmCompute tensor's dimensions based on the given ArmNN tensor.
template <typename Tensor>
void BuildArmComputeTensor(Tensor& tensor, const armnn::TensorInfo& tensorInfo)
{
    tensor.allocator()->init(BuildArmComputeTensorInfo(tensorInfo));
}

/// Sets up the given ArmCompute tensor's dimensions based on the given ArmNN tensor and data layout.
template <typename Tensor>
void BuildArmComputeTensor(Tensor& tensor, const armnn::TensorInfo& tensorInfo, DataLayout dataLayout)
{
    tensor.allocator()->init(BuildArmComputeTensorInfo(tensorInfo, dataLayout));
}

/// Allocates memory for the given ArmCompute tensor; the contents are left uninitialised.
template <typename Tensor>
void InitialiseArmComputeTensorEmpty(Tensor& tensor)
{
    tensor.allocator()->allocate();
}
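
// Example (illustrative sketch): initialising and allocating an arm_compute::Tensor
// (from <arm_compute/runtime/Tensor.h>) for a float NCHW tensor.
//
//     armnn::TensorInfo netInfo({ 1, 3, 224, 224 }, armnn::DataType::Float32);
//     arm_compute::Tensor aclTensor;
//     BuildArmComputeTensor(aclTensor, netInfo, armnn::DataLayout::NCHW);
//     InitialiseArmComputeTensorEmpty(aclTensor);   // allocates the backing memory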

/// Utility function to free unused tensors after a workload has been configured and prepared.
template <typename Tensor>
void FreeTensorIfUnused(std::unique_ptr<Tensor>& tensor)
{
    if (tensor && !tensor->is_used())
    {
        tensor.reset(nullptr);
    }
}
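
// Example (illustrative sketch, hypothetical member name m_KernelTensor): once the ACL
// function has been configured and prepared, staging tensors that ACL no longer uses can
// be released.
//
//     FreeTensorIfUnused(m_KernelTensor);   // m_KernelTensor is a std::unique_ptr<arm_compute::Tensor>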

// Helper function to obtain byte offset into tensor data
inline size_t GetTensorOffset(const arm_compute::ITensorInfo& info,
                              uint32_t depthIndex,
                              uint32_t batchIndex,
                              uint32_t channelIndex,
                              uint32_t y,
                              uint32_t x)
{
    arm_compute::Coordinates coords;
    coords.set(4, static_cast<int>(depthIndex));
    coords.set(3, static_cast<int>(batchIndex));
    coords.set(2, static_cast<int>(channelIndex));
    coords.set(1, static_cast<int>(y));
    coords.set(0, static_cast<int>(x));
    return armnn::numeric_cast<size_t>(info.offset_element_in_bytes(coords));
}

// Helper function to obtain element offset into data buffer representing tensor data (assuming no strides).
inline size_t GetLinearBufferOffset(const arm_compute::ITensorInfo& info,
                                    uint32_t depthIndex,
                                    uint32_t batchIndex,
                                    uint32_t channelIndex,
                                    uint32_t y,
                                    uint32_t x)
{
    const arm_compute::TensorShape& shape = info.tensor_shape();
    uint32_t width       = static_cast<uint32_t>(shape[0]);
    uint32_t height      = static_cast<uint32_t>(shape[1]);
    uint32_t numChannels = static_cast<uint32_t>(shape[2]);
    uint32_t numBatches  = static_cast<uint32_t>(shape[3]);
    return (((depthIndex * numBatches + batchIndex) * numChannels + channelIndex) * height + y) * width + x;
}
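
// Worked example (for illustration): with width = 4, height = 3, numChannels = 2 and
// numBatches = 1, the element at (depthIndex = 0, batchIndex = 0, channelIndex = 1, y = 2, x = 3)
// has linear offset (((0 * 1 + 0) * 2 + 1) * 3 + 2) * 4 + 3 = 23, i.e. the last element of the
// 24-element buffer.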

/// Copies the contents of an arm_compute::ITensor into a linear (unstrided) buffer, row by row.
template <typename T>
void CopyArmComputeITensorData(const arm_compute::ITensor& srcTensor, T* dstData)
{
    // If MaxNumOfTensorDimensions is increased, this loop will need fixing.
    static_assert(MaxNumOfTensorDimensions == 5, "Please update CopyArmComputeITensorData");
    {
        const arm_compute::ITensorInfo& info = *srcTensor.info();
        const arm_compute::TensorShape& shape = info.tensor_shape();
        const uint8_t* const bufferPtr = srcTensor.buffer();
        uint32_t width       = static_cast<uint32_t>(shape[0]);
        uint32_t height      = static_cast<uint32_t>(shape[1]);
        uint32_t numChannels = static_cast<uint32_t>(shape[2]);
        uint32_t numBatches  = static_cast<uint32_t>(shape[3]);
        uint32_t depth       = static_cast<uint32_t>(shape[4]);

        for (unsigned int depthIndex = 0; depthIndex < depth; ++depthIndex)
        {
            for (unsigned int batchIndex = 0; batchIndex < numBatches; ++batchIndex)
            {
                for (unsigned int channelIndex = 0; channelIndex < numChannels; ++channelIndex)
                {
                    for (unsigned int y = 0; y < height; ++y)
                    {
                        // Copies one row from the arm_compute tensor buffer to the linear memory buffer.
                        // A row is the largest contiguous region we can copy, as the tensor data may be using strides.
                        memcpy(dstData + GetLinearBufferOffset(info, depthIndex, batchIndex, channelIndex, y, 0),
                               bufferPtr + GetTensorOffset(info, depthIndex, batchIndex, channelIndex, y, 0),
                               width * sizeof(T));
                    }
                }
            }
        }
    }
}
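
// Example (illustrative sketch): copying a possibly strided/padded ACL tensor into a
// tightly packed buffer sized from its shape.
//
//     const arm_compute::ITensor& aclTensor = /* an allocated, populated tensor */;
//     std::vector<float> output(aclTensor.info()->tensor_shape().total_size());
//     CopyArmComputeITensorData(aclTensor, output.data());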

/// Copies the contents of a linear (unstrided) buffer into an arm_compute::ITensor, row by row.
template <typename T>
void CopyArmComputeITensorData(const T* srcData, arm_compute::ITensor& dstTensor)
{
    // If MaxNumOfTensorDimensions is increased, this loop will need fixing.
    static_assert(MaxNumOfTensorDimensions == 5, "Please update CopyArmComputeITensorData");
    {
        const arm_compute::ITensorInfo& info = *dstTensor.info();
        const arm_compute::TensorShape& shape = info.tensor_shape();
        uint8_t* const bufferPtr = dstTensor.buffer();
        uint32_t width       = static_cast<uint32_t>(shape[0]);
        uint32_t height      = static_cast<uint32_t>(shape[1]);
        uint32_t numChannels = static_cast<uint32_t>(shape[2]);
        uint32_t numBatches  = static_cast<uint32_t>(shape[3]);
        uint32_t depth       = static_cast<uint32_t>(shape[4]);

        for (unsigned int depthIndex = 0; depthIndex < depth; ++depthIndex)
        {
            for (unsigned int batchIndex = 0; batchIndex < numBatches; ++batchIndex)
            {
                for (unsigned int channelIndex = 0; channelIndex < numChannels; ++channelIndex)
                {
                    for (unsigned int y = 0; y < height; ++y)
                    {
                        // Copies one row from the linear memory buffer to the arm_compute tensor buffer.
                        // A row is the largest contiguous region we can copy, as the tensor data may be using strides.
                        memcpy(bufferPtr + GetTensorOffset(info, depthIndex, batchIndex, channelIndex, y, 0),
                               srcData + GetLinearBufferOffset(info, depthIndex, batchIndex, channelIndex, y, 0),
                               width * sizeof(T));
                    }
                }
            }
        }
    }
}

/// Construct a TensorShape object from an ArmCompute object based on arm_compute::Dimensions.
/// \tparam ArmComputeType Any type that implements the Dimensions interface
/// \tparam T Shape value type
/// \param shapelike An ArmCompute object that implements the Dimensions interface
/// \param initial A default value to initialise the shape with
/// \return A TensorShape object filled from the ACL shapelike object.
template<typename ArmComputeType, typename T>
TensorShape GetTensorShape(const ArmComputeType& shapelike, T initial)
{
    std::vector<unsigned int> s(MaxNumOfTensorDimensions, initial);
    for (unsigned int i = 0; i < shapelike.num_dimensions(); ++i)
    {
        // ACL stores dimensions fastest-varying first, so reverse the order here.
        s[(shapelike.num_dimensions() - 1) - i] = armnn::numeric_cast<unsigned int>(shapelike[i]);
    }
    return TensorShape(armnn::numeric_cast<unsigned int>(shapelike.num_dimensions()), s.data());
}
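
// Worked example (for illustration): ACL shapes list the fastest-varying dimension first,
// so the order is reversed when converting back to an armnn::TensorShape.
//
//     arm_compute::TensorShape aclShape(4, 3, 2);                       // width, height, channels
//     armnn::TensorShape armnnShape = GetTensorShape(aclShape, 1U);     // { 2, 3, 4 }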

/// Get the strides from an ACL strides object
inline TensorShape GetStrides(const arm_compute::Strides& strides)
{
    return GetTensorShape(strides, 0U);
}

/// Get the shape from an ACL shape object
inline TensorShape GetShape(const arm_compute::TensorShape& shape)
{
    return GetTensorShape(shape, 1U);
}

} // namespace armcomputetensorutils
} // namespace armnn