ArmNN  NotReleased
ArmComputeTensorUtils.hpp
Go to the documentation of this file.
1 //
2 // Copyright © 2017 Arm Ltd. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 #pragma once
6 
7 #include <armnn/Tensor.hpp>
9 
10 #include <arm_compute/core/ITensor.h>
11 #include <arm_compute/core/TensorInfo.h>
12 #include <arm_compute/core/Types.h>
13 #include <arm_compute/core/Size2D.h>
14 
15 #include <Half.hpp>
16 
17 #include <boost/cast.hpp>
18 
19 namespace armnn
20 {
21 class ITensorHandle;
22 
23 namespace armcomputetensorutils
24 {
25 
27 arm_compute::DataType GetArmComputeDataType(armnn::DataType dataType, bool multiScales);
28 
30 arm_compute::Coordinates BuildArmComputeReductionCoordinates(size_t inputDimensions,
31  unsigned int originalInputRank,
32  const std::vector<unsigned int>& armnnAxes);
33 
35 arm_compute::TensorShape BuildArmComputeTensorShape(const armnn::TensorShape& tensorShape);
36 
39 arm_compute::TensorInfo BuildArmComputeTensorInfo(const armnn::TensorInfo& tensorInfo);
40 
44 arm_compute::TensorInfo BuildArmComputeTensorInfo(const armnn::TensorInfo& tensorInfo,
45  armnn::DataLayout dataLayout);
46 
49 arm_compute::DataLayout ConvertDataLayout(armnn::DataLayout dataLayout);
50 
54 arm_compute::PoolingLayerInfo BuildArmComputePoolingLayerInfo(const Pooling2dDescriptor& descriptor,
55  bool fpMixedPrecision = false);
56 
58 arm_compute::NormalizationLayerInfo BuildArmComputeNormalizationLayerInfo(const NormalizationDescriptor& desc);
59 
61 arm_compute::PermutationVector BuildArmComputePermutationVector(const armnn::PermutationVector& vector);
62 
64 arm_compute::Size2D BuildArmComputeSize2D(const unsigned int width, const unsigned int height);
65 
67 arm_compute::PixelValue GetPixelValue(arm_compute::ITensor& input, float pixelValue);
68 
70 template <typename Descriptor>
71 arm_compute::PadStrideInfo BuildArmComputePadStrideInfo(const Descriptor &descriptor)
72 {
73  return arm_compute::PadStrideInfo(descriptor.m_StrideX,
74  descriptor.m_StrideY,
75  descriptor.m_PadLeft,
76  descriptor.m_PadRight,
77  descriptor.m_PadTop,
78  descriptor.m_PadBottom,
79  arm_compute::DimensionRoundingType::FLOOR);
80 }
81 
83 template <typename Tensor>
84 void BuildArmComputeTensor(Tensor& tensor, const armnn::TensorInfo& tensorInfo)
85 {
86  tensor.allocator()->init(BuildArmComputeTensorInfo(tensorInfo));
87 }
88 
90 template <typename Tensor>
91 void BuildArmComputeTensor(Tensor& tensor, const armnn::TensorInfo& tensorInfo, DataLayout dataLayout)
92 {
93  tensor.allocator()->init(BuildArmComputeTensorInfo(tensorInfo, dataLayout));
94 }
95 
/// Utility function that triggers allocation on an already-initialised arm_compute tensor
/// by invoking its allocator. No data is copied in.
template <typename Tensor>
void InitialiseArmComputeTensorEmpty(Tensor& tensor)
{
    tensor.allocator()->allocate();
}
101 
/// Utility function to free unused tensors after a network has been run:
/// releases the tensor (resetting the owning unique_ptr) only when arm_compute
/// reports it as no longer used. A null pointer is left untouched.
template <typename Tensor>
void FreeTensorIfUnused(std::unique_ptr<Tensor>& tensor)
{
    if (!tensor)
    {
        return;
    }
    if (!tensor->is_used())
    {
        tensor.reset();
    }
}
111 
112 // Helper function to obtain byte offset into tensor data
113 inline size_t GetTensorOffset(const arm_compute::ITensorInfo& info,
114  uint32_t depthIndex,
115  uint32_t batchIndex,
116  uint32_t channelIndex,
117  uint32_t y,
118  uint32_t x)
119 {
121  coords.set(4, static_cast<int>(depthIndex));
122  coords.set(3, static_cast<int>(batchIndex));
123  coords.set(2, static_cast<int>(channelIndex));
124  coords.set(1, static_cast<int>(y));
125  coords.set(0, static_cast<int>(x));
126  return boost::numeric_cast<size_t>(info.offset_element_in_bytes(coords));
127 }
128 
129 // Helper function to obtain element offset into data buffer representing tensor data (assuming no strides).
130 inline size_t GetLinearBufferOffset(const arm_compute::ITensorInfo& info,
131  uint32_t depthIndex,
132  uint32_t batchIndex,
133  uint32_t channelIndex,
134  uint32_t y,
135  uint32_t x)
136 {
137  const arm_compute::TensorShape& shape = info.tensor_shape();
138  uint32_t width = static_cast<uint32_t>(shape[0]);
139  uint32_t height = static_cast<uint32_t>(shape[1]);
140  uint32_t numChannels = static_cast<uint32_t>(shape[2]);
141  uint32_t numBatches = static_cast<uint32_t>(shape[3]);
142  return (((depthIndex * numBatches + batchIndex) * numChannels + channelIndex) * height + y) * width + x;
143 }
144 
145 template <typename T>
146 void CopyArmComputeITensorData(const arm_compute::ITensor& srcTensor, T* dstData)
147 {
148  // If MaxNumOfTensorDimensions is increased, this loop will need fixing.
149  static_assert(MaxNumOfTensorDimensions == 5, "Please update CopyArmComputeITensorData");
150  {
151  const arm_compute::ITensorInfo& info = *srcTensor.info();
152  const arm_compute::TensorShape& shape = info.tensor_shape();
153  const uint8_t* const bufferPtr = srcTensor.buffer();
154  uint32_t width = static_cast<uint32_t>(shape[0]);
155  uint32_t height = static_cast<uint32_t>(shape[1]);
156  uint32_t numChannels = static_cast<uint32_t>(shape[2]);
157  uint32_t numBatches = static_cast<uint32_t>(shape[3]);
158  uint32_t depth = static_cast<uint32_t>(shape[4]);
159 
160  for (unsigned int depthIndex = 0; depthIndex < depth; ++depthIndex)
161  {
162  for (unsigned int batchIndex = 0; batchIndex < numBatches; ++batchIndex)
163  {
164  for (unsigned int channelIndex = 0; channelIndex < numChannels; ++channelIndex)
165  {
166  for (unsigned int y = 0; y < height; ++y)
167  {
168  // Copies one row from arm_compute tensor buffer to linear memory buffer.
169  // A row is the largest contiguous region we can copy, as the tensor data may be using strides.
170  memcpy(
171  dstData + GetLinearBufferOffset(info, depthIndex, batchIndex, channelIndex, y, 0),
172  bufferPtr + GetTensorOffset(info, depthIndex, batchIndex, channelIndex, y, 0),
173  width * sizeof(T));
174  }
175  }
176  }
177  }
178  }
179 }
180 
181 template <typename T>
182 void CopyArmComputeITensorData(const T* srcData, arm_compute::ITensor& dstTensor)
183 {
184  // If MaxNumOfTensorDimensions is increased, this loop will need fixing.
185  static_assert(MaxNumOfTensorDimensions == 5, "Please update CopyArmComputeITensorData");
186  {
187  const arm_compute::ITensorInfo& info = *dstTensor.info();
188  const arm_compute::TensorShape& shape = info.tensor_shape();
189  uint8_t* const bufferPtr = dstTensor.buffer();
190  uint32_t width = static_cast<uint32_t>(shape[0]);
191  uint32_t height = static_cast<uint32_t>(shape[1]);
192  uint32_t numChannels = static_cast<uint32_t>(shape[2]);
193  uint32_t numBatches = static_cast<uint32_t>(shape[3]);
194  uint32_t depth = static_cast<uint32_t>(shape[4]);
195 
196  for (unsigned int depthIndex = 0; depthIndex < depth; ++depthIndex)
197  {
198  for (unsigned int batchIndex = 0; batchIndex < numBatches; ++batchIndex)
199  {
200  for (unsigned int channelIndex = 0; channelIndex < numChannels; ++channelIndex)
201  {
202  for (unsigned int y = 0; y < height; ++y)
203  {
204  // Copies one row from linear memory buffer to arm_compute tensor buffer.
205  // A row is the largest contiguous region we can copy, as the tensor data may be using strides.
206  memcpy(
207  bufferPtr + GetTensorOffset(info, depthIndex, batchIndex, channelIndex, y, 0),
208  srcData + GetLinearBufferOffset(info, depthIndex, batchIndex, channelIndex, y, 0),
209  width * sizeof(T));
210  }
211  }
212  }
213  }
214  }
215 }
216 
223 template<typename ArmComputeType, typename T>
224 TensorShape GetTensorShape(const ArmComputeType& shapelike, T initial)
225 {
226  std::vector<unsigned int> s(MaxNumOfTensorDimensions, initial);
227  for (unsigned int i=0; i < shapelike.num_dimensions(); ++i)
228  {
229  s[(shapelike.num_dimensions()-1)-i] = boost::numeric_cast<unsigned int>(shapelike[i]);
230  }
231  return TensorShape(boost::numeric_cast<unsigned int>(shapelike.num_dimensions()), s.data());
232 };
233 
235 inline TensorShape GetStrides(const arm_compute::Strides& strides)
236 {
237  return GetTensorShape(strides, 0U);
238 }
239 
241 inline TensorShape GetShape(const arm_compute::TensorShape& shape)
242 {
243  return GetTensorShape(shape, 1U);
244 }
245 
246 } // namespace armcomputetensorutils
247 } // namespace armnn
armnn::TensorShape GetTensorShape(unsigned int numberOfBatches, unsigned int numberOfChannels, unsigned int height, unsigned int width, const armnn::DataLayout dataLayout)
Definition: TensorUtils.cpp:19
std::array< unsigned int, MaxNumOfTensorDimensions > Coordinates
constexpr unsigned int MaxNumOfTensorDimensions
Definition: Types.hpp:18
DataLayout
Definition: Types.hpp:48
DataType
Definition: Types.hpp:32