ArmNN (NotReleased) documentation
NeonTensorHandle.hpp
Go to the documentation of this file.
1 //
2 // Copyright © 2017 Arm Ltd. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 #pragma once
6 
7 #include <Half.hpp>
8 
11 
12 #include <arm_compute/runtime/MemoryGroup.h>
13 #include <arm_compute/runtime/IMemoryGroup.h>
14 #include <arm_compute/runtime/Tensor.h>
15 #include <arm_compute/runtime/SubTensor.h>
16 #include <arm_compute/core/TensorShape.h>
17 #include <arm_compute/core/Coordinates.h>
18 
19 #include <boost/polymorphic_pointer_cast.hpp>
20 
21 namespace armnn
22 {
23 
25 {
26 public:
// Builds the wrapped arm_compute::Tensor from an ArmNN TensorInfo.
// Import defaults: Malloc is advertised as an importable source, but
// importing stays disabled until SetImportEnabledFlag(true) is called.
27  NeonTensorHandle(const TensorInfo& tensorInfo)
28  : m_ImportFlags(static_cast<MemorySourceFlags>(MemorySource::Malloc)),
29  m_Imported(false),
30  m_IsImportEnabled(false)
31  {
32  armnn::armcomputetensorutils::BuildArmComputeTensor(m_Tensor, tensorInfo);
33  }
34 
// Same as the single-argument constructor, but also fixes the tensor's
// data layout (e.g. NCHW/NHWC) and lets the caller override which memory
// sources may later be imported via Import().
35  NeonTensorHandle(const TensorInfo& tensorInfo,
36  DataLayout dataLayout,
37  MemorySourceFlags importFlags = static_cast<MemorySourceFlags>(MemorySource::Malloc))
38  : m_ImportFlags(importFlags),
39  m_Imported(false),
40  m_IsImportEnabled(false)
41 
42  {
43  armnn::armcomputetensorutils::BuildArmComputeTensor(m_Tensor, tensorInfo, dataLayout);
44  }
45 
// Mutable and const access to the wrapped ACL tensor.
46  arm_compute::ITensor& GetTensor() override { return m_Tensor; }
47  arm_compute::ITensor const& GetTensor() const override { return m_Tensor; }
48 
49  virtual void Allocate() override
50  {
51  // If we have enabled Importing, don't Allocate the tensor
52  if (!m_IsImportEnabled)
53  {
54  armnn::armcomputetensorutils::InitialiseArmComputeTensorEmpty(m_Tensor);
55  }
56  };
57 
// Hands the tensor to the ACL memory group so its lifetime is managed by
// the group's memory manager. Skipped when importing is enabled, because
// imported buffers are owned by the caller, not by ACL.
58  virtual void Manage() override
59  {
60  // If we have enabled Importing, don't manage the tensor
61  if (!m_IsImportEnabled)
62  {
// A memory group must have been installed via SetMemoryGroup() first.
63  BOOST_ASSERT(m_MemoryGroup != nullptr);
64  m_MemoryGroup->manage(&m_Tensor);
65  }
66  }
67 
// Top-level handle: it is never a sub-tensor, so it has no parent.
68  virtual ITensorHandle* GetParent() const override { return nullptr; }
69 
// Data type as recorded in the wrapped ACL tensor's info.
70  virtual arm_compute::DataType GetDataType() const override
71  {
72  return m_Tensor.info()->data_type();
73  }
74 
// Stores the memory group used by Manage(). The interface hands us an
// IMemoryGroup, but manage() lives on the concrete arm_compute::MemoryGroup,
// hence the checked downcast (boost asserts on a type mismatch in debug
// builds — NOTE(review): confirm desired behavior in release builds).
75  virtual void SetMemoryGroup(const std::shared_ptr<arm_compute::IMemoryGroup>& memoryGroup) override
76  {
77  m_MemoryGroup = boost::polymorphic_pointer_downcast<arm_compute::MemoryGroup>(memoryGroup);
78  }
79 
// Returns a CPU-visible pointer to the first element. The 'blocking'
// argument is ignored — NEON tensors are always host-accessible.
80  virtual const void* Map(bool /* blocking = true */) const override
81  {
82  return static_cast<const void*>(m_Tensor.buffer() + m_Tensor.info()->offset_first_element_in_bytes());
83  }
84 
// No-op: nothing to unmap for host-resident memory.
85  virtual void Unmap() const override {}
86 
// Strides in bytes, converted from ACL's representation.
87  TensorShape GetStrides() const override
88  {
89  return armcomputetensorutils::GetStrides(m_Tensor.info()->strides_in_bytes());
90  }
91 
// Tensor shape, converted from ACL's representation.
92  TensorShape GetShape() const override
93  {
94  return armcomputetensorutils::GetShape(m_Tensor.info()->tensor_shape());
95  }
96 
98  {
99  m_ImportFlags = importFlags;
100  }
101 
103  {
104  return m_ImportFlags;
105  }
106 
// Enables/disables memory import. When true, Allocate() and Manage()
// become no-ops and the buffer must be supplied through Import().
105  void SetImportEnabledFlag(bool importEnabledFlag)
106  {
107  m_IsImportEnabled = importEnabledFlag;
108  }
111 
112  virtual bool Import(void* memory, MemorySource source) override
113  {
114  if (m_ImportFlags & static_cast<MemorySourceFlags>(source))
115  {
116  if (source == MemorySource::Malloc && m_IsImportEnabled)
117  {
118  // Checks the 16 byte memory alignment
119  constexpr uintptr_t alignment = sizeof(size_t);
120  if (reinterpret_cast<uintptr_t>(memory) % alignment)
121  {
122  throw MemoryImportException("NeonTensorHandle::Import Attempting to import unaligned memory");
123  }
124 
125  // m_Tensor not yet Allocated
126  if (!m_Imported && !m_Tensor.buffer())
127  {
128  arm_compute::Status status = m_Tensor.allocator()->import_memory(memory);
129  // Use the overloaded bool operator of Status to check if it worked, if not throw an exception
130  // with the Status error message
131  m_Imported = bool(status);
132  if (!m_Imported)
133  {
134  throw MemoryImportException(status.error_description());
135  }
136  return m_Imported;
137  }
138 
139  // m_Tensor.buffer() initially allocated with Allocate().
140  if (!m_Imported && m_Tensor.buffer())
141  {
142  throw MemoryImportException(
143  "NeonTensorHandle::Import Attempting to import on an already allocated tensor");
144  }
145 
146  // m_Tensor.buffer() previously imported.
147  if (m_Imported)
148  {
149  arm_compute::Status status = m_Tensor.allocator()->import_memory(memory);
150  // Use the overloaded bool operator of Status to check if it worked, if not throw an exception
151  // with the Status error message
152  m_Imported = bool(status);
153  if (!m_Imported)
154  {
155  throw MemoryImportException(status.error_description());
156  }
157  return m_Imported;
158  }
159  }
160  }
161  return false;
162  }
163 
164 private:
163 // Only used for testing
// Copies the tensor's contents into caller-provided host memory, picking
// the element type from the ACL data type. 'memory' must be large enough
// for the whole tensor — TODO confirm with callers.
164  void CopyOutTo(void* memory) const override
165  {
166  switch (this->GetDataType())
167  {
168  case arm_compute::DataType::F32:
169  armcomputetensorutils::CopyArmComputeITensorData(this->GetTensor(),
170  static_cast<float*>(memory));
171  break;
172  case arm_compute::DataType::U8:
173  case arm_compute::DataType::QASYMM8:
174  armcomputetensorutils::CopyArmComputeITensorData(this->GetTensor(),
175  static_cast<uint8_t*>(memory));
176  break;
177  case arm_compute::DataType::F16:
178  armcomputetensorutils::CopyArmComputeITensorData(this->GetTensor(),
179  static_cast<armnn::Half*>(memory));
180  break;
181  case arm_compute::DataType::S16:
182  case arm_compute::DataType::QSYMM16:
183  armcomputetensorutils::CopyArmComputeITensorData(this->GetTensor(),
184  static_cast<int16_t*>(memory));
185  break;
186  case arm_compute::DataType::S32:
187  armcomputetensorutils::CopyArmComputeITensorData(this->GetTensor(),
188  static_cast<int32_t*>(memory));
189  break;
190  default:
191  {
// NOTE(review): the default-case body appears truncated in this dump;
// presumably it asserts on an unsupported DataType — confirm upstream.
192  }
193  }
194  }
198 
196 // Only used for testing
// Copies caller-provided host memory into the tensor, picking the element
// type from the ACL data type (mirror of CopyOutTo above).
197  void CopyInFrom(const void* memory) override
198  {
199  switch (this->GetDataType())
200  {
201  case arm_compute::DataType::F32:
202  armcomputetensorutils::CopyArmComputeITensorData(static_cast<const float*>(memory),
203  this->GetTensor());
204  break;
205  case arm_compute::DataType::U8:
206  case arm_compute::DataType::QASYMM8:
207  armcomputetensorutils::CopyArmComputeITensorData(static_cast<const uint8_t*>(memory),
208  this->GetTensor());
209  break;
210  case arm_compute::DataType::F16:
211  armcomputetensorutils::CopyArmComputeITensorData(static_cast<const armnn::Half*>(memory),
212  this->GetTensor());
213  break;
214  case arm_compute::DataType::S16:
215  case arm_compute::DataType::QSYMM16:
216  armcomputetensorutils::CopyArmComputeITensorData(static_cast<const int16_t*>(memory),
217  this->GetTensor());
218  break;
219  case arm_compute::DataType::S32:
220  armcomputetensorutils::CopyArmComputeITensorData(static_cast<const int32_t*>(memory),
221  this->GetTensor());
222  break;
223  default:
224  {
// NOTE(review): default-case body truncated in this dump; presumably an
// assert on unsupported DataType — confirm upstream.
225  }
226  }
227  }
232 
229  arm_compute::Tensor m_Tensor; // Owned ACL tensor wrapped by this handle.
230  std::shared_ptr<arm_compute::MemoryGroup> m_MemoryGroup; // Set via SetMemoryGroup(); used by Manage().
231  MemorySourceFlags m_ImportFlags; // Which MemorySource values Import() will accept.
232  bool m_Imported; // True once a buffer has been successfully imported.
233  bool m_IsImportEnabled; // Gates Allocate()/Manage() vs Import() behavior.
238 };
239 
241 {
242 public:
244  const arm_compute::TensorShape& shape,
245  const arm_compute::Coordinates& coords)
246  : m_Tensor(&parent->GetTensor(), shape, coords)
247  {
248  parentHandle = parent;
249  }
250 
// Mutable and const access to the wrapped ACL sub-tensor.
245  arm_compute::ITensor& GetTensor() override { return m_Tensor; }
246  arm_compute::ITensor const& GetTensor() const override { return m_Tensor; }
247 
// No-ops: a sub-tensor aliases its parent's storage, so it neither
// allocates nor participates in memory-group management itself.
248  virtual void Allocate() override {}
249  virtual void Manage() override {}
250 
// The handle this sub-tensor was carved out of.
251  virtual ITensorHandle* GetParent() const override { return parentHandle; }
252 
// Data type as recorded in the ACL sub-tensor's info.
253  virtual arm_compute::DataType GetDataType() const override
254  {
255  return m_Tensor.info()->data_type();
256  }
257 
// No-op: memory is owned and managed by the parent tensor.
258  virtual void SetMemoryGroup(const std::shared_ptr<arm_compute::IMemoryGroup>&) override {}
265 
// Host pointer to the sub-tensor's first element inside the parent buffer;
// 'blocking' is ignored (host-resident memory).
260  virtual const void* Map(bool /* blocking = true */) const override
261  {
262  return static_cast<const void*>(m_Tensor.buffer() + m_Tensor.info()->offset_first_element_in_bytes());
263  }
// No-op: nothing to unmap for host-resident memory.
264  virtual void Unmap() const override {}
265 
// Strides in bytes, converted from ACL's representation.
266  TensorShape GetStrides() const override
267  {
268  return armcomputetensorutils::GetStrides(m_Tensor.info()->strides_in_bytes());
269  }
270 
// Sub-tensor shape, converted from ACL's representation.
271  TensorShape GetShape() const override
272  {
273  return armcomputetensorutils::GetShape(m_Tensor.info()->tensor_shape());
274  }
281 
282 private:
277 // Only used for testing
// Copies the sub-tensor's contents into caller-provided host memory.
// NOTE(review): unlike NeonTensorHandle::CopyOutTo, there is no F16 case
// here — confirm whether that omission is intentional.
278  void CopyOutTo(void* memory) const override
279  {
280  switch (this->GetDataType())
281  {
282  case arm_compute::DataType::F32:
283  armcomputetensorutils::CopyArmComputeITensorData(this->GetTensor(),
284  static_cast<float*>(memory));
285  break;
286  case arm_compute::DataType::U8:
287  case arm_compute::DataType::QASYMM8:
288  armcomputetensorutils::CopyArmComputeITensorData(this->GetTensor(),
289  static_cast<uint8_t*>(memory));
290  break;
291  case arm_compute::DataType::S16:
292  case arm_compute::DataType::QSYMM16:
293  armcomputetensorutils::CopyArmComputeITensorData(this->GetTensor(),
294  static_cast<int16_t*>(memory));
295  break;
296  case arm_compute::DataType::S32:
297  armcomputetensorutils::CopyArmComputeITensorData(this->GetTensor(),
298  static_cast<int32_t*>(memory));
299  break;
300  default:
301  {
// NOTE(review): default-case body truncated in this dump; presumably an
// assert on unsupported DataType — confirm upstream.
302  }
303  }
304  }
312 
306 // Only used for testing
// Copies caller-provided host memory into the sub-tensor (mirror of
// CopyOutTo above). NOTE(review): no F16 case here either, unlike
// NeonTensorHandle::CopyInFrom — confirm intentional.
307  void CopyInFrom(const void* memory) override
308  {
309  switch (this->GetDataType())
310  {
311  case arm_compute::DataType::F32:
312  armcomputetensorutils::CopyArmComputeITensorData(static_cast<const float*>(memory),
313  this->GetTensor());
314  break;
315  case arm_compute::DataType::U8:
316  case arm_compute::DataType::QASYMM8:
317  armcomputetensorutils::CopyArmComputeITensorData(static_cast<const uint8_t*>(memory),
318  this->GetTensor());
319  break;
320  case arm_compute::DataType::S16:
321  case arm_compute::DataType::QSYMM16:
322  armcomputetensorutils::CopyArmComputeITensorData(static_cast<const int16_t*>(memory),
323  this->GetTensor());
324  break;
325  case arm_compute::DataType::S32:
326  armcomputetensorutils::CopyArmComputeITensorData(static_cast<const int32_t*>(memory),
327  this->GetTensor());
328  break;
329  default:
330  {
// NOTE(review): default-case body truncated in this dump; presumably an
// assert on unsupported DataType — confirm upstream.
331  }
332  }
333  }
342 
335  arm_compute::SubTensor m_Tensor; // ACL sub-tensor view into the parent's storage.
336  ITensorHandle* parentHandle = nullptr; // Non-owning pointer to the parent handle.
345 };
346 
347 } // namespace armnn
TensorShape GetShape() const override
virtual const void * Map(bool) const override
arm_compute::ITensor & GetTensor() override
Status
Definition: Types.hpp:26
virtual void Unmap() const override
Unmap the tensor data.
virtual void Allocate() override
TensorShape GetStrides() const override
DataLayout::NHWC false
TensorShape GetStrides() const override
virtual void Unmap() const override
Unmap the tensor data.
virtual bool Import(void *memory, MemorySource source) override
NeonTensorHandle(const TensorInfo &tensorInfo)
virtual void SetMemoryGroup(const std::shared_ptr< arm_compute::IMemoryGroup > &memoryGroup) override
virtual void SetMemoryGroup(const std::shared_ptr< arm_compute::IMemoryGroup > &) override
arm_compute::ITensor & GetTensor() override
std::array< unsigned int, MaxNumOfTensorDimensions > Coordinates
unsigned int MemorySourceFlags
TensorShape GetShape() const override
virtual void Allocate() override
virtual arm_compute::DataType GetDataType() const override
virtual void Manage() override
DataLayout
Definition: Types.hpp:48
arm_compute::ITensor const & GetTensor() const override
DataType
Definition: Types.hpp:32
virtual void Manage() override
arm_compute::ITensor const & GetTensor() const override
void SetImportFlags(MemorySourceFlags importFlags)
void SetImportEnabledFlag(bool importEnabledFlag)
virtual ITensorHandle * GetParent() const override
NeonTensorHandle(const TensorInfo &tensorInfo, DataLayout dataLayout, MemorySourceFlags importFlags=static_cast< MemorySourceFlags >(MemorySource::Malloc))
virtual const void * Map(bool) const override
MemorySourceFlags GetImportFlags() const override
Get flags describing supported import sources.
virtual ITensorHandle * GetParent() const override
virtual arm_compute::DataType GetDataType() const override
NeonSubTensorHandle(IAclTensorHandle *parent, const arm_compute::TensorShape &shape, const arm_compute::Coordinates &coords)