ArmNN
 24.02
NeonTensorHandle.hpp
Go to the documentation of this file.
1 //
2 // Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
#pragma once

#include <BFloat16.hpp>
#include <Half.hpp>

#include <armnn/utility/Assert.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>

#include <aclCommon/ArmComputeTensorHandle.hpp>
#include <aclCommon/ArmComputeTensorUtils.hpp>

#include <arm_compute/runtime/MemoryGroup.h>
#include <arm_compute/runtime/IMemoryGroup.h>
#include <arm_compute/runtime/Tensor.h>
#include <arm_compute/runtime/SubTensor.h>
#include <arm_compute/core/TensorShape.h>
#include <arm_compute/core/Coordinates.h>
#include "armnn/TypesUtils.hpp"
24 
25 namespace armnn
26 {
27 class NeonTensorHandleDecorator;
28 
30 {
31 public:
32  NeonTensorHandle(const TensorInfo& tensorInfo)
33  : m_ImportFlags(static_cast<MemorySourceFlags>(MemorySource::Malloc)),
34  m_Imported(false),
35  m_IsImportEnabled(false),
36  m_TypeAlignment(GetDataTypeSize(tensorInfo.GetDataType()))
37  {
38  armnn::armcomputetensorutils::BuildArmComputeTensor(m_Tensor, tensorInfo);
39  }
40 
41  NeonTensorHandle(const TensorInfo& tensorInfo,
42  DataLayout dataLayout,
43  MemorySourceFlags importFlags = static_cast<MemorySourceFlags>(MemorySource::Malloc))
44  : m_ImportFlags(importFlags),
45  m_Imported(false),
46  m_IsImportEnabled(false),
47  m_TypeAlignment(GetDataTypeSize(tensorInfo.GetDataType()))
48 
49 
50  {
51  armnn::armcomputetensorutils::BuildArmComputeTensor(m_Tensor, tensorInfo, dataLayout);
52  }
53 
54  arm_compute::ITensor& GetTensor() override { return m_Tensor; }
55  arm_compute::ITensor const& GetTensor() const override { return m_Tensor; }
56 
57  virtual void Allocate() override
58  {
59  // If we have enabled Importing, don't Allocate the tensor
60  if (!m_IsImportEnabled)
61  {
62  armnn::armcomputetensorutils::InitialiseArmComputeTensorEmpty(m_Tensor);
63  }
64  };
65 
66  virtual void Manage() override
67  {
68  // If we have enabled Importing, don't manage the tensor
69  if (!m_IsImportEnabled)
70  {
71  ARMNN_ASSERT(m_MemoryGroup != nullptr);
72  m_MemoryGroup->manage(&m_Tensor);
73  }
74  }
75 
76  virtual ITensorHandle* GetParent() const override { return nullptr; }
77 
78  virtual arm_compute::DataType GetDataType() const override
79  {
80  return m_Tensor.info()->data_type();
81  }
82 
83  virtual void SetMemoryGroup(const std::shared_ptr<arm_compute::IMemoryGroup>& memoryGroup) override
84  {
85  m_MemoryGroup = PolymorphicPointerDowncast<arm_compute::MemoryGroup>(memoryGroup);
86  }
87 
88  virtual const void* Map(bool /* blocking = true */) const override
89  {
90  return static_cast<const void*>(m_Tensor.buffer() + m_Tensor.info()->offset_first_element_in_bytes());
91  }
92 
93  virtual void Unmap() const override {}
94 
95  TensorShape GetStrides() const override
96  {
97  return armcomputetensorutils::GetStrides(m_Tensor.info()->strides_in_bytes());
98  }
99 
100  TensorShape GetShape() const override
101  {
102  return armcomputetensorutils::GetShape(m_Tensor.info()->tensor_shape());
103  }
104 
106  {
107  m_ImportFlags = importFlags;
108  }
109 
111  {
112  return m_ImportFlags;
113  }
114 
115  void SetImportEnabledFlag(bool importEnabledFlag)
116  {
117  m_IsImportEnabled = importEnabledFlag;
118  }
119 
120  bool CanBeImported(void* memory, MemorySource source) override
121  {
122  if (source != MemorySource::Malloc || reinterpret_cast<uintptr_t>(memory) % m_TypeAlignment)
123  {
124  return false;
125  }
126  return true;
127  }
128 
129  virtual bool Import(void* memory, MemorySource source) override
130  {
131  if (m_ImportFlags& static_cast<MemorySourceFlags>(source))
132  {
133  if (source == MemorySource::Malloc && m_IsImportEnabled)
134  {
135  if (!CanBeImported(memory, source))
136  {
137  throw MemoryImportException("NeonTensorHandle::Import Attempting to import unaligned memory");
138  }
139 
140  // m_Tensor not yet Allocated
141  if (!m_Imported && !m_Tensor.buffer())
142  {
143  arm_compute::Status status = m_Tensor.allocator()->import_memory(memory);
144  // Use the overloaded bool operator of Status to check if it worked, if not throw an exception
145  // with the Status error message
146  m_Imported = bool(status);
147  if (!m_Imported)
148  {
149  throw MemoryImportException(status.error_description());
150  }
151  return m_Imported;
152  }
153 
154  // m_Tensor.buffer() initially allocated with Allocate().
155  if (!m_Imported && m_Tensor.buffer())
156  {
157  throw MemoryImportException(
158  "NeonTensorHandle::Import Attempting to import on an already allocated tensor");
159  }
160 
161  // m_Tensor.buffer() previously imported.
162  if (m_Imported)
163  {
164  arm_compute::Status status = m_Tensor.allocator()->import_memory(memory);
165  // Use the overloaded bool operator of Status to check if it worked, if not throw an exception
166  // with the Status error message
167  m_Imported = bool(status);
168  if (!m_Imported)
169  {
170  throw MemoryImportException(status.error_description());
171  }
172  return m_Imported;
173  }
174  }
175  else
176  {
177  throw MemoryImportException("NeonTensorHandle::Import is disabled");
178  }
179  }
180  else
181  {
182  throw MemoryImportException("NeonTensorHandle::Incorrect import flag");
183  }
184  return false;
185  }
186 
187  virtual std::shared_ptr<ITensorHandle> DecorateTensorHandle(const TensorInfo& tensorInfo) override;
188 
189 private:
190  // Only used for testing
191  void CopyOutTo(void* memory) const override
192  {
193  switch (this->GetDataType())
194  {
195  case arm_compute::DataType::F32:
196  armcomputetensorutils::CopyArmComputeITensorData(this->GetTensor(),
197  static_cast<float*>(memory));
198  break;
199  case arm_compute::DataType::U8:
200  case arm_compute::DataType::QASYMM8:
201  armcomputetensorutils::CopyArmComputeITensorData(this->GetTensor(),
202  static_cast<uint8_t*>(memory));
203  break;
204  case arm_compute::DataType::QSYMM8:
205  case arm_compute::DataType::QASYMM8_SIGNED:
206  armcomputetensorutils::CopyArmComputeITensorData(this->GetTensor(),
207  static_cast<int8_t*>(memory));
208  break;
209  case arm_compute::DataType::BFLOAT16:
210  armcomputetensorutils::CopyArmComputeITensorData(this->GetTensor(),
211  static_cast<armnn::BFloat16*>(memory));
212  break;
213  case arm_compute::DataType::F16:
214  armcomputetensorutils::CopyArmComputeITensorData(this->GetTensor(),
215  static_cast<armnn::Half*>(memory));
216  break;
217  case arm_compute::DataType::S16:
218  case arm_compute::DataType::QSYMM16:
219  armcomputetensorutils::CopyArmComputeITensorData(this->GetTensor(),
220  static_cast<int16_t*>(memory));
221  break;
222  case arm_compute::DataType::S32:
223  armcomputetensorutils::CopyArmComputeITensorData(this->GetTensor(),
224  static_cast<int32_t*>(memory));
225  break;
226  default:
227  {
229  }
230  }
231  }
232 
233  // Only used for testing
234  void CopyInFrom(const void* memory) override
235  {
236  switch (this->GetDataType())
237  {
238  case arm_compute::DataType::F32:
239  armcomputetensorutils::CopyArmComputeITensorData(static_cast<const float*>(memory),
240  this->GetTensor());
241  break;
242  case arm_compute::DataType::U8:
243  case arm_compute::DataType::QASYMM8:
244  armcomputetensorutils::CopyArmComputeITensorData(static_cast<const uint8_t*>(memory),
245  this->GetTensor());
246  break;
247  case arm_compute::DataType::QSYMM8:
248  case arm_compute::DataType::QASYMM8_SIGNED:
249  case arm_compute::DataType::QSYMM8_PER_CHANNEL:
250  armcomputetensorutils::CopyArmComputeITensorData(static_cast<const int8_t*>(memory),
251  this->GetTensor());
252  break;
253  case arm_compute::DataType::BFLOAT16:
254  armcomputetensorutils::CopyArmComputeITensorData(static_cast<const armnn::BFloat16*>(memory),
255  this->GetTensor());
256  break;
257  case arm_compute::DataType::F16:
258  armcomputetensorutils::CopyArmComputeITensorData(static_cast<const armnn::Half*>(memory),
259  this->GetTensor());
260  break;
261  case arm_compute::DataType::S16:
262  case arm_compute::DataType::QSYMM16:
263  armcomputetensorutils::CopyArmComputeITensorData(static_cast<const int16_t*>(memory),
264  this->GetTensor());
265  break;
266  case arm_compute::DataType::S32:
267  armcomputetensorutils::CopyArmComputeITensorData(static_cast<const int32_t*>(memory),
268  this->GetTensor());
269  break;
270  default:
271  {
273  }
274  }
275  }
276 
277  arm_compute::Tensor m_Tensor;
278  std::shared_ptr<arm_compute::MemoryGroup> m_MemoryGroup;
279  MemorySourceFlags m_ImportFlags;
280  bool m_Imported;
281  bool m_IsImportEnabled;
282  const uintptr_t m_TypeAlignment;
283  std::vector<std::shared_ptr<NeonTensorHandleDecorator>> m_Decorated;
284 };
285 
287 {
288 public:
290  const arm_compute::TensorShape& shape,
291  const arm_compute::Coordinates& coords)
292  : m_Tensor(&parent->GetTensor(), shape, coords, true)
293  {
294  parentHandle = parent;
295  }
296 
297  arm_compute::ITensor& GetTensor() override { return m_Tensor; }
298  arm_compute::ITensor const& GetTensor() const override { return m_Tensor; }
299 
300  virtual void Allocate() override {}
301  virtual void Manage() override {}
302 
303  virtual ITensorHandle* GetParent() const override { return parentHandle; }
304 
305  virtual arm_compute::DataType GetDataType() const override
306  {
307  return m_Tensor.info()->data_type();
308  }
309 
310  virtual void SetMemoryGroup(const std::shared_ptr<arm_compute::IMemoryGroup>&) override {}
311 
312  virtual const void* Map(bool /* blocking = true */) const override
313  {
314  return static_cast<const void*>(m_Tensor.buffer() + m_Tensor.info()->offset_first_element_in_bytes());
315  }
316  virtual void Unmap() const override {}
317 
318  TensorShape GetStrides() const override
319  {
320  return armcomputetensorutils::GetStrides(m_Tensor.info()->strides_in_bytes());
321  }
322 
323  TensorShape GetShape() const override
324  {
325  return armcomputetensorutils::GetShape(m_Tensor.info()->tensor_shape());
326  }
327 
328  virtual std::shared_ptr<ITensorHandle> DecorateTensorHandle(const TensorInfo&) override
329  {
330  return nullptr;
331  };
332 
333 private:
334  // Only used for testing
335  void CopyOutTo(void* memory) const override
336  {
337  switch (this->GetDataType())
338  {
339  case arm_compute::DataType::F32:
340  armcomputetensorutils::CopyArmComputeITensorData(this->GetTensor(),
341  static_cast<float*>(memory));
342  break;
343  case arm_compute::DataType::U8:
344  case arm_compute::DataType::QASYMM8:
345  armcomputetensorutils::CopyArmComputeITensorData(this->GetTensor(),
346  static_cast<uint8_t*>(memory));
347  break;
348  case arm_compute::DataType::QSYMM8:
349  case arm_compute::DataType::QASYMM8_SIGNED:
350  armcomputetensorutils::CopyArmComputeITensorData(this->GetTensor(),
351  static_cast<int8_t*>(memory));
352  break;
353  case arm_compute::DataType::S16:
354  case arm_compute::DataType::QSYMM16:
355  armcomputetensorutils::CopyArmComputeITensorData(this->GetTensor(),
356  static_cast<int16_t*>(memory));
357  break;
358  case arm_compute::DataType::S32:
359  armcomputetensorutils::CopyArmComputeITensorData(this->GetTensor(),
360  static_cast<int32_t*>(memory));
361  break;
362  default:
363  {
365  }
366  }
367  }
368 
369  // Only used for testing
370  void CopyInFrom(const void* memory) override
371  {
372  switch (this->GetDataType())
373  {
374  case arm_compute::DataType::F32:
375  armcomputetensorutils::CopyArmComputeITensorData(static_cast<const float*>(memory),
376  this->GetTensor());
377  break;
378  case arm_compute::DataType::U8:
379  case arm_compute::DataType::QASYMM8:
380  armcomputetensorutils::CopyArmComputeITensorData(static_cast<const uint8_t*>(memory),
381  this->GetTensor());
382  break;
383  case arm_compute::DataType::QSYMM8:
384  case arm_compute::DataType::QASYMM8_SIGNED:
385  armcomputetensorutils::CopyArmComputeITensorData(static_cast<const int8_t*>(memory),
386  this->GetTensor());
387  break;
388  case arm_compute::DataType::S16:
389  case arm_compute::DataType::QSYMM16:
390  armcomputetensorutils::CopyArmComputeITensorData(static_cast<const int16_t*>(memory),
391  this->GetTensor());
392  break;
393  case arm_compute::DataType::S32:
394  armcomputetensorutils::CopyArmComputeITensorData(static_cast<const int32_t*>(memory),
395  this->GetTensor());
396  break;
397  default:
398  {
400  }
401  }
402  }
403 
404  arm_compute::SubTensor m_Tensor;
405  ITensorHandle* parentHandle = nullptr;
406 };
407 
408 /// NeonTensorDecorator wraps an existing Neon tensor allowing us to override the TensorInfo for it
409 class NeonTensorDecorator : public arm_compute::ITensor
410 {
411 public:
413 
414  NeonTensorDecorator(arm_compute::ITensor* original, const TensorInfo& info);
415 
416  ~NeonTensorDecorator() = default;
417 
418  NeonTensorDecorator(const NeonTensorDecorator&) = delete;
419 
421 
423 
425 
426  // Inherited methods overridden:
427  arm_compute::ITensorInfo* info() const override;
428 
429  arm_compute::ITensorInfo* info() override;
430 
431  uint8_t* buffer() const override;
432 
433 private:
434  arm_compute::ITensor* m_Original;
435  mutable arm_compute::TensorInfo m_TensorInfo;
436 };
437 
439 {
440 public:
442  : m_Tensor(&parent->GetTensor(), info)
443  {
444  parentHandle = parent;
445  }
446 
447  arm_compute::ITensor& GetTensor() override { return m_Tensor; }
448  arm_compute::ITensor const& GetTensor() const override { return m_Tensor; }
449 
450  virtual void Allocate() override {}
451  virtual void Manage() override {}
452 
453  virtual ITensorHandle* GetParent() const override { return nullptr; }
454 
455  virtual arm_compute::DataType GetDataType() const override
456  {
457  return m_Tensor.info()->data_type();
458  }
459 
460  virtual void SetMemoryGroup(const std::shared_ptr<arm_compute::IMemoryGroup>&) override {}
461 
462  virtual const void* Map(bool /* blocking = true */) const override
463  {
464  return static_cast<const void*>(m_Tensor.buffer() + m_Tensor.info()->offset_first_element_in_bytes());
465  }
466  virtual void Unmap() const override {}
467 
468  TensorShape GetStrides() const override
469  {
470  return armcomputetensorutils::GetStrides(m_Tensor.info()->strides_in_bytes());
471  }
472 
473  TensorShape GetShape() const override
474  {
475  return armcomputetensorutils::GetShape(m_Tensor.info()->tensor_shape());
476  }
477 
478  virtual std::shared_ptr<ITensorHandle> DecorateTensorHandle(const TensorInfo&) override
479  {
480  return nullptr;
481  };
482 
483 private:
484  // Only used for testing
485  void CopyOutTo(void* memory) const override
486  {
487  switch (this->GetDataType())
488  {
489  case arm_compute::DataType::F32:
490  armcomputetensorutils::CopyArmComputeITensorData(this->GetTensor(),
491  static_cast<float*>(memory));
492  break;
493  case arm_compute::DataType::U8:
494  case arm_compute::DataType::QASYMM8:
495  armcomputetensorutils::CopyArmComputeITensorData(this->GetTensor(),
496  static_cast<uint8_t*>(memory));
497  break;
498  case arm_compute::DataType::QSYMM8:
499  case arm_compute::DataType::QASYMM8_SIGNED:
500  armcomputetensorutils::CopyArmComputeITensorData(this->GetTensor(),
501  static_cast<int8_t*>(memory));
502  break;
503  case arm_compute::DataType::S16:
504  case arm_compute::DataType::QSYMM16:
505  armcomputetensorutils::CopyArmComputeITensorData(this->GetTensor(),
506  static_cast<int16_t*>(memory));
507  break;
508  case arm_compute::DataType::S32:
509  armcomputetensorutils::CopyArmComputeITensorData(this->GetTensor(),
510  static_cast<int32_t*>(memory));
511  break;
512  default:
513  {
515  }
516  }
517  }
518 
519  // Only used for testing
520  void CopyInFrom(const void* memory) override
521  {
522  switch (this->GetDataType())
523  {
524  case arm_compute::DataType::F32:
525  armcomputetensorutils::CopyArmComputeITensorData(static_cast<const float*>(memory),
526  this->GetTensor());
527  break;
528  case arm_compute::DataType::U8:
529  case arm_compute::DataType::QASYMM8:
530  armcomputetensorutils::CopyArmComputeITensorData(static_cast<const uint8_t*>(memory),
531  this->GetTensor());
532  break;
533  case arm_compute::DataType::QSYMM8:
534  case arm_compute::DataType::QASYMM8_SIGNED:
535  armcomputetensorutils::CopyArmComputeITensorData(static_cast<const int8_t*>(memory),
536  this->GetTensor());
537  break;
538  case arm_compute::DataType::S16:
539  case arm_compute::DataType::QSYMM16:
540  armcomputetensorutils::CopyArmComputeITensorData(static_cast<const int16_t*>(memory),
541  this->GetTensor());
542  break;
543  case arm_compute::DataType::S32:
544  armcomputetensorutils::CopyArmComputeITensorData(static_cast<const int32_t*>(memory),
545  this->GetTensor());
546  break;
547  default:
548  {
550  }
551  }
552  }
553 
554  NeonTensorDecorator m_Tensor;
555  ITensorHandle* parentHandle = nullptr;
556 };
557 
558 
559 } // namespace armnn
ARMNN_ASSERT
#define ARMNN_ASSERT(COND)
Definition: Assert.hpp:14
armnn::MemorySource::Malloc
@ Malloc
armnn::NeonTensorDecorator::~NeonTensorDecorator
~NeonTensorDecorator()=default
armnn::NeonSubTensorHandle::GetTensor
arm_compute::ITensor & GetTensor() override
Definition: NeonTensorHandle.hpp:297
armnn::DataLayout
DataLayout
Definition: Types.hpp:62
TypesUtils.hpp
armnn::NeonSubTensorHandle::NeonSubTensorHandle
NeonSubTensorHandle(IAclTensorHandle *parent, const arm_compute::TensorShape &shape, const arm_compute::Coordinates &coords)
Definition: NeonTensorHandle.hpp:289
armnn::NeonSubTensorHandle::Unmap
virtual void Unmap() const override
Unmap the tensor data.
Definition: NeonTensorHandle.hpp:316
armnn::TensorInfo
Definition: Tensor.hpp:152
armnn::NeonTensorHandle::NeonTensorHandle
NeonTensorHandle(const TensorInfo &tensorInfo)
Definition: NeonTensorHandle.hpp:32
armnn::NeonTensorHandle::Allocate
virtual void Allocate() override
Indicate to the memory manager that this resource is no longer active.
Definition: NeonTensorHandle.hpp:57
armnn::MemorySourceFlags
unsigned int MemorySourceFlags
Definition: MemorySources.hpp:15
armnn::ITensorHandle
Definition: ITensorHandle.hpp:16
armnn::NeonSubTensorHandle::DecorateTensorHandle
virtual std::shared_ptr< ITensorHandle > DecorateTensorHandle(const TensorInfo &) override
Returns a decorated version of this TensorHandle allowing us to override the TensorInfo for it.
Definition: NeonTensorHandle.hpp:328
armnn::Half
half_float::half Half
Definition: Half.hpp:22
armnn::NeonTensorHandle::GetImportFlags
MemorySourceFlags GetImportFlags() const override
Get flags describing supported import sources.
Definition: NeonTensorHandle.hpp:110
armnn::NeonSubTensorHandle::Allocate
virtual void Allocate() override
Indicate to the memory manager that this resource is no longer active.
Definition: NeonTensorHandle.hpp:300
armnn::NeonSubTensorHandle::GetShape
TensorShape GetShape() const override
Get the number of elements for each dimension ordered from slowest iterating dimension to fastest ite...
Definition: NeonTensorHandle.hpp:323
armnn::Coordinates
std::array< unsigned int, MaxNumOfTensorDimensions > Coordinates
Definition: InternalTypes.hpp:15
armnn::NeonTensorHandle::Unmap
virtual void Unmap() const override
Unmap the tensor data.
Definition: NeonTensorHandle.hpp:93
armnn::NeonSubTensorHandle::SetMemoryGroup
virtual void SetMemoryGroup(const std::shared_ptr< arm_compute::IMemoryGroup > &) override
Definition: NeonTensorHandle.hpp:310
Assert.hpp
armnn::NeonTensorHandleDecorator::GetTensor
arm_compute::ITensor const & GetTensor() const override
Definition: NeonTensorHandle.hpp:448
armnn::NeonTensorDecorator::buffer
uint8_t * buffer() const override
Definition: NeonTensorHandle.cpp:42
armnn::TensorShape
Definition: Tensor.hpp:20
armnn::NeonTensorHandle::GetShape
TensorShape GetShape() const override
Get the number of elements for each dimension ordered from slowest iterating dimension to fastest ite...
Definition: NeonTensorHandle.hpp:100
armnn::NeonTensorDecorator
NeonTensorDecorator wraps an existing Neon tensor allowing us to override the TensorInfo for it.
Definition: NeonTensorHandle.hpp:409
armnn::NeonSubTensorHandle::Manage
virtual void Manage() override
Indicate to the memory manager that this resource is active.
Definition: NeonTensorHandle.hpp:301
armnn::NeonSubTensorHandle::GetStrides
TensorShape GetStrides() const override
Get the strides for each dimension ordered from largest to smallest where the smallest value is the s...
Definition: NeonTensorHandle.hpp:318
armnn::NeonTensorHandleDecorator::GetTensor
arm_compute::ITensor & GetTensor() override
Definition: NeonTensorHandle.hpp:447
armnn::IAclTensorHandle
Definition: ArmComputeTensorHandle.hpp:16
PolymorphicDowncast.hpp
armnn::DataType
DataType
Definition: Types.hpp:48
armnn::NeonTensorHandle::Manage
virtual void Manage() override
Indicate to the memory manager that this resource is active.
Definition: NeonTensorHandle.hpp:66
armnn::NeonTensorHandleDecorator::GetStrides
TensorShape GetStrides() const override
Get the strides for each dimension ordered from largest to smallest where the smallest value is the s...
Definition: NeonTensorHandle.hpp:468
armnn::NeonTensorHandle::CanBeImported
bool CanBeImported(void *memory, MemorySource source) override
Implementations must determine if this memory block can be imported.
Definition: NeonTensorHandle.hpp:120
armnn::NeonTensorHandle::NeonTensorHandle
NeonTensorHandle(const TensorInfo &tensorInfo, DataLayout dataLayout, MemorySourceFlags importFlags=static_cast< MemorySourceFlags >(MemorySource::Malloc))
Definition: NeonTensorHandle.hpp:41
armnn::NeonTensorHandle::Import
virtual bool Import(void *memory, MemorySource source) override
Import externally allocated memory.
Definition: NeonTensorHandle.hpp:129
armnn::GetDataTypeSize
constexpr unsigned int GetDataTypeSize(DataType dataType)
Definition: TypesUtils.hpp:182
armnn::NeonTensorHandle::GetDataType
virtual arm_compute::DataType GetDataType() const override
Definition: NeonTensorHandle.hpp:78
armnn::NeonSubTensorHandle::GetParent
virtual ITensorHandle * GetParent() const override
Get the parent tensor if this is a subtensor.
Definition: NeonTensorHandle.hpp:303
armnn::BoostLogSeverityMapping::info
@ info
armnn::NeonTensorHandle::SetImportEnabledFlag
void SetImportEnabledFlag(bool importEnabledFlag)
Definition: NeonTensorHandle.hpp:115
ArmComputeTensorHandle.hpp
armnn::NeonTensorHandleDecorator::Map
virtual const void * Map(bool) const override
Map the tensor data for access.
Definition: NeonTensorHandle.hpp:462
armnn::NeonTensorHandleDecorator::Unmap
virtual void Unmap() const override
Unmap the tensor data.
Definition: NeonTensorHandle.hpp:466
Half.hpp
armnn::NeonTensorHandle::Map
virtual const void * Map(bool) const override
Map the tensor data for access.
Definition: NeonTensorHandle.hpp:88
armnn::NeonTensorHandleDecorator::SetMemoryGroup
virtual void SetMemoryGroup(const std::shared_ptr< arm_compute::IMemoryGroup > &) override
Definition: NeonTensorHandle.hpp:460
armnn::NeonSubTensorHandle::GetDataType
virtual arm_compute::DataType GetDataType() const override
Definition: NeonTensorHandle.hpp:305
armnn::Status
Status
Definition: Types.hpp:42
armnn::NeonTensorHandleDecorator::GetDataType
virtual arm_compute::DataType GetDataType() const override
Definition: NeonTensorHandle.hpp:455
armnn::NeonSubTensorHandle
Definition: NeonTensorHandle.hpp:286
armnn::NeonTensorHandle::GetTensor
arm_compute::ITensor & GetTensor() override
Definition: NeonTensorHandle.hpp:54
armnn::MemoryImportException
Definition: Exceptions.hpp:125
armnn::BFloat16
Definition: BFloat16.hpp:15
armnn::MemorySource
MemorySource
Define the Memory Source to reduce copies.
Definition: Types.hpp:244
armnn::NeonTensorDecorator::operator=
NeonTensorDecorator & operator=(const NeonTensorDecorator &)=delete
armnn::NeonTensorHandleDecorator::Allocate
virtual void Allocate() override
Indicate to the memory manager that this resource is no longer active.
Definition: NeonTensorHandle.hpp:450
armnn::NeonTensorHandle::GetParent
virtual ITensorHandle * GetParent() const override
Get the parent tensor if this is a subtensor.
Definition: NeonTensorHandle.hpp:76
armnn::NeonSubTensorHandle::GetTensor
arm_compute::ITensor const & GetTensor() const override
Definition: NeonTensorHandle.hpp:298
armnn::NeonTensorHandleDecorator
Definition: NeonTensorHandle.hpp:438
armnn
Copyright (c) 2021 ARM Limited and Contributors.
Definition: 01_00_quick_start.dox:6
armnn::NeonTensorHandle::DecorateTensorHandle
virtual std::shared_ptr< ITensorHandle > DecorateTensorHandle(const TensorInfo &tensorInfo) override
Returns a decorated version of this TensorHandle allowing us to override the TensorInfo for it.
Definition: NeonTensorHandle.cpp:12
armnn::NeonTensorHandleDecorator::Manage
virtual void Manage() override
Indicate to the memory manager that this resource is active.
Definition: NeonTensorHandle.hpp:451
ArmComputeTensorUtils.hpp
armnn::NeonTensorHandleDecorator::GetShape
TensorShape GetShape() const override
Get the number of elements for each dimension ordered from slowest iterating dimension to fastest ite...
Definition: NeonTensorHandle.hpp:473
armnn::NeonTensorDecorator::info
arm_compute::ITensorInfo * info() const override
Definition: NeonTensorHandle.cpp:32
armnn::NeonTensorHandle::GetTensor
arm_compute::ITensor const & GetTensor() const override
Definition: NeonTensorHandle.hpp:55
BFloat16.hpp
armnn::UnimplementedException
Definition: Exceptions.hpp:98
armnn::NeonTensorHandleDecorator::GetParent
virtual ITensorHandle * GetParent() const override
Get the parent tensor if this is a subtensor.
Definition: NeonTensorHandle.hpp:453
armnn::NeonTensorHandle::SetMemoryGroup
virtual void SetMemoryGroup(const std::shared_ptr< arm_compute::IMemoryGroup > &memoryGroup) override
Definition: NeonTensorHandle.hpp:83
armnn::NeonTensorHandleDecorator::DecorateTensorHandle
virtual std::shared_ptr< ITensorHandle > DecorateTensorHandle(const TensorInfo &) override
Returns a decorated version of this TensorHandle allowing us to override the TensorInfo for it.
Definition: NeonTensorHandle.hpp:478
armnn::NeonTensorHandle::SetImportFlags
void SetImportFlags(MemorySourceFlags importFlags)
Definition: NeonTensorHandle.hpp:105
armnn::NeonTensorDecorator::NeonTensorDecorator
NeonTensorDecorator()
Definition: NeonTensorHandle.cpp:20
armnn::NeonSubTensorHandle::Map
virtual const void * Map(bool) const override
Map the tensor data for access.
Definition: NeonTensorHandle.hpp:312
armnn::NeonTensorHandle::GetStrides
TensorShape GetStrides() const override
Get the strides for each dimension ordered from largest to smallest where the smallest value is the s...
Definition: NeonTensorHandle.hpp:95
armnn::NeonTensorHandleDecorator::NeonTensorHandleDecorator
NeonTensorHandleDecorator(IAclTensorHandle *parent, const TensorInfo &info)
Definition: NeonTensorHandle.hpp:441
armnn::NeonTensorHandle
Definition: NeonTensorHandle.hpp:29