ArmNN
 22.02
NeonTensorHandle Class Reference

#include <NeonTensorHandle.hpp>

Inheritance diagram for NeonTensorHandle:
IAclTensorHandle ITensorHandle

Public Member Functions

 NeonTensorHandle (const TensorInfo &tensorInfo)
 
 NeonTensorHandle (const TensorInfo &tensorInfo, DataLayout dataLayout, MemorySourceFlags importFlags=static_cast< MemorySourceFlags >(MemorySource::Malloc))
 
arm_compute::ITensor & GetTensor () override
 
arm_compute::ITensor const & GetTensor () const override
 
virtual void Allocate () override
 Indicate to the memory manager that this resource is no longer active. More...
 
virtual void Manage () override
 Indicate to the memory manager that this resource is active. More...
 
virtual ITensorHandle * GetParent () const override
 Get the parent tensor if this is a subtensor. More...
 
virtual arm_compute::DataType GetDataType () const override
 
virtual void SetMemoryGroup (const std::shared_ptr< arm_compute::IMemoryGroup > &memoryGroup) override
 
virtual const void * Map (bool) const override
 Map the tensor data for access. More...
 
virtual void Unmap () const override
 Unmap the tensor data. More...
 
TensorShape GetStrides () const override
 Get the strides for each dimension ordered from largest to smallest where the smallest value is the same as the size of a single element in the tensor. More...
 
TensorShape GetShape () const override
 Get the number of elements for each dimension ordered from slowest iterating dimension to fastest iterating dimension. More...
 
void SetImportFlags (MemorySourceFlags importFlags)
 
MemorySourceFlags GetImportFlags () const override
 Get flags describing supported import sources. More...
 
void SetImportEnabledFlag (bool importEnabledFlag)
 
bool CanBeImported (void *memory, MemorySource source) override
 Implementations must determine if this memory block can be imported. More...
 
virtual bool Import (void *memory, MemorySource source) override
 Import externally allocated memory. More...
 
- Public Member Functions inherited from ITensorHandle
virtual ~ITensorHandle ()
 
void * Map (bool blocking=true)
 Map the tensor data for access. More...
 
void Unmap ()
 Unmap the tensor data that was previously mapped with call to Map(). More...
 
virtual void Unimport ()
 Unimport externally allocated memory. More...
 

Detailed Description

Definition at line 26 of file NeonTensorHandle.hpp.

Constructor & Destructor Documentation

◆ NeonTensorHandle() [1/2]

NeonTensorHandle ( const TensorInfo & tensorInfo)
inline

Definition at line 29 of file NeonTensorHandle.hpp.

30  : m_ImportFlags(static_cast<MemorySourceFlags>(MemorySource::Malloc)),
31  m_Imported(false),
32  m_IsImportEnabled(false),
33  m_TypeAlignment(GetDataTypeSize(tensorInfo.GetDataType()))
34  {
35  armnn::armcomputetensorutils::BuildArmComputeTensor(m_Tensor, tensorInfo);
36  }
constexpr unsigned int GetDataTypeSize(DataType dataType)
Definition: TypesUtils.hpp:151

◆ NeonTensorHandle() [2/2]

NeonTensorHandle ( const TensorInfo & tensorInfo,
DataLayout  dataLayout,
MemorySourceFlags  importFlags = static_cast<MemorySourceFlags>(MemorySource::Malloc) 
)
inline

Definition at line 38 of file NeonTensorHandle.hpp.

41  : m_ImportFlags(importFlags),
42  m_Imported(false),
43  m_IsImportEnabled(false),
44  m_TypeAlignment(GetDataTypeSize(tensorInfo.GetDataType()))
45 
46 
47  {
48  armnn::armcomputetensorutils::BuildArmComputeTensor(m_Tensor, tensorInfo, dataLayout);
49  }
constexpr unsigned int GetDataTypeSize(DataType dataType)
Definition: TypesUtils.hpp:151

Member Function Documentation

◆ Allocate()

virtual void Allocate ( )
inlineoverridevirtual

Indicate to the memory manager that this resource is no longer active.

This is used to compute overlapping lifetimes of resources.

Implements ITensorHandle.

Definition at line 54 of file NeonTensorHandle.hpp.

55  {
56  // If we have enabled Importing, don't Allocate the tensor
57  if (!m_IsImportEnabled)
58  {
59  armnn::armcomputetensorutils::InitialiseArmComputeTensorEmpty(m_Tensor);
60  }
61  };

◆ CanBeImported()

bool CanBeImported ( void *  memory,
MemorySource  source 
)
inlineoverridevirtual

Implementations must determine if this memory block can be imported.

This might be based on alignment or memory source type.

Returns
true if this memory can be imported.
false by default, cannot be imported.

Reimplemented from ITensorHandle.

Definition at line 117 of file NeonTensorHandle.hpp.

References armnn::IgnoreUnused().

Referenced by NeonTensorHandle::Import().

118  {
119  armnn::IgnoreUnused(source);
120  if (reinterpret_cast<uintptr_t>(memory) % m_TypeAlignment)
121  {
122  return false;
123  }
124  return true;
125  }
void IgnoreUnused(Ts &&...)

◆ GetDataType()

virtual arm_compute::DataType GetDataType ( ) const
inlineoverridevirtual

Implements IAclTensorHandle.

Definition at line 75 of file NeonTensorHandle.hpp.

Referenced by NeonTensorHandle::Import().

76  {
77  return m_Tensor.info()->data_type();
78  }

◆ GetImportFlags()

MemorySourceFlags GetImportFlags ( ) const
inlineoverridevirtual

Get flags describing supported import sources.

Reimplemented from ITensorHandle.

Definition at line 107 of file NeonTensorHandle.hpp.

108  {
109  return m_ImportFlags;
110  }

◆ GetParent()

virtual ITensorHandle* GetParent ( ) const
inlineoverridevirtual

Get the parent tensor if this is a subtensor.

Returns
a pointer to the parent tensor. Otherwise nullptr if not a subtensor.

Implements ITensorHandle.

Definition at line 73 of file NeonTensorHandle.hpp.

73 { return nullptr; }

◆ GetShape()

TensorShape GetShape ( ) const
inlineoverridevirtual

Get the number of elements for each dimension ordered from slowest iterating dimension to fastest iterating dimension.

Returns
a TensorShape filled with the number of elements for each dimension.

Implements ITensorHandle.

Definition at line 97 of file NeonTensorHandle.hpp.

Referenced by NeonRankWorkload::Execute().

98  {
99  return armcomputetensorutils::GetShape(m_Tensor.info()->tensor_shape());
100  }

◆ GetStrides()

TensorShape GetStrides ( ) const
inlineoverridevirtual

Get the strides for each dimension ordered from largest to smallest where the smallest value is the same as the size of a single element in the tensor.

Returns
a TensorShape filled with the strides for each dimension

Implements ITensorHandle.

Definition at line 92 of file NeonTensorHandle.hpp.

93  {
94  return armcomputetensorutils::GetStrides(m_Tensor.info()->strides_in_bytes());
95  }

◆ GetTensor() [1/2]

arm_compute::ITensor& GetTensor ( )
inlineoverridevirtual

Implements IAclTensorHandle.

Definition at line 51 of file NeonTensorHandle.hpp.

51 { return m_Tensor; }

◆ GetTensor() [2/2]

arm_compute::ITensor const& GetTensor ( ) const
inlineoverridevirtual

Implements IAclTensorHandle.

Definition at line 52 of file NeonTensorHandle.hpp.

52 { return m_Tensor; }

◆ Import()

virtual bool Import ( void *  memory,
MemorySource  source 
)
inlineoverridevirtual

Import externally allocated memory.

Parameters
memory	base address of the memory being imported.
source	source of the allocation for the memory being imported.
Returns
true on success or false on failure

Reimplemented from ITensorHandle.

Definition at line 127 of file NeonTensorHandle.hpp.

References NeonTensorHandle::CanBeImported(), NeonTensorHandle::GetDataType(), and armnn::Malloc.

128  {
129  if (m_ImportFlags & static_cast<MemorySourceFlags>(source))
130  {
131  if (source == MemorySource::Malloc && m_IsImportEnabled)
132  {
133  if (!CanBeImported(memory, source))
134  {
135  throw MemoryImportException("NeonTensorHandle::Import Attempting to import unaligned memory");
136  }
137 
138  // m_Tensor not yet Allocated
139  if (!m_Imported && !m_Tensor.buffer())
140  {
141  arm_compute::Status status = m_Tensor.allocator()->import_memory(memory);
142  // Use the overloaded bool operator of Status to check if it worked, if not throw an exception
143  // with the Status error message
144  m_Imported = bool(status);
145  if (!m_Imported)
146  {
147  throw MemoryImportException(status.error_description());
148  }
149  return m_Imported;
150  }
151 
152  // m_Tensor.buffer() initially allocated with Allocate().
153  if (!m_Imported && m_Tensor.buffer())
154  {
155  throw MemoryImportException(
156  "NeonTensorHandle::Import Attempting to import on an already allocated tensor");
157  }
158 
159  // m_Tensor.buffer() previously imported.
160  if (m_Imported)
161  {
162  arm_compute::Status status = m_Tensor.allocator()->import_memory(memory);
163  // Use the overloaded bool operator of Status to check if it worked, if not throw an exception
164  // with the Status error message
165  m_Imported = bool(status);
166  if (!m_Imported)
167  {
168  throw MemoryImportException(status.error_description());
169  }
170  return m_Imported;
171  }
172  }
173  else
174  {
175  throw MemoryImportException("NeonTensorHandle::Import is disabled");
176  }
177  }
178  else
179  {
180  throw MemoryImportException("NeonTensorHandle::Incorrect import flag");
181  }
182  return false;
183  }
bool CanBeImported(void *memory, MemorySource source) override
Implementations must determine if this memory block can be imported.
Status
enumeration
Definition: Types.hpp:29

◆ Manage()

virtual void Manage ( )
inlineoverridevirtual

Indicate to the memory manager that this resource is active.

This is used to compute overlapping lifetimes of resources.

Implements ITensorHandle.

Definition at line 63 of file NeonTensorHandle.hpp.

References ARMNN_ASSERT.

64  {
65  // If we have enabled Importing, don't manage the tensor
66  if (!m_IsImportEnabled)
67  {
68  ARMNN_ASSERT(m_MemoryGroup != nullptr);
69  m_MemoryGroup->manage(&m_Tensor);
70  }
71  }
#define ARMNN_ASSERT(COND)
Definition: Assert.hpp:14

◆ Map()

virtual const void* Map ( bool  blocking) const
inlineoverridevirtual

Map the tensor data for access.

Parameters
blocking	hint to block the calling thread until all other accesses are complete. (backend dependent)
Returns
pointer to the first element of the mapped data.

Implements ITensorHandle.

Definition at line 85 of file NeonTensorHandle.hpp.

86  {
87  return static_cast<const void*>(m_Tensor.buffer() + m_Tensor.info()->offset_first_element_in_bytes());
88  }

◆ SetImportEnabledFlag()

void SetImportEnabledFlag ( bool  importEnabledFlag)
inline

Definition at line 112 of file NeonTensorHandle.hpp.

113  {
114  m_IsImportEnabled = importEnabledFlag;
115  }

◆ SetImportFlags()

void SetImportFlags ( MemorySourceFlags  importFlags)
inline

Definition at line 102 of file NeonTensorHandle.hpp.

103  {
104  m_ImportFlags = importFlags;
105  }

◆ SetMemoryGroup()

virtual void SetMemoryGroup ( const std::shared_ptr< arm_compute::IMemoryGroup > &  memoryGroup)
inlineoverridevirtual

Implements IAclTensorHandle.

Definition at line 80 of file NeonTensorHandle.hpp.

81  {
82  m_MemoryGroup = PolymorphicPointerDowncast<arm_compute::MemoryGroup>(memoryGroup);
83  }

◆ Unmap()

virtual void Unmap ( ) const
inlineoverridevirtual

Unmap the tensor data.

Implements ITensorHandle.

Definition at line 90 of file NeonTensorHandle.hpp.

90 {}

The documentation for this class was generated from the following file: