ArmNN
 24.02
GpuFsaTensorHandle Class Reference

#include <GpuFsaTensorHandle.hpp>

Inheritance diagram for GpuFsaTensorHandle:
[legend]
Collaboration diagram for GpuFsaTensorHandle:
[legend]

Public Member Functions

 GpuFsaTensorHandle (const TensorInfo &tensorInfo)
 
 GpuFsaTensorHandle (const TensorInfo &tensorInfo, DataLayout dataLayout, MemorySourceFlags importFlags=static_cast< MemorySourceFlags >(MemorySource::Undefined))
 
arm_compute::CLTensor & GetTensor () override
 
arm_compute::CLTensor const & GetTensor () const override
 
virtual void Allocate () override
 Indicate to the memory manager that this resource is no longer active. More...
 
virtual void Manage () override
 Indicate to the memory manager that this resource is active. More...
 
virtual const void * Map (bool blocking=true) const override
 Map the tensor data for access. More...
 
virtual void Unmap () const override
 Unmap the tensor data. More...
 
virtual ITensorHandle * GetParent () const override
 Get the parent tensor if this is a subtensor. More...
 
virtual arm_compute::DataType GetDataType () const override
 
virtual void SetMemoryGroup (const std::shared_ptr< arm_compute::IMemoryGroup > &memoryGroup) override
 
TensorShape GetStrides () const override
 Get the strides for each dimension ordered from largest to smallest where the smallest value is the same as the size of a single element in the tensor. More...
 
TensorShape GetShape () const override
 Get the number of elements for each dimension ordered from slowest iterating dimension to fastest iterating dimension. More...
 
void SetImportFlags (MemorySourceFlags importFlags)
 
MemorySourceFlags GetImportFlags () const override
 Get flags describing supported import sources. More...
 
void SetImportEnabledFlag (bool importEnabledFlag)
 
virtual bool Import (void *, MemorySource source) override
 Import externally allocated memory. More...
 
virtual bool CanBeImported (void *, MemorySource) override
 Implementations must determine if this memory block can be imported. More...
 
- Public Member Functions inherited from ITensorHandle
virtual ~ITensorHandle ()
 
void * Map (bool blocking=true)
 Map the tensor data for access. More...
 
void Unmap ()
 Unmap the tensor data that was previously mapped with call to Map(). More...
 
virtual void Unimport ()
 Unimport externally allocated memory. More...
 
virtual std::shared_ptr< ITensorHandle > DecorateTensorHandle (const TensorInfo &tensorInfo)
 Returns a decorated version of this TensorHandle allowing us to override the TensorInfo for it. More...
 

Detailed Description

Definition at line 25 of file GpuFsaTensorHandle.hpp.

Constructor & Destructor Documentation

◆ GpuFsaTensorHandle() [1/2]

GpuFsaTensorHandle ( const TensorInfo & tensorInfo)
inline

Definition at line 28 of file GpuFsaTensorHandle.hpp.

29  : m_ImportFlags(static_cast<MemorySourceFlags>(MemorySource::Undefined)),
30  m_Imported(false),
31  m_IsImportEnabled(false)
32  {
33  armnn::armcomputetensorutils::BuildArmComputeTensor(m_Tensor, tensorInfo);
34  }

References armnn::Undefined.

◆ GpuFsaTensorHandle() [2/2]

GpuFsaTensorHandle ( const TensorInfo & tensorInfo,
DataLayout  dataLayout,
MemorySourceFlags  importFlags = static_cast<MemorySourceFlags>(MemorySource::Undefined) 
)
inline

Definition at line 36 of file GpuFsaTensorHandle.hpp.

39  : m_ImportFlags(importFlags),
40  m_Imported(false),
41  m_IsImportEnabled(false)
42  {
43  armnn::armcomputetensorutils::BuildArmComputeTensor(m_Tensor, tensorInfo, dataLayout);
44  }

Member Function Documentation

◆ Allocate()

void Allocate ( )
inlineoverridevirtual

Indicate to the memory manager that this resource is no longer active.

This is used to compute overlapping lifetimes of resources.

Implements ITensorHandle.

Definition at line 48 of file GpuFsaTensorHandle.hpp.

49  {
50  // If we have enabled Importing, don't allocate the tensor
51  if (m_IsImportEnabled)
52  {
53  throw MemoryImportException("GpuFsaTensorHandle::Attempting to allocate memory when importing");
54  }
55  else
56  {
57  armnn::armcomputetensorutils::InitialiseArmComputeTensorEmpty(m_Tensor);
58  }
59 
60  }

◆ CanBeImported()

bool CanBeImported ( void *  memory,
MemorySource  source 
)
inlineoverridevirtual

Implementations must determine if this memory block can be imported.

This might be based on alignment or memory source type.

Returns
true if this memory can be imported.
false by default, cannot be imported.

Reimplemented from ITensorHandle.

Definition at line 131 of file GpuFsaTensorHandle.hpp.

132  {
133  // This TensorHandle can never import.
134  return false;
135  }

◆ GetDataType()

virtual arm_compute::DataType GetDataType ( ) const
inlineoverridevirtual

Implements IClTensorHandle.

Definition at line 86 of file GpuFsaTensorHandle.hpp.

87  {
88  return m_Tensor.info()->data_type();
89  }

◆ GetImportFlags()

MemorySourceFlags GetImportFlags ( ) const
inlineoverridevirtual

Get flags describing supported import sources.

Reimplemented from ITensorHandle.

Definition at line 111 of file GpuFsaTensorHandle.hpp.

112  {
113  return m_ImportFlags;
114  }

◆ GetParent()

virtual ITensorHandle* GetParent ( ) const
inlineoverridevirtual

Get the parent tensor if this is a subtensor.

Returns
a pointer to the parent tensor. Otherwise nullptr if not a subtensor.

Implements ITensorHandle.

Definition at line 84 of file GpuFsaTensorHandle.hpp.

84 { return nullptr; }

◆ GetShape()

TensorShape GetShape ( ) const
inlineoverridevirtual

Get the number of elements for each dimension ordered from slowest iterating dimension to fastest iterating dimension.

Returns
a TensorShape filled with the number of elements for each dimension.

Implements ITensorHandle.

Definition at line 101 of file GpuFsaTensorHandle.hpp.

102  {
103  return armcomputetensorutils::GetShape(m_Tensor.info()->tensor_shape());
104  }

◆ GetStrides()

TensorShape GetStrides ( ) const
inlineoverridevirtual

Get the strides for each dimension ordered from largest to smallest where the smallest value is the same as the size of a single element in the tensor.

Returns
a TensorShape filled with the strides for each dimension

Implements ITensorHandle.

Definition at line 96 of file GpuFsaTensorHandle.hpp.

97  {
98  return armcomputetensorutils::GetStrides(m_Tensor.info()->strides_in_bytes());
99  }

◆ GetTensor() [1/2]

arm_compute::CLTensor const& GetTensor ( ) const
inlineoverridevirtual

Implements IClTensorHandle.

Definition at line 47 of file GpuFsaTensorHandle.hpp.

47 { return m_Tensor; }

◆ GetTensor() [2/2]

arm_compute::CLTensor& GetTensor ( )
inlineoverridevirtual

Implements IClTensorHandle.

Definition at line 46 of file GpuFsaTensorHandle.hpp.

46 { return m_Tensor; }

◆ Import()

bool Import ( void *  memory,
MemorySource  source 
)
inlineoverridevirtual

Import externally allocated memory.

Parameters
memory	base address of the memory being imported.
source	source of the allocation for the memory being imported.
Returns
true on success or false on failure

Reimplemented from ITensorHandle.

Definition at line 121 of file GpuFsaTensorHandle.hpp.

122  {
123  if (m_ImportFlags & static_cast<MemorySourceFlags>(source))
124  {
125  throw MemoryImportException("GpuFsaTensorHandle::Incorrect import flag");
126  }
127  m_Imported = false;
128  return false;
129  }

◆ Manage()

void Manage ( )
inlineoverridevirtual

Indicate to the memory manager that this resource is active.

This is used to compute overlapping lifetimes of resources.

Implements ITensorHandle.

Definition at line 62 of file GpuFsaTensorHandle.hpp.

63  {
64  // If we have enabled Importing, don't manage the tensor
65  if (m_IsImportEnabled)
66  {
67  throw MemoryImportException("GpuFsaTensorHandle::Attempting to manage memory when importing");
68  }
69  else
70  {
71  assert(m_MemoryGroup != nullptr);
72  m_MemoryGroup->manage(&m_Tensor);
73  }
74  }

◆ Map()

const void * Map ( bool  blocking = true) const
inlineoverridevirtual

Map the tensor data for access.

Parameters
blocking	hint to block the calling thread until all other accesses are complete. (backend dependent)
Returns
pointer to the first element of the mapped data.

Implements ITensorHandle.

Definition at line 76 of file GpuFsaTensorHandle.hpp.

77  {
78  const_cast<arm_compute::CLTensor*>(&m_Tensor)->map(blocking);
79  return static_cast<const void*>(m_Tensor.buffer() + m_Tensor.info()->offset_first_element_in_bytes());
80  }

◆ SetImportEnabledFlag()

void SetImportEnabledFlag ( bool  importEnabledFlag)
inline

Definition at line 116 of file GpuFsaTensorHandle.hpp.

117  {
118  m_IsImportEnabled = importEnabledFlag;
119  }

◆ SetImportFlags()

void SetImportFlags ( MemorySourceFlags  importFlags)
inline

Definition at line 106 of file GpuFsaTensorHandle.hpp.

107  {
108  m_ImportFlags = importFlags;
109  }

◆ SetMemoryGroup()

virtual void SetMemoryGroup ( const std::shared_ptr< arm_compute::IMemoryGroup > &  memoryGroup)
inlineoverridevirtual

Implements IClTensorHandle.

Definition at line 91 of file GpuFsaTensorHandle.hpp.

92  {
93  m_MemoryGroup = PolymorphicPointerDowncast<arm_compute::MemoryGroup>(memoryGroup);
94  }

◆ Unmap()

virtual void Unmap ( ) const
inlineoverridevirtual

Unmap the tensor data.

Implements ITensorHandle.

Definition at line 82 of file GpuFsaTensorHandle.hpp.

82 { const_cast<arm_compute::CLTensor*>(&m_Tensor)->unmap(); }

The documentation for this class was generated from the following files:
armnn::MemorySourceFlags
unsigned int MemorySourceFlags
Definition: MemorySources.hpp:15
armnn::MemorySource::Undefined
@ Undefined