ArmNN
 23.08
WorkingMemHandle Class Reference (final)

#include <WorkingMemHandle.hpp>

Inheritance diagram for WorkingMemHandle:
[legend]
Collaboration diagram for WorkingMemHandle:
[legend]

Classes

struct  InputMemDescriptorCoords
 
struct  OutputMemDescriptorCoords
 

Public Member Functions

 WorkingMemHandle (NetworkId networkId)
 
 WorkingMemHandle (NetworkId networkId, std::vector< InputMemDescriptorCoords > inputLayerInfo, std::vector< OutputMemDescriptorCoords > outputLayerInfo, std::vector< WorkingMemDescriptor > workingMemDescriptors, std::unique_ptr< MemoryManager > memoryManager, std::vector< std::pair< std::shared_ptr< TensorMemory >, MemorySource >> tensorMemory, std::vector< std::unique_ptr< ITensorHandle >> managedTensorHandles, std::vector< std::unique_ptr< ITensorHandle >> unmanagedTensorHandles, std::vector< std::pair< BackendId, ExecutionData >> executionDataVec, BackendPtrMap *backends)
 
 ~WorkingMemHandle ()
 
NetworkId GetNetworkId () override
 Returns the NetworkId of the Network that this IWorkingMemHandle works with. More...
 
void Allocate () override
 Allocate the backing memory required for execution. More...
 
void Free () override
 Free the backing memory required for execution. More...
 
bool IsAllocated () override
 IsAllocated returns true if the backing memory is currently allocated. More...
 
WorkingMemDescriptor & GetWorkingMemDescriptorAt (unsigned int id) override
 Get the WorkingMemDescriptor at an index. More...
 
std::pair< BackendId, ExecutionData > & GetExecutionDataAt (unsigned int id) override
 Get the ExecutionData at an index. More...
 
ITensorHandle * GetInputHandle (LayerBindingId layerBindingId) const
 
ITensorHandle * GetOutputHandle (LayerBindingId layerBindingId) const
 
const std::vector< std::vector< ITensorHandle * >::iterator > & GetInputConnections (LayerBindingId layerBindingId) const
 
const std::vector< std::vector< ITensorHandle * >::iterator > & GetOutputConnection (LayerBindingId layerBindingId) const
 
void MemSyncOutputs ()
 
std::vector< LayerBindingId > & GetBindingIdVector ()
 
void ValidateBindingIds ()
 
- Public Member Functions inherited from IWorkingMemHandle
virtual ~IWorkingMemHandle ()
 

Detailed Description

Definition at line 29 of file WorkingMemHandle.hpp.

Constructor & Destructor Documentation

◆ WorkingMemHandle() [1/2]

WorkingMemHandle ( NetworkId  networkId)
inline

Definition at line 48 of file WorkingMemHandle.hpp.

48 : m_NetworkId(networkId){}

◆ WorkingMemHandle() [2/2]

WorkingMemHandle ( NetworkId  networkId,
std::vector< InputMemDescriptorCoords >  inputLayerInfo,
std::vector< OutputMemDescriptorCoords >  outputLayerInfo,
std::vector< WorkingMemDescriptor >  workingMemDescriptors,
std::unique_ptr< MemoryManager >  memoryManager,
std::vector< std::pair< std::shared_ptr< TensorMemory >, MemorySource >>  tensorMemory,
std::vector< std::unique_ptr< ITensorHandle >>  managedTensorHandles,
std::vector< std::unique_ptr< ITensorHandle >>  unmanagedTensorHandles,
std::vector< std::pair< BackendId, ExecutionData >>  executionDataVec,
BackendPtrMap *  backends 
)

Definition at line 18 of file WorkingMemHandle.cpp.

28  : m_NetworkId(networkId)
29  , m_WorkingMemDescriptors(workingMemDescriptors)
30  , m_MemoryManager(std::move(memoryManager))
31  , m_TensorMemory(std::move(tensorMemory))
32  , m_ManagedTensorHandles(std::move(managedTensorHandles))
33  , m_UnmanagedTensorHandles(std::move(unmanagedTensorHandles))
34  , m_InputSize(numeric_cast<DifferenceType>(inputLayerInfo.size()))
35  , m_IsAllocated(false)
36  , m_ExecutionDataVec(executionDataVec)
37  , m_Backends(backends)
38 {
39  for (const auto& inputInfo : inputLayerInfo)
40  {
41  m_InputValidationMap[inputInfo.m_LayerBindingId] = false;
42 
43  // Map the LayerBindingIds to the corresponding input ITensorHandle*
44  auto memDesc = m_WorkingMemDescriptors.at(inputInfo.m_InputSlotCoords[0].first);
45  ITensorHandle* inputTensorHandle = memDesc.m_Inputs[inputInfo.m_InputSlotCoords[0].second];
46  m_InputHandleMap[inputInfo.m_LayerBindingId] = inputTensorHandle;
47 
48  // For every input we need to store all locations from which that input's ITensorHandle* is read.
49  // So we can, at a later point, swap in and out the ITensorHandle* at that location.
50  for (auto inputSlot : inputInfo.m_InputSlotCoords)
51  {
52  WorkingMemDescriptor& workingMemDescriptor = m_WorkingMemDescriptors.at(inputSlot.first);
53 
54  auto inputPos = workingMemDescriptor.m_Inputs.begin();
55 
56  // The DifferenceType of a vector can be unsigned int or signed int depending on the std implementation
57  // This cast removes any conversion warnings
58  inputPos += numeric_cast<DifferenceType>(inputSlot.second);
59  m_InputConnectionMap[inputInfo.m_LayerBindingId].push_back(inputPos);
60  }
61  }
62  size_t bindingIdCount = inputLayerInfo.size();
63  for (const auto& outputInfo : outputLayerInfo)
64  {
65  for (auto bindingId : outputInfo.m_LayerBindingIds)
66  {
67  m_OutputValidationMap[bindingId] = false;
68 
69  // Store the outputSlot position of the tensorhandle
70  auto outputPos = m_WorkingMemDescriptors.at(outputInfo.m_OutputSlotCoords.first).m_Outputs.begin();
71  outputPos += numeric_cast<DifferenceType>(outputInfo.m_OutputSlotCoords.second);
72 
73  m_OutputHandleMap[bindingId] = *outputPos;
74  }
75  bindingIdCount += outputInfo.m_LayerBindingIds.size();
76  // More than one layerBinding id means the tensorhandle is connected to more than one OutputLayer.
77  // Importing in this case would likely cause unexpected behaviour, so we disallow it.
78  if (outputInfo.m_LayerBindingIds.size() != 1)
79  {
80  continue;
81  }
82 
83  // Store the inputSlot positions of the tensorhandle
84  for (auto outputSlot : outputInfo.m_InputSlotCoords)
85  {
86  WorkingMemDescriptor& workingMemDescriptor = m_WorkingMemDescriptors.at(outputSlot.first);
87 
88  auto inputPos = workingMemDescriptor.m_Inputs.begin();
89 
90  // The DifferenceType of a vector can be unsigned int or signed int depending on the std implementation
91  // This cast removes any conversion warnings
92  inputPos += numeric_cast<DifferenceType>(outputSlot.second);
93  m_OutputConnectionMap[outputInfo.m_LayerBindingIds[0]].push_back(inputPos);
94  }
95  }
96  m_BindingIdVec = std::vector<LayerBindingId>(bindingIdCount);
97  IgnoreUnused(m_UnmanagedTensorHandles);
98 }

References armnn::IgnoreUnused(), and WorkingMemDescriptor::m_Inputs.

◆ ~WorkingMemHandle()

~WorkingMemHandle ( )
inline

Definition at line 61 of file WorkingMemHandle.hpp.

62  { Free(); }

References WorkingMemHandle::Free().

Member Function Documentation

◆ Allocate()

void Allocate ( )
overridevirtual

Allocate the backing memory required for execution.

If this is not called, then allocation will be deferred to execution time.

Implements IWorkingMemHandle.

Definition at line 100 of file WorkingMemHandle.cpp.

101 {
102  if (m_IsAllocated)
103  {
104  return;
105  }
106  m_IsAllocated = true;
107 
108  m_MemoryManager->Allocate();
109 
110  for (unsigned int i = 0; i < m_TensorMemory.size(); ++i)
111  {
112  m_ManagedTensorHandles[i]->Import(m_TensorMemory[i].first->m_Data, m_TensorMemory[i].second);
113  }
114 
115  // Assign previously allocated ExecutionData. Needs to be assigned after allocation so the void* are allocated.
116  for (unsigned int i = 0; i < m_ExecutionDataVec.size(); ++i)
117  {
118  auto& backend = m_Backends->at(m_ExecutionDataVec[i].first);
119 
120  ExecutionData executionData = backend->CreateExecutionData(GetWorkingMemDescriptorAt(i));
121  m_ExecutionDataVec[i].second = executionData;
122  }
123 }

References WorkingMemHandle::GetWorkingMemDescriptorAt().

Referenced by LoadedNetwork::Execute().

◆ Free()

void Free ( )
overridevirtual

Free the backing memory required for execution.

Implements IWorkingMemHandle.

Definition at line 125 of file WorkingMemHandle.cpp.

126 {
127  if (!m_IsAllocated)
128  {
129  return;
130  }
131  m_IsAllocated = false;
132 
133  m_MemoryManager->Deallocate();
134 }

Referenced by WorkingMemHandle::~WorkingMemHandle().

◆ GetBindingIdVector()

std::vector<LayerBindingId>& GetBindingIdVector ( )
inline

Definition at line 119 of file WorkingMemHandle.hpp.

120  {
121  return m_BindingIdVec;
122  };

Referenced by LoadedNetwork::Execute().

◆ GetExecutionDataAt()

std::pair<BackendId, ExecutionData>& GetExecutionDataAt ( unsigned int  id)
inlineoverridevirtual

Get the ExecutionData at an index.

The ExecutionData is paired with a BackendId to be able to call backend specific functions upon it. The ExecutionData are stored in the same order as the Workloads in a topologically sorted graph.

Implements IWorkingMemHandle.

Definition at line 92 of file WorkingMemHandle.hpp.

93  {
94  return m_ExecutionDataVec[id];
95  }

Referenced by LoadedNetwork::Execute().

◆ GetInputConnections()

const std::vector<std::vector<ITensorHandle*>::iterator>& GetInputConnections ( LayerBindingId  layerBindingId) const
inline

Definition at line 107 of file WorkingMemHandle.hpp.

108  {
109  return m_InputConnectionMap.at(layerBindingId);
110  };

Referenced by LoadedNetwork::Execute().

◆ GetInputHandle()

ITensorHandle* GetInputHandle ( LayerBindingId  layerBindingId) const
inline

Definition at line 97 of file WorkingMemHandle.hpp.

98  {
99  return m_InputHandleMap.at(layerBindingId);
100  };

Referenced by LoadedNetwork::Execute().

◆ GetNetworkId()

NetworkId GetNetworkId ( )
inlineoverridevirtual

Returns the NetworkId of the Network that this IWorkingMemHandle works with.

Implements IWorkingMemHandle.

Definition at line 64 of file WorkingMemHandle.hpp.

65  {
66  return m_NetworkId;
67  }

◆ GetOutputConnection()

const std::vector<std::vector<ITensorHandle*>::iterator>& GetOutputConnection ( LayerBindingId  layerBindingId) const
inline

Definition at line 112 of file WorkingMemHandle.hpp.

113  {
114  return m_OutputConnectionMap.at(layerBindingId);
115  };

Referenced by LoadedNetwork::Execute().

◆ GetOutputHandle()

ITensorHandle* GetOutputHandle ( LayerBindingId  layerBindingId) const
inline

Definition at line 102 of file WorkingMemHandle.hpp.

103  {
104  return m_OutputHandleMap.at(layerBindingId);
105  };

Referenced by LoadedNetwork::Execute().

◆ GetWorkingMemDescriptorAt()

WorkingMemDescriptor& GetWorkingMemDescriptorAt ( unsigned int  id)
inlineoverridevirtual

Get the WorkingMemDescriptor at an index.

The WorkingMemDescriptors are stored in the same order as the Workloads in a topologically sorted graph.

Implements IWorkingMemHandle.

Definition at line 84 of file WorkingMemHandle.hpp.

85  {
86  return m_WorkingMemDescriptors[id];
87  }

Referenced by WorkingMemHandle::Allocate().

◆ IsAllocated()

bool IsAllocated ( )
inlineoverridevirtual

IsAllocated returns true if the backing memory is currently allocated.

Implements IWorkingMemHandle.

Definition at line 77 of file WorkingMemHandle.hpp.

78  {
79  return m_IsAllocated;
80  }

Referenced by LoadedNetwork::Execute().

◆ MemSyncOutputs()

void MemSyncOutputs ( )

Definition at line 136 of file WorkingMemHandle.cpp.

137 {
138  for (auto output : m_OutputConnectionMap)
139  {
140  (*output.second[0])->Map(true);
141  (*output.second[0])->Unmap();
142  }
143 }

References armnn::Map, and armnn::Unmap.

Referenced by LoadedNetwork::Execute().

◆ ValidateBindingIds()

void ValidateBindingIds ( )

Definition at line 145 of file WorkingMemHandle.cpp.

146 {
147  auto resetInputValidationMap = [&]()
148  {
149  for (auto& pair: m_InputValidationMap)
150  {
151  pair.second = false;
152  }
153  };
154 
155  auto resetOutputValidationMap = [&]()
156  {
157  for (auto& pair: m_OutputValidationMap)
158  {
159  pair.second = false;
160  }
161  };
162 
163  std::for_each(m_BindingIdVec.begin(), m_BindingIdVec.begin() + m_InputSize, [&](LayerBindingId id)
164  {
165  try
166  {
167  bool& isUsed = m_InputValidationMap.at(id);
168  if (isUsed)
169  {
170  resetInputValidationMap();
171  throw InvalidArgumentException(fmt::format("Duplicate Input LayerBindingId: {}", id));
172  }
173  isUsed = true;
174  }
175  catch (const std::out_of_range&)
176  {
177  resetInputValidationMap();
178  throw InvalidArgumentException(fmt::format("Unknown Input LayerBindingId: {}", id));
179  }
180  });
181  resetInputValidationMap();
182 
183  std::for_each(m_BindingIdVec.begin() + m_InputSize, m_BindingIdVec.end(), [&](LayerBindingId id)
184  {
185  try
186  {
187  bool& isUsed = m_OutputValidationMap.at(id);
188  if (isUsed)
189  {
190  resetOutputValidationMap();
191  throw InvalidArgumentException(fmt::format("Duplicate Output LayerBindingId: {}", id));
192  }
193  isUsed = true;
194  }
195  catch (const std::out_of_range&)
196  {
197  resetOutputValidationMap();
198  throw InvalidArgumentException(fmt::format("Unknown Output LayerBindingId: {}", id));
199  }
200  });
201  resetOutputValidationMap();
202 }

Referenced by LoadedNetwork::Execute().


The documentation for this class was generated from the following files: WorkingMemHandle.hpp and WorkingMemHandle.cpp
armnn::experimental::WorkingMemHandle::Free
void Free() override
Free the backing memory required for execution.
Definition: WorkingMemHandle.cpp:125
armnn::LayerBindingId
int LayerBindingId
Type of identifiers for bindable layers (inputs, outputs).
Definition: Types.hpp:303
armnn::IgnoreUnused
void IgnoreUnused(Ts &&...)
Definition: IgnoreUnused.hpp:14
armnn::LayerType::Map
@ Map
armnn::experimental::WorkingMemHandle::GetWorkingMemDescriptorAt
WorkingMemDescriptor & GetWorkingMemDescriptorAt(unsigned int id) override
Get the WorkingMemDescriptor at an index.
Definition: WorkingMemHandle.hpp:84
armnn::LayerType::Unmap
@ Unmap