ArmNN 24.02
WorkingMemHandle.cpp
//
// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "WorkingMemHandle.hpp"
#include "Network.hpp"
#include <armnn/backends/IMemoryManager.hpp>
#include <armnn/backends/TensorHandle.hpp>
#include <fmt/format.h>

namespace armnn
{

namespace experimental
{

WorkingMemHandle::WorkingMemHandle(NetworkId networkId,
                                   std::vector<InputMemDescriptorCoords> inputLayerInfo,
                                   std::vector<OutputMemDescriptorCoords> outputLayerInfo,
                                   std::vector<WorkingMemDescriptor> workingMemDescriptors,
                                   std::unique_ptr<MemoryManager> memoryManager,
                                   std::vector<std::pair<std::shared_ptr<TensorMemory>, MemorySource>> tensorMemory,
                                   std::vector<std::unique_ptr<ITensorHandle>> managedTensorHandles,
                                   std::vector<std::unique_ptr<ITensorHandle>> unmanagedTensorHandles,
                                   std::vector<std::pair<BackendId, ExecutionData>> executionDataVec,
                                   BackendPtrMap* backends)
    : m_NetworkId(networkId)
    , m_WorkingMemDescriptors(workingMemDescriptors)
    , m_MemoryManager(std::move(memoryManager))
    , m_TensorMemory(std::move(tensorMemory))
    , m_ManagedTensorHandles(std::move(managedTensorHandles))
    , m_UnmanagedTensorHandles(std::move(unmanagedTensorHandles))
    , m_InputSize(numeric_cast<DifferenceType>(inputLayerInfo.size()))
    , m_IsAllocated(false)
    , m_ExecutionDataVec(executionDataVec)
    , m_Backends(backends)
{
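    // Build the per-binding lookup tables: for every input and output binding id, record the
    // corresponding ITensorHandle* and every position in the working mem descriptors that refers
    // to it, so that handles can be swapped in and out by LayerBindingId at execution time.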
    for (const auto& inputInfo : inputLayerInfo)
    {
        m_InputValidationMap[inputInfo.m_LayerBindingId] = false;

        // Map the LayerBindingIds to the corresponding input ITensorHandle*
        auto memDesc = m_WorkingMemDescriptors.at(inputInfo.m_InputSlotCoords[0].first);
        ITensorHandle* inputTensorHandle = memDesc.m_Inputs[inputInfo.m_InputSlotCoords[0].second];
        m_InputHandleMap[inputInfo.m_LayerBindingId] = inputTensorHandle;

        // For every input we need to store all locations from which that input's ITensorHandle* is read,
        // so that we can, at a later point, swap in and out the ITensorHandle* at that location.
        for (auto inputSlot : inputInfo.m_InputSlotCoords)
        {
            WorkingMemDescriptor& workingMemDescriptor = m_WorkingMemDescriptors.at(inputSlot.first);

            auto inputPos = workingMemDescriptor.m_Inputs.begin();

            // The DifferenceType of a vector differs between std implementations;
            // this cast removes any conversion warnings.
            inputPos += numeric_cast<DifferenceType>(inputSlot.second);
            m_InputConnectionMap[inputInfo.m_LayerBindingId].push_back(inputPos);
        }
    }
    size_t bindingIdCount = inputLayerInfo.size();
    for (const auto& outputInfo : outputLayerInfo)
    {
        for (auto bindingId : outputInfo.m_LayerBindingIds)
        {
            m_OutputValidationMap[bindingId] = false;

            // Store the outputSlot position of the tensor handle
            auto outputPos = m_WorkingMemDescriptors.at(outputInfo.m_OutputSlotCoords.first).m_Outputs.begin();
            outputPos += numeric_cast<DifferenceType>(outputInfo.m_OutputSlotCoords.second);

            m_OutputHandleMap[bindingId] = *outputPos;
        }
        bindingIdCount += outputInfo.m_LayerBindingIds.size();

        // More than one LayerBindingId means the tensor handle is connected to more than one OutputLayer.
        // Importing in this case would likely cause unexpected behaviour, so we disallow it.
        if (outputInfo.m_LayerBindingIds.size() != 1)
        {
            continue;
        }

        // Store the inputSlot positions of the tensor handle
        for (auto outputSlot : outputInfo.m_InputSlotCoords)
        {
            WorkingMemDescriptor& workingMemDescriptor = m_WorkingMemDescriptors.at(outputSlot.first);

            auto inputPos = workingMemDescriptor.m_Inputs.begin();

            // The DifferenceType of a vector differs between std implementations;
            // this cast removes any conversion warnings.
            inputPos += numeric_cast<DifferenceType>(outputSlot.second);
            m_OutputConnectionMap[outputInfo.m_LayerBindingIds[0]].push_back(inputPos);
        }
    }
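    // m_BindingIdVec has one slot per input binding id followed by one per output binding id;
    // ValidateBindingIds() relies on this inputs-first layout.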
    m_BindingIdVec = std::vector<LayerBindingId>(bindingIdCount);
    IgnoreUnused(m_UnmanagedTensorHandles);
}
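
// Illustrative usage sketch (not part of this file): a WorkingMemHandle is normally obtained
// through the runtime's experimental async API rather than constructed directly, e.g.
//
//     std::unique_ptr<IWorkingMemHandle> handle = runtime->CreateWorkingMemHandle(networkId);
//     runtime->Execute(*handle, inputTensors, outputTensors);
//
// where 'runtime' is an armnn::IRuntime and 'inputTensors'/'outputTensors' are the usual
// InputTensors/OutputTensors bindings prepared by the caller.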

void WorkingMemHandle::Allocate()
{
    if (m_IsAllocated)
    {
        return;
    }
    m_IsAllocated = true;

    m_MemoryManager->Allocate();

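    // Import each memory block reserved by the MemoryManager into its corresponding
    // managed tensor handle.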
    for (unsigned int i = 0; i < m_TensorMemory.size(); ++i)
    {
        m_ManagedTensorHandles[i]->Import(m_TensorMemory[i].first->m_Data, m_TensorMemory[i].second);
    }

    // Assign previously created ExecutionData. This has to happen after allocation so that the
    // void* buffers it refers to actually exist.
    for (unsigned int i = 0; i < m_ExecutionDataVec.size(); ++i)
    {
        auto& backend = m_Backends->at(m_ExecutionDataVec[i].first);

        ExecutionData executionData = backend->CreateExecutionData(GetWorkingMemDescriptorAt(i));
        m_ExecutionDataVec[i].second = executionData;
    }
}

void WorkingMemHandle::Free()
{
    if (!m_IsAllocated)
    {
        return;
    }
    m_IsAllocated = false;

    m_MemoryManager->Deallocate();
}

void WorkingMemHandle::MemSyncOutputs()
{
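    // A blocking Map() followed by an Unmap() on each output handle forces any outstanding
    // writes to be synchronised back to the handle's memory.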
    for (auto output : m_OutputConnectionMap)
    {
        (*output.second[0])->Map(true);
        (*output.second[0])->Unmap();
    }
}

void WorkingMemHandle::ValidateBindingIds()
{
    auto resetInputValidationMap = [&]()
    {
        for (auto& pair : m_InputValidationMap)
        {
            pair.second = false;
        }
    };

    auto resetOutputValidationMap = [&]()
    {
        for (auto& pair : m_OutputValidationMap)
        {
            pair.second = false;
        }
    };

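    // m_BindingIdVec holds the input binding ids first, then the output binding ids, so the first
    // m_InputSize entries are checked against the input map and the remainder against the output map.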
    std::for_each(m_BindingIdVec.begin(), m_BindingIdVec.begin() + m_InputSize, [&](LayerBindingId id)
    {
        try
        {
            bool& isUsed = m_InputValidationMap.at(id);
            if (isUsed)
            {
                resetInputValidationMap();
                throw InvalidArgumentException(fmt::format("Duplicate Input LayerBindingId: {}", id));
            }
            isUsed = true;
        }
        catch (const std::out_of_range&)
        {
            resetInputValidationMap();
            throw InvalidArgumentException(fmt::format("Unknown Input LayerBindingId: {}", id));
        }
    });
    resetInputValidationMap();

    std::for_each(m_BindingIdVec.begin() + m_InputSize, m_BindingIdVec.end(), [&](LayerBindingId id)
    {
        try
        {
            bool& isUsed = m_OutputValidationMap.at(id);
            if (isUsed)
            {
                resetOutputValidationMap();
                throw InvalidArgumentException(fmt::format("Duplicate Output LayerBindingId: {}", id));
            }
            isUsed = true;
        }
        catch (const std::out_of_range&)
        {
            resetOutputValidationMap();
            throw InvalidArgumentException(fmt::format("Unknown Output LayerBindingId: {}", id));
        }
    });
    resetOutputValidationMap();
}

} // end experimental namespace

} // end armnn namespace