ArmNN 21.11
WorkingMemHandle.cpp
//
// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "WorkingMemHandle.hpp"
#include "Network.hpp"
#include <fmt/format.h>

namespace armnn
{

namespace experimental
{

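// Builds the working memory state for one inference on a loaded network: maps each input and
// output LayerBindingId to its ITensorHandle*, and records every position in the working memory
// descriptors where that handle is referenced, so the handle can later be swapped in and out
// (for example when user memory is imported).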
WorkingMemHandle::WorkingMemHandle(NetworkId networkId,
                                   std::vector<InputMemDescriptorCoords> inputLayerInfo,
                                   std::vector<OutputMemDescriptorCoords> outputLayerInfo,
                                   std::vector<WorkingMemDescriptor> workingMemDescriptors,
                                   std::unordered_map<LayerGuid, WorkingMemDescriptor> workingMemDescriptorMap,
                                   std::unique_ptr<MemoryManager> memoryManager,
                                   std::vector<std::pair<std::shared_ptr<TensorMemory>, MemorySource>> tensorMemory,
                                   std::vector<std::unique_ptr<ITensorHandle>> managedTensorHandles,
                                   std::vector<std::unique_ptr<ITensorHandle>> unmanagedTensorHandles)
    : m_NetworkId(networkId)
    , m_WorkingMemDescriptors(workingMemDescriptors)
    , m_WorkingMemDescriptorMap(workingMemDescriptorMap)
    , m_MemoryManager(std::move(memoryManager))
    , m_TensorMemory(std::move(tensorMemory))
    , m_ManagedTensorHandles(std::move(managedTensorHandles))
    , m_UnmanagedTensorHandles(std::move(unmanagedTensorHandles))
    , m_InputSize(numeric_cast<DifferenceType>(inputLayerInfo.size()))
    , m_IsAllocated(false)
{
    for (const auto& inputInfo : inputLayerInfo)
    {
        m_InputValidationMap[inputInfo.m_LayerBindingId] = false;

        // Map the LayerBindingId to the corresponding input ITensorHandle*
        auto memDesc = m_WorkingMemDescriptors.at(inputInfo.m_InputSlotCoords[0].first);
        ITensorHandle* inputTensorHandle = memDesc.m_Inputs[inputInfo.m_InputSlotCoords[0].second];
        m_InputHandleMap[inputInfo.m_LayerBindingId] = inputTensorHandle;

        // For every input, store all locations from which that input's ITensorHandle* is read,
        // so that the ITensorHandle* at those locations can be swapped in and out later.
        for (auto inputSlot : inputInfo.m_InputSlotCoords)
        {
            WorkingMemDescriptor& workingMemDescriptor = m_WorkingMemDescriptors.at(inputSlot.first);

            auto inputPos = workingMemDescriptor.m_Inputs.begin();

            // The difference_type of a vector can be signed or unsigned depending on the std
            // implementation; this cast removes any conversion warnings.
            inputPos += numeric_cast<DifferenceType>(inputSlot.second);
            m_InputConnectionMap[inputInfo.m_LayerBindingId].push_back(inputPos);
        }
    }
    size_t bindingIdCount = inputLayerInfo.size();
    for (const auto& outputInfo : outputLayerInfo)
    {
        for (auto bindingId : outputInfo.m_LayerBindingIds)
        {
            m_OutputValidationMap[bindingId] = false;

            // Store the output slot position of the tensor handle
            auto outputPos = m_WorkingMemDescriptors.at(outputInfo.m_OutputSlotCoords.first).m_Outputs.begin();
            outputPos += numeric_cast<DifferenceType>(outputInfo.m_OutputSlotCoords.second);

            m_OutputHandleMap[bindingId] = *outputPos;
        }
        bindingIdCount += outputInfo.m_LayerBindingIds.size();

        // More than one LayerBindingId means the tensor handle is connected to more than one OutputLayer.
        // Importing in this case would likely cause unexpected behaviour, so we disallow it.
        if (outputInfo.m_LayerBindingIds.size() != 1)
        {
            continue;
        }

        // Store the input slot positions of the tensor handle
        for (auto outputSlot : outputInfo.m_InputSlotCoords)
        {
            WorkingMemDescriptor& workingMemDescriptor = m_WorkingMemDescriptors.at(outputSlot.first);

            auto inputPos = workingMemDescriptor.m_Inputs.begin();

            // The difference_type of a vector can be signed or unsigned depending on the std
            // implementation; this cast removes any conversion warnings.
            inputPos += numeric_cast<DifferenceType>(outputSlot.second);
            m_OutputConnectionMap[outputInfo.m_LayerBindingIds[0]].push_back(inputPos);
        }
    }
    m_BindingIdVec = std::vector<LayerBindingId>(bindingIdCount);
    IgnoreUnused(m_UnmanagedTensorHandles);
}
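For orientation, here is a minimal sketch of how a handle built by this constructor is normally obtained and used through the experimental asynchronous API. The entry points assumed below (IRuntime::CreateWorkingMemHandle and IRuntime::Execute, declared in armnn/IRuntime.hpp) are taken from the 21.11 public headers; treat the exact overloads as an assumption and check IRuntime.hpp. The snippet is illustrative only and is not part of this file.

// Sketch only; assumes <armnn/IRuntime.hpp> is included.
void RunAsyncInferenceSketch(armnn::IRuntime& runtime,
                             armnn::NetworkId networkId,
                             const armnn::InputTensors& inputs,
                             const armnn::OutputTensors& outputs)
{
    // The runtime constructs the WorkingMemHandle above, wiring the LayerBindingId maps
    // for this network; one handle is needed per concurrent inference.
    std::unique_ptr<armnn::experimental::IWorkingMemHandle> handle =
        runtime.CreateWorkingMemHandle(networkId);

    // Execute() uses the handle's maps to bind the supplied tensors; it is expected to
    // allocate the handle's backing memory on first use (see Allocate() below).
    runtime.Execute(*handle, inputs, outputs);
}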

// Acquires the backing memory for this handle: runs the memory manager allocation, then imports
// each pre-planned TensorMemory block into its managed ITensorHandle without copying. Repeated
// calls are no-ops until Free() is called.
void WorkingMemHandle::Allocate()
{
    if (m_IsAllocated)
    {
        return;
    }
    m_IsAllocated = true;

    m_MemoryManager->Allocate();

    for (unsigned int i = 0; i < m_TensorMemory.size(); ++i)
    {
        // Hand the externally managed block to the tensor handle, tagged with its MemorySource.
        m_ManagedTensorHandles[i]->Import(m_TensorMemory[i].first->m_Data, m_TensorMemory[i].second);
    }
}

// Releases the backing memory acquired by Allocate(); does nothing if the handle is not
// currently allocated.
void WorkingMemHandle::Free()
{
    if (!m_IsAllocated)
    {
        return;
    }
    m_IsAllocated = false;

    m_MemoryManager->Deallocate();
}
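A small sketch of the allocate/free cycle implemented by the two functions above, driven from outside. It assumes IWorkingMemHandle exposes Allocate() and Free() as in the 21.11 headers; this is an illustration, not code from the ArmNN tree.

void CycleWorkingMemorySketch(armnn::experimental::IWorkingMemHandle& handle)
{
    handle.Allocate();   // first call allocates and imports the tensor memory
    handle.Allocate();   // no-op: the m_IsAllocated guard returns early
    handle.Free();       // releases the backing memory; Allocate() may be called again
}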

// Synchronises each output's memory by performing a blocking Map() followed by Unmap() on the
// tensor handle recorded for that output connection.
void WorkingMemHandle::MemSyncOutputs()
{
    for (auto output : m_OutputConnectionMap)
    {
        (*output.second[0])->Map(true);
        (*output.second[0])->Unmap();
    }
}

// Validates the LayerBindingIds collected in m_BindingIdVec for the current inference: the first
// m_InputSize entries must be unique, known input ids and the remaining entries unique, known
// output ids. On failure the corresponding validation map is reset and an
// InvalidArgumentException is thrown.
void WorkingMemHandle::ValidateBindingIds()
{
    auto resetInputValidationMap = [&]()
    {
        for (auto& pair : m_InputValidationMap)
        {
            pair.second = false;
        }
    };

    auto resetOutputValidationMap = [&]()
    {
        for (auto& pair : m_OutputValidationMap)
        {
            pair.second = false;
        }
    };

    std::for_each(m_BindingIdVec.begin(), m_BindingIdVec.begin() + m_InputSize, [&](LayerBindingId id)
    {
        try
        {
            bool& isUsed = m_InputValidationMap.at(id);
            if (isUsed)
            {
                resetInputValidationMap();
                throw InvalidArgumentException(fmt::format("Duplicate Input LayerBindingId: {}", id));
            }
            isUsed = true;
        }
        catch (const std::out_of_range&)
        {
            resetInputValidationMap();
            throw InvalidArgumentException(fmt::format("Unknown Input LayerBindingId: {}", id));
        }
    });
    resetInputValidationMap();

    std::for_each(m_BindingIdVec.begin() + m_InputSize, m_BindingIdVec.end(), [&](LayerBindingId id)
    {
        try
        {
            bool& isUsed = m_OutputValidationMap.at(id);
            if (isUsed)
            {
                resetOutputValidationMap();
                throw InvalidArgumentException(fmt::format("Duplicate Output LayerBindingId: {}", id));
            }
            isUsed = true;
        }
        catch (const std::out_of_range&)
        {
            resetOutputValidationMap();
            throw InvalidArgumentException(fmt::format("Unknown Output LayerBindingId: {}", id));
        }
    });
    resetOutputValidationMap();
}

} // end experimental namespace

} // end armnn namespace
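Finally, a hedged illustration of what ValidateBindingIds() guards against. It assumes the loaded network copies the binding ids of the tensors passed to IRuntime::Execute into m_BindingIdVec before calling the validation, and the binding ids 0 and 1 and the tensor arguments are placeholders for whatever the network actually defines.

#include <armnn/Exceptions.hpp>
#include <armnn/IRuntime.hpp>

// Sketch only: a request that repeats an input binding id should be rejected by
// ValidateBindingIds() with "Duplicate Input LayerBindingId: 0".
void DuplicateInputIdSketch(armnn::IRuntime& runtime,
                            armnn::NetworkId networkId,
                            const armnn::ConstTensor& input,
                            const armnn::Tensor& output)
{
    auto handle = runtime.CreateWorkingMemHandle(networkId);

    armnn::InputTensors inputs{ { 0, input }, { 0, input } };   // binding id 0 used twice
    armnn::OutputTensors outputs{ { 1, output } };

    try
    {
        armnn::Status status = runtime.Execute(*handle, inputs, outputs);
        // Depending on how the runtime surfaces the error, the failure may also be reported
        // as a failed Status rather than an exception.
        (void)status;
    }
    catch (const armnn::InvalidArgumentException&)
    {
        // Expected for this deliberately malformed request.
    }
}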