ArmNN
 22.11
WorkingMemHandle.hpp
Go to the documentation of this file.
1 //
2 // Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
6 #pragma once
7 
#include "ExecutionData.hpp"
#include "Layer.hpp"
#include "Network.hpp"
#include "WorkingMemDescriptor.hpp"

#include <armnn/IWorkingMemHandle.hpp>
#include <armnn/Tensor.hpp>
#include <armnn/backends/TensorHandle.hpp>

#include <backendsCommon/MemoryManager.hpp>

#include <memory>
#include <mutex>
#include <unordered_map>
#include <utility>
#include <vector>
20 
21 namespace armnn
22 {
23 
24 namespace experimental
25 {
26 
27 using BackendPtrMap = std::unordered_map<BackendId, IBackendInternalUniquePtr>;
28 
29 class WorkingMemHandle final : public IWorkingMemHandle
30 {
31 
32 public:
34  {
36 
37  std::vector<std::pair<unsigned int, unsigned int>> m_InputSlotCoords;
38  };
39 
41  {
42  std::vector<LayerBindingId> m_LayerBindingIds;
43 
44  std::pair<unsigned int, unsigned int> m_OutputSlotCoords;
45  std::vector<std::pair<unsigned int, unsigned int>> m_InputSlotCoords;
46  };
47 
48  WorkingMemHandle(NetworkId networkId) : m_NetworkId(networkId){}
49 
50  WorkingMemHandle(NetworkId networkId,
51  std::vector<InputMemDescriptorCoords> inputLayerInfo,
52  std::vector<OutputMemDescriptorCoords> outputLayerInfo,
53  std::vector<WorkingMemDescriptor> workingMemDescriptors,
54  std::unique_ptr<MemoryManager> memoryManager,
55  std::vector<std::pair<std::shared_ptr<TensorMemory>, MemorySource>> tensorMemory,
56  std::vector<std::unique_ptr<ITensorHandle>> managedTensorHandles,
57  std::vector<std::unique_ptr<ITensorHandle>> unmanagedTensorHandles,
58  std::vector<std::pair<BackendId, ExecutionData>> executionDataVec,
59  BackendPtrMap* backends);
60 
62  { Free(); }
63 
65  {
66  return m_NetworkId;
67  }
68 
69  /// Allocate the backing memory required for execution. If this is not called, then allocation will be
70  /// deferred to execution time.
71  void Allocate() override;
72 
73  /// Free the backing memory required for execution.
74  void Free() override;
75 
76  /// IsAllocated returns true if the backing memory is currently allocated.
77  bool IsAllocated() override
78  {
79  return m_IsAllocated;
80  }
81 
82  /// Get the WorkingMemDescriptor at an index. The WorkingMemDescriptors are stored in the same order as
83  /// the Workloads in a topologically sorted graph.
85  {
86  return m_WorkingMemDescriptors[id];
87  }
88 
89  /// Get the ExecutionData at an index.
90  /// The ExecutionData is paired with a BackendId to be able to call backend specific functions upon it.
91  /// The ExecutionData are stored in the same order as the Workloads in a topologically sorted graph.
92  std::pair<BackendId, ExecutionData>& GetExecutionDataAt(unsigned int id) override
93  {
94  return m_ExecutionDataVec[id];
95  }
96 
98  {
99  return m_InputHandleMap.at(layerBindingId);
100  };
101 
103  {
104  return m_OutputHandleMap.at(layerBindingId);
105  };
106 
107  const std::vector<std::vector<ITensorHandle*>::iterator>& GetInputConnections(LayerBindingId layerBindingId) const
108  {
109  return m_InputConnectionMap.at(layerBindingId);
110  };
111 
112  const std::vector<std::vector<ITensorHandle*>::iterator>& GetOutputConnection(LayerBindingId layerBindingId) const
113  {
114  return m_OutputConnectionMap.at(layerBindingId);
115  };
116 
117  void MemSyncOutputs();
118 
119  std::vector<LayerBindingId>& GetBindingIdVector()
120  {
121  return m_BindingIdVec;
122  };
123 
124  void ValidateBindingIds();
125 
126 private:
127  using DifferenceType = std::vector<ITensorHandle*>::difference_type;
128  NetworkId m_NetworkId;
129 
130  std::unordered_map<LayerBindingId, ITensorHandle*> m_InputHandleMap;
131  std::unordered_map<LayerBindingId, ITensorHandle*> m_OutputHandleMap;
132  std::unordered_map<LayerBindingId, std::vector<std::vector<ITensorHandle*>::iterator>> m_InputConnectionMap;
133  std::unordered_map<LayerBindingId, std::vector<std::vector<ITensorHandle*>::iterator>> m_OutputConnectionMap;
134 
135  std::vector<WorkingMemDescriptor> m_WorkingMemDescriptors;
136 
137  std::unique_ptr<MemoryManager> m_MemoryManager;
138 
139  // Memory to be imported into the tensorHandles after allocation
140  std::vector<std::pair<std::shared_ptr<TensorMemory>, MemorySource>> m_TensorMemory;
141 
142  // Tensors that will need to be allocated internally within armnn
143  std::vector<std::unique_ptr<ITensorHandle>> m_ManagedTensorHandles;
144 
145  // Tensors that will be allocated externally by the user
146  std::vector<std::unique_ptr<ITensorHandle>> m_UnmanagedTensorHandles;
147 
148  std::unordered_map<LayerBindingId, bool> m_InputValidationMap;
149  std::unordered_map<LayerBindingId, bool> m_OutputValidationMap;
150 
151  std::vector<LayerBindingId> m_BindingIdVec;
152 
153  DifferenceType m_InputSize;
154 
155  bool m_IsAllocated;
156 
157  std::vector<std::pair<BackendId, ExecutionData>> m_ExecutionDataVec;
158 
159  BackendPtrMap* m_Backends;
160 };
161 
162 } // end experimental namespace
163 
164 } // end armnn namespace
std::unordered_map< BackendId, IBackendInternalUniquePtr > BackendPtrMap
std::vector< std::pair< unsigned int, unsigned int > > m_InputSlotCoords
WorkingMemDescriptor & GetWorkingMemDescriptorAt(unsigned int id) override
Get the WorkingMemDescriptor at an index.
std::pair< BackendId, ExecutionData > & GetExecutionDataAt(unsigned int id) override
Get the ExecutionData at an index.
ITensorHandle * GetOutputHandle(LayerBindingId layerBindingId) const
std::vector< std::pair< unsigned int, unsigned int > > m_InputSlotCoords
Copyright (c) 2021 ARM Limited and Contributors.
NetworkId GetNetworkId() override
Returns the NetworkId of the Network that this IWorkingMemHandle works with.
int LayerBindingId
Type of identifiers for bindable layers (inputs, outputs).
Definition: Types.hpp:290
const std::vector< std::vector< ITensorHandle * >::iterator > & GetOutputConnection(LayerBindingId layerBindingId) const
int NetworkId
Definition: IRuntime.hpp:35
const std::vector< std::vector< ITensorHandle * >::iterator > & GetInputConnections(LayerBindingId layerBindingId) const
void Allocate() override
Allocate the backing memory required for execution.
std::vector< LayerBindingId > & GetBindingIdVector()
bool IsAllocated() override
IsAllocated returns true if the backing memory is currently allocated.
MemorySource
Define the Memory Source to reduce copies.
Definition: Types.hpp:230
ITensorHandle * GetInputHandle(LayerBindingId layerBindingId) const
void Free() override
Free the backing memory required for execution.