ArmNN
 21.05
WorkingMemHandle.hpp
Go to the documentation of this file.
1 //
2 // Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
6 #pragma once
7 
8 #include "Layer.hpp"
9 #include "Network.hpp"
10 #include "WorkingMemDescriptor.hpp"
11 
12 #include <armnn/IWorkingMemHandle.hpp>
13 #include <armnn/Tensor.hpp>
14 
15 #include <memory>
15 #include <mutex>
15 #include <unordered_map>
15 #include <vector>
16 
17 namespace armnn
18 {
19 
20 namespace experimental
21 {
22 
23 class WorkingMemHandle final : public IWorkingMemHandle
24 {
25 
26 public:
27  WorkingMemHandle(NetworkId networkId,
28  std::vector<WorkingMemDescriptor> workingMemDescriptors,
29  std::unordered_map<LayerGuid, WorkingMemDescriptor> workingMemDescriptorMap,
30  std::vector<std::shared_ptr<IMemoryManager>> memoryManagers,
31  std::unordered_map<LayerGuid, std::vector<std::unique_ptr<ITensorHandle> > > ownedTensorHandles);
32 
34  { Free(); }
35 
37  {
38  return m_NetworkId;
39  }
40 
42  {
43  return m_InferenceId;
44  }
45 
46  /// Allocate the backing memory required for execution. If this is not called, then allocation will be
47  /// deferred to execution time. The mutex must be locked.
48  void Allocate() override;
49 
50  /// Free the backing memory required for execution. The mutex must be locked.
51  void Free() override;
52 
53  /// IsAllocated returns true if the backing memory is currently allocated. The mutex must be locked.
54  bool IsAllocated() override
55  {
56  return m_IsAllocated;
57  }
58 
59  /// Get a mutex which can be used for synchronizing access to the WorkingMemHandle object.
60  std::mutex& GetMutex() override
61  {
62  return m_Mutex;
63  }
64 
65  /// Get the WorkingMemDescriptor for a Layer. The mutex must be locked.
67  {
68  auto result = m_WorkingMemDescriptorMap.find(id);
69  ARMNN_ASSERT(result != m_WorkingMemDescriptorMap.end());
70  return result->second;
71  }
72 
73  /// Get the WorkingMemDescriptor at an index. The WorkingMemDescriptors are stored in the same order as
74  /// the Workloads in a topologically sorted graph. The mutex must be locked.
76  {
77  return m_WorkingMemDescriptors[id];
78  }
79 
80 private:
81  NetworkId m_NetworkId;
82  std::shared_ptr<ProfilerImpl> m_Profiler;
83 
84  std::vector<WorkingMemDescriptor> m_WorkingMemDescriptors;
85  std::unordered_map<LayerGuid, WorkingMemDescriptor> m_WorkingMemDescriptorMap;
86 
87  // Vector of IMemoryManagers that manage the WorkingMemHandle's memory
88  std::vector<std::shared_ptr<IMemoryManager>> m_MemoryManagers;
89  // TensorHandles owned by this WorkingMemHandle
90  // constant tensor's can be shared by multiple WorkingMemHandles and so will not be stored here
91  std::unordered_map<LayerGuid, std::vector<std::unique_ptr<ITensorHandle> > > m_OwnedTensorHandles;
92 
93  bool m_IsAllocated;
94  std::mutex m_Mutex;
95  profiling::ProfilingGuid m_InferenceId;
96 };
97 
98 } // end experimental namespace
99 
100 } // end armnn namespace
WorkingMemDescriptor & GetWorkingMemDescriptorAt(unsigned int id) override
Get the WorkingMemDescriptor at an index.
Copyright (c) 2021 ARM Limited and Contributors.
NetworkId GetNetworkId() override
Returns the NetworkId of the Network that this IWorkingMemHandle works with.
WorkingMemDescriptor & GetWorkingMemDescriptor(LayerGuid id) override
Get the WorkingMemDescriptor for a Layer. The mutex must be locked.
int NetworkId
Definition: IRuntime.hpp:22
#define ARMNN_ASSERT(COND)
Definition: Assert.hpp:14
void Allocate() override
Allocate the backing memory required for execution.
profiling::ProfilingGuid GetInferenceId() override
Returns the InferenceId of the Inference that this IWorkingMemHandle works with.
std::mutex & GetMutex() override
Get a mutex which can be used for synchronizing access to the WorkingMemHandle object.
bool IsAllocated() override
IsAllocated returns true if the backing memory is currently allocated. The mutex must be locked.
WorkingMemHandle(NetworkId networkId, std::vector< WorkingMemDescriptor > workingMemDescriptors, std::unordered_map< LayerGuid, WorkingMemDescriptor > workingMemDescriptorMap, std::vector< std::shared_ptr< IMemoryManager >> memoryManagers, std::unordered_map< LayerGuid, std::vector< std::unique_ptr< ITensorHandle > > > ownedTensorHandles)
void Free() override
Free the backing memory required for execution. The mutex must be locked.