diff options
author | Finn Williams <Finn.Williams@arm.com> | 2021-04-26 12:06:34 +0100 |
---|---|---|
committer | finn.williams <finn.williams@arm.com> | 2021-04-28 11:39:10 +0000 |
commit | 01097941ef85073c56cbd1d5f00d7e8ffeb9876d (patch) | |
tree | 818686d467b142084e0e49bbd4084670d1d0d50b /src/armnn/WorkingMemHandle.hpp | |
parent | c2b99a8783388ec3bd90dfed2e1b6d4f4d4bd1c8 (diff) | |
download | armnn-01097941ef85073c56cbd1d5f00d7e8ffeb9876d.tar.gz |
IVGCVSW-5843 Separate memory managers for WorkingMemHandles
* Add inter layer memory management to WorkingMemHandle
* Change Const layers to be executed once in loadedNetworkConstruction
and share tensorHandle between all WorkingMemHandles
* Fix various reference workloads pointing to memory in the queueDescriptor
Signed-off-by: Finn Williams <Finn.Williams@arm.com>
Change-Id: I69d4b3c5c84d2f5abe4540c3e624ab4f00d88226
Diffstat (limited to 'src/armnn/WorkingMemHandle.hpp')
-rw-r--r-- | src/armnn/WorkingMemHandle.hpp | 59 |
1 file changed, 13 insertions, 46 deletions
diff --git a/src/armnn/WorkingMemHandle.hpp b/src/armnn/WorkingMemHandle.hpp index cef6fb6fd3..92b0acaec3 100644 --- a/src/armnn/WorkingMemHandle.hpp +++ b/src/armnn/WorkingMemHandle.hpp @@ -26,10 +26,12 @@ class WorkingMemHandle final : public IWorkingMemHandle public: WorkingMemHandle(NetworkId networkId, std::vector<WorkingMemDescriptor> workingMemDescriptors, - std::unordered_map<LayerGuid, WorkingMemDescriptor> workingMemDescriptorMap); + std::unordered_map<LayerGuid, WorkingMemDescriptor> workingMemDescriptorMap, + std::vector<std::shared_ptr<IMemoryManager>> memoryManagers, + std::unordered_map<LayerGuid, std::vector<std::unique_ptr<ITensorHandle> > > ownedTensorHandles); ~WorkingMemHandle() - { FreeWorkingMemory(); } + { Free(); } NetworkId GetNetworkId() override { @@ -38,50 +40,10 @@ public: /// Allocate the backing memory required for execution. If this is not called, then allocation will be /// deferred to execution time. The mutex must be locked. - void Allocate() override - { - if (m_IsAllocated) - { - return; - } - m_IsAllocated = true; - - // Iterate through all WorkingMemDescriptors calling allocate() on each input and output in turn - for (auto workingMemDescriptor : m_WorkingMemDescriptors) - { - for (auto& input : workingMemDescriptor.m_Inputs) - { - input->Allocate(); - } - for (auto& output : workingMemDescriptor.m_Outputs) - { - output->Allocate(); - } - } - } + void Allocate() override; /// Free the backing memory required for execution. The mutex must be locked. 
- void Free() override - { - if (!m_IsAllocated) - { - return; - } - m_IsAllocated = false; - - // Iterate through all WorkingMemDescriptors calling free() on each input and output in turn - for (auto workingMemDescriptor : m_WorkingMemDescriptors) - { - for (auto& input : workingMemDescriptor.m_Inputs) - { - input->Unmap(); - } - for (auto& output : workingMemDescriptor.m_Outputs) - { - output->Unmap(); - } - } - } + void Free() override; /// IsAllocated returns true if the backing memory is currently allocated. The mutex must be locked. bool IsAllocated() override @@ -111,13 +73,18 @@ public: } private: - void FreeWorkingMemory(); - NetworkId m_NetworkId; std::shared_ptr<ProfilerImpl> m_Profiler; std::vector<WorkingMemDescriptor> m_WorkingMemDescriptors; std::unordered_map<LayerGuid, WorkingMemDescriptor> m_WorkingMemDescriptorMap; + + // Vector of IMemoryManagers that manage the WorkingMemHandle's memory + std::vector<std::shared_ptr<IMemoryManager>> m_MemoryManagers; + // TensorHandles owned by this WorkingMemHandle + // constant tensors can be shared by multiple WorkingMemHandles and so will not be stored here + std::unordered_map<LayerGuid, std::vector<std::unique_ptr<ITensorHandle> > > m_OwnedTensorHandles; + bool m_IsAllocated; std::mutex m_Mutex; }; |