//
// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include "Layer.hpp"
#include "Network.hpp"
#include "WorkingMemDescriptor.hpp"

#include <armnn/IWorkingMemHandle.hpp>

#include <backendsCommon/MemoryManager.hpp>

#include <memory>
#include <unordered_map>
#include <utility>
#include <vector>

namespace armnn
{

namespace experimental
{

class WorkingMemHandle final : public IWorkingMemHandle
{

public:
    struct InputMemDescriptorCoords
    {
        LayerBindingId m_LayerBindingId;

        std::vector<std::pair<unsigned int, unsigned int>> m_InputSlotCoords;
    };

    struct OutputMemDescriptorCoords
    {
        std::vector<LayerBindingId> m_LayerBindingIds;

        std::pair<unsigned int, unsigned int> m_OutputSlotCoords;
        std::vector<std::pair<unsigned int, unsigned int>> m_InputSlotCoords;
    };

    WorkingMemHandle(NetworkId networkId) : m_NetworkId(networkId) {}

    WorkingMemHandle(NetworkId networkId,
                     std::vector<InputMemDescriptorCoords> inputLayerInfo,
                     std::vector<OutputMemDescriptorCoords> outputLayerInfo,
                     std::vector<WorkingMemDescriptor> workingMemDescriptors,
                     std::unordered_map<LayerGuid, WorkingMemDescriptor> workingMemDescriptorMap,
                     std::unique_ptr<MemoryManager> memoryManager,
                     std::vector<std::pair<std::shared_ptr<TensorMemory>, MemorySource>> tensorMemory,
                     std::vector<std::unique_ptr<ITensorHandle>> managedTensorHandles,
                     std::vector<std::unique_ptr<ITensorHandle>> unmanagedTensorHandles);

    ~WorkingMemHandle()
    {
        Free();
    }

    NetworkId GetNetworkId() override
    {
        return m_NetworkId;
    }

    /// Allocate the backing memory required for execution. If this is not called, then allocation will be
    /// deferred to execution time.
    void Allocate() override;

    /// Free the backing memory required for execution.
    void Free() override;

    /// IsAllocated returns true if the backing memory is currently allocated.
    bool IsAllocated() override
    {
        return m_IsAllocated;
    }

    /// Get the WorkingMemDescriptor for a Layer.
    WorkingMemDescriptor& GetWorkingMemDescriptor(LayerGuid id) override
    {
        auto result = m_WorkingMemDescriptorMap.find(id);
        ARMNN_ASSERT(result != m_WorkingMemDescriptorMap.end());
        return result->second;
    }

    /// Get the WorkingMemDescriptor at an index. The WorkingMemDescriptors are stored in the same order as
    /// the Workloads in a topologically sorted graph.
    WorkingMemDescriptor& GetWorkingMemDescriptorAt(unsigned int id) override
    {
        return m_WorkingMemDescriptors[id];
    }

    ITensorHandle* GetInputHandle(LayerBindingId layerBindingId) const
    {
        return m_InputHandleMap.at(layerBindingId);
    };

    ITensorHandle* GetOutputHandle(LayerBindingId layerBindingId) const
    {
        return m_OutputHandleMap.at(layerBindingId);
    };

    const std::vector<std::vector<ITensorHandle*>::iterator>& GetInputConnections(LayerBindingId layerBindingId) const
    {
        return m_InputConnectionMap.at(layerBindingId);
    };

    const std::vector<std::vector<ITensorHandle*>::iterator>& GetOutputConnection(LayerBindingId layerBindingId) const
    {
        return m_OutputConnectionMap.at(layerBindingId);
    };

    void MemSyncOutputs();

    std::vector<LayerBindingId>& GetBindingIdVector()
    {
        return m_BindingIdVec;
    };

    void ValidateBindingIds();

private:
    using DifferenceType = std::vector<ITensorHandle*>::difference_type;

    NetworkId m_NetworkId;

    std::unordered_map<LayerBindingId, ITensorHandle*> m_InputHandleMap;
    std::unordered_map<LayerBindingId, ITensorHandle*> m_OutputHandleMap;
    std::unordered_map<LayerBindingId, std::vector<std::vector<ITensorHandle*>::iterator>> m_InputConnectionMap;
    std::unordered_map<LayerBindingId, std::vector<std::vector<ITensorHandle*>::iterator>> m_OutputConnectionMap;

    std::vector<WorkingMemDescriptor> m_WorkingMemDescriptors;
    std::unordered_map<LayerGuid, WorkingMemDescriptor> m_WorkingMemDescriptorMap;

    std::unique_ptr<MemoryManager> m_MemoryManager;

    // Memory to be imported into the tensorHandles after allocation
    std::vector<std::pair<std::shared_ptr<TensorMemory>, MemorySource>> m_TensorMemory;

    // Tensors that will need to be allocated internally within armnn
    std::vector<std::unique_ptr<ITensorHandle>> m_ManagedTensorHandles;

    // Tensors that will be allocated externally by the user
    std::vector<std::unique_ptr<ITensorHandle>> m_UnmanagedTensorHandles;

    std::unordered_map<LayerBindingId, bool> m_InputValidationMap;
    std::unordered_map<LayerBindingId, bool> m_OutputValidationMap;

    std::vector<LayerBindingId> m_BindingIdVec;

    DifferenceType m_InputSize;

    bool m_IsAllocated;
};

} // end experimental namespace

} // end armnn namespace
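
// Usage sketch (illustrative only, kept in comments so the header stays unchanged for the compiler):
// a WorkingMemHandle is normally obtained and driven through the experimental asynchronous execution
// API rather than constructed directly. It assumes a network already loaded for asynchronous execution;
// 'runtime', 'networkId', 'inputTensors' and 'outputTensors' are placeholders supplied by the caller.
//
//     using namespace armnn;
//     using namespace armnn::experimental;
//
//     std::unique_ptr<IWorkingMemHandle> handle = runtime->CreateWorkingMemHandle(networkId);
//
//     handle->Allocate();   // optional: if omitted, allocation is deferred to execution time
//     runtime->Execute(*handle, inputTensors, outputTensors);
//     handle->Free();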