25 #include <common/include/LabelsAndEventClasses.hpp> 28 #include <condition_variable> 29 #include <unordered_map> 53 std::unique_ptr<IWorkingMemHandle> CreateWorkingMemHandle(
NetworkId networkId);
58 std::vector<ImportedInputId> ImportInputs(
const InputTensors& inputTensors);
59 std::vector<ImportedOutputId> ImportOutputs(
const OutputTensors& outputTensors);
61 void ClearImportedInputs(
const std::vector<ImportedInputId> inputIds);
62 void ClearImportedOutputs(
const std::vector<ImportedOutputId> outputIds);
71 std::vector<ImportedInputId> preImportedInputs = {},
72 std::vector<ImportedOutputId> preImportedOutputs = {});
74 static std::unique_ptr<LoadedNetwork> MakeLoadedNetwork(std::unique_ptr<IOptimizedNetwork> net,
75 std::string& errorMessage,
82 const std::shared_ptr<IProfiler>&
GetProfiler()
const {
return m_OptimizedNetwork->GetProfiler(); }
84 void FreeWorkingMemory();
88 void SendNetworkStructure();
92 return m_NetworkProperties.m_AsyncEnabled;
95 profiling::ProfilingGuid GetNetworkGuid();
100 void AllocateWorkingMemory(std::lock_guard<std::mutex>& lock);
101 void AllocateAndExecuteConstantWorkloads();
102 void AllocateAndExecuteConstantWorkloadsAsync();
104 std::unordered_map<LayerGuid, std::unique_ptr<IWorkload>> m_ConstantWorkloads;
105 std::unordered_map<LayerGuid, ITensorHandle*> m_ConstantTensorHandles;
107 std::unique_ptr<IMemoryOptimizerStrategy> m_ConstantStrategy = std::make_unique<SingleAxisPriorityList>();
121 bool Execute(std::unique_ptr<profiling::TimelineUtilityMethods>& timelineUtils,
122 profiling::ProfilingGuid inferenceGuid);
129 void CreateMemoryProfile();
130 void CreateMemoryProfileAsync();
132 std::unique_ptr<MemoryManager> CreateExternalMemoryManger(
133 std::vector<std::pair<std::shared_ptr<TensorMemory>,
MemorySource>>& tensorMemory);
135 using BackendPtrMap = std::unordered_map<BackendId, IBackendInternalUniquePtr>;
137 BackendPtrMap m_Backends;
138 std::vector<IBackendInternal::IMemoryManagerSharedPtr> m_BackendMemoryMangers;
140 using WorkloadFactoryMap = std::unordered_map<BackendId, IBackendInternal::IWorkloadFactoryPtr>;
141 WorkloadFactoryMap m_WorkloadFactories;
143 std::unique_ptr<IOptimizedNetwork> m_OptimizedNetwork;
149 mutable std::mutex m_WorkingMemMutex;
151 bool m_IsWorkingMemAllocated =
false;
159 struct ImportedTensorHandlePin
161 ImportedTensorHandlePin()
165 std::unique_ptr<ITensorHandle> tensorHandle)
166 : m_LayerBindingId(layerBindingId)
167 , m_TensorHandle(std::move(tensorHandle))
170 ImportedTensorHandlePin(ImportedTensorHandlePin&&) =
default;
172 ~ImportedTensorHandlePin()
176 m_TensorHandle->Unimport();
181 std::unique_ptr<ITensorHandle> m_TensorHandle;
184 std::vector<ImportedTensorHandlePin> m_PreImportedInputHandles;
185 std::vector<ImportedTensorHandlePin> m_PreImportedOutputHandles;
190 std::unordered_map<BackendId, std::vector<MemBlock>> m_MemBlockMap;
191 std::unordered_map<BackendId, std::vector<MemBin>> m_MemBinMap;
193 std::vector<ITensorHandle*> m_Tensorhandles;
195 std::vector<std::pair<std::shared_ptr<TensorMemory>,
MemorySource>> m_TensorMemory;
197 std::unique_ptr<MemoryManager> m_ExternalMemoryManager;
199 std::unordered_map<BackendId, bool> m_SupportsExternallyManagedMemory;
unsigned int ImportedOutputId
std::vector< std::pair< LayerBindingId, class ConstTensor > > InputTensors
Copyright (c) 2021 ARM Limited and Contributors.
std::function< void(LayerGuid guid, unsigned int slotIndex, ITensorHandle *tensorHandle)> DebugCallbackFunction
Defines the type of callback that the Debug layer invokes.
int LayerBindingId
Type of identifiers for bindable layers (inputs, outputs).
A tensor defined by a TensorInfo (shape and data type) and a mutable backing store.
A tensor defined by a TensorInfo (shape and data type) and an immutable backing store.
std::vector< std::pair< LayerBindingId, class Tensor > > OutputTensors
std::vector< std::unique_ptr< IWorkload > > WorkloadQueue
unsigned int ImportedInputId
const std::shared_ptr< IProfiler > & GetProfiler() const
MemorySource
Defines the memory source, used to reduce copies.