25 #include <common/include/LabelsAndEventClasses.hpp> 28 #include <condition_variable> 29 #include <unordered_map> 53 std::unique_ptr<IWorkingMemHandle> CreateWorkingMemHandle(
NetworkId networkId);
58 std::vector<ImportedInputId> ImportInputs(
const InputTensors& inputTensors,
59 MemorySource forceImportMemorySource = MemorySource::Undefined);
60 std::vector<ImportedOutputId> ImportOutputs(
const OutputTensors& outputTensors,
61 MemorySource forceImportMemorySource = MemorySource::Undefined);
63 void ClearImportedInputs(
const std::vector<ImportedInputId> inputIds);
64 void ClearImportedOutputs(
const std::vector<ImportedOutputId> outputIds);
68 std::vector<ImportedInputId> preImportedInputIds = {},
69 std::vector<ImportedOutputId> preImportedOutputIds = {});
75 std::vector<ImportedInputId> preImportedInputs = {},
76 std::vector<ImportedOutputId> preImportedOutputs = {});
78 static std::unique_ptr<LoadedNetwork> MakeLoadedNetwork(std::unique_ptr<IOptimizedNetwork> net,
79 std::string& errorMessage,
/// Returns the profiler associated with this loaded network.
/// Delegates to the owned optimized network (m_OptimizedNetwork->GetProfiler());
/// the LoadedNetwork itself holds no separate profiler state.
/// NOTE(review): assumes m_OptimizedNetwork is non-null for the lifetime of this
/// object — presumably guaranteed by construction; confirm against MakeLoadedNetwork.
86 const std::shared_ptr<IProfiler>&
GetProfiler()
const {
return m_OptimizedNetwork->GetProfiler(); }
88 void FreeWorkingMemory();
92 void SendNetworkStructure();
96 return m_NetworkProperties.m_AsyncEnabled;
99 profiling::ProfilingGuid GetNetworkGuid();
104 void AllocateWorkingMemory(std::lock_guard<std::mutex>& lock);
105 void AllocateAndExecuteConstantWorkloads();
106 void AllocateAndExecuteConstantWorkloadsAsync();
108 std::unordered_map<LayerGuid, std::unique_ptr<IWorkload>> m_ConstantWorkloads;
109 std::unordered_map<LayerGuid, ITensorHandle*> m_ConstantTensorHandles;
111 std::unique_ptr<IMemoryOptimizerStrategy> m_ConstantStrategy = std::make_unique<SingleAxisPriorityList>();
125 bool Execute(std::unique_ptr<profiling::TimelineUtilityMethods>& timelineUtils,
126 profiling::ProfilingGuid inferenceGuid);
133 void CreateMemoryProfile();
134 void CreateMemoryProfileAsync();
136 std::unique_ptr<MemoryManager> CreateExternalMemoryManger(
137 std::vector<std::pair<std::shared_ptr<TensorMemory>,
MemorySource>>& tensorMemory);
139 using BackendPtrMap = std::unordered_map<BackendId, IBackendInternalUniquePtr>;
141 BackendPtrMap m_Backends;
142 std::vector<IBackendInternal::IMemoryManagerSharedPtr> m_BackendMemoryMangers;
144 using WorkloadFactoryMap = std::unordered_map<BackendId, IBackendInternal::IWorkloadFactoryPtr>;
145 WorkloadFactoryMap m_WorkloadFactories;
147 std::unique_ptr<IOptimizedNetwork> m_OptimizedNetwork;
153 mutable std::mutex m_WorkingMemMutex;
155 bool m_IsWorkingMemAllocated =
false;
163 struct ImportedTensorHandlePin
165 ImportedTensorHandlePin()
169 std::unique_ptr<ITensorHandle> tensorHandle)
170 : m_LayerBindingId(layerBindingId)
171 , m_TensorHandle(std::move(tensorHandle))
174 ImportedTensorHandlePin(ImportedTensorHandlePin&&) =
default;
176 ~ImportedTensorHandlePin()
180 m_TensorHandle->Unimport();
185 std::unique_ptr<ITensorHandle> m_TensorHandle;
188 std::vector<ImportedTensorHandlePin> m_PreImportedInputHandles;
189 std::vector<ImportedTensorHandlePin> m_PreImportedOutputHandles;
194 std::unordered_map<BackendId, std::vector<MemBlock>> m_MemBlockMap;
195 std::unordered_map<BackendId, std::vector<MemBin>> m_MemBinMap;
197 std::vector<ITensorHandle*> m_Tensorhandles;
199 std::vector<std::pair<std::shared_ptr<TensorMemory>,
MemorySource>> m_TensorMemory;
201 std::unique_ptr<MemoryManager> m_ExternalMemoryManager;
203 std::unordered_map<BackendId, bool> m_SupportsExternallyManagedMemory;
207 struct WorkloadIndices
209 unsigned int m_WorkloadIndex;
210 unsigned int m_SlotIndex;
213 struct OutputWorkloadIndices
215 WorkloadIndices m_OutputSlotIndices;
216 std::vector<WorkloadIndices> m_InputSlotIndices;
218 std::unordered_map<LayerBindingId, std::vector<WorkloadIndices>> m_InputWorkloadSlotPairs;
219 std::unordered_map<LayerBindingId, OutputWorkloadIndices> m_OutputWorkloadSlotPairs;
220 std::vector<bool> m_IsInputImported;
221 std::vector<bool> m_IsOutputImported;
unsigned int ImportedOutputId
std::vector< std::pair< LayerBindingId, class ConstTensor > > InputTensors
Copyright (c) 2021 ARM Limited and Contributors.
std::function< void(LayerGuid guid, unsigned int slotIndex, ITensorHandle *tensorHandle)> DebugCallbackFunction
Defines the type of callback that the Debug layer invokes.
int LayerBindingId
Type of identifiers for bindable layers (inputs, outputs).
A tensor defined by a TensorInfo (shape and data type) and a mutable backing store.
A tensor defined by a TensorInfo (shape and data type) and an immutable backing store.
std::vector< std::pair< LayerBindingId, class Tensor > > OutputTensors
std::vector< std::unique_ptr< IWorkload > > WorkloadQueue
unsigned int ImportedInputId
const std::shared_ptr< IProfiler > & GetProfiler() const
MemorySource
Defines the memory source used to reduce copies.