22 #include <condition_variable> 23 #include <unordered_map> 42 std::shared_ptr<IAsyncExecutionCallback>>;
49 TerminateThreadPool();
54 std::unique_ptr<IWorkingMemHandle> CreateWorkingMemHandle(
NetworkId networkId);
60 Status EnqueueWorkload(
const InputTensors& inputTensors,
const OutputTensors& outputTensors);
63 Status Execute(
const InputTensors& inputTensors,
64 const OutputTensors& outputTensors,
68 void Schedule(
const InputTensors& inputTensors,
69 const OutputTensors& outputTensors,
71 std::shared_ptr<IAsyncExecutionCallback> cb);
73 static std::unique_ptr<LoadedNetwork> MakeLoadedNetwork(std::unique_ptr<IOptimizedNetwork> net,
74 std::string& errorMessage,
/// Read-only accessor for this network's profiler.
/// Returns a const reference to the stored shared_ptr (m_Profiler) so callers
/// can share ownership without an extra refcount bump on the hot path.
/// NOTE(review): presumably set once at construction — confirm it is never
/// reassigned, since this is a const member function returning by reference.
82 const std::shared_ptr<IProfiler>&
GetProfiler()
const {
return m_Profiler; }
84 void FreeWorkingMemory();
88 void SendNetworkStructure();
92 return m_NetworkProperties.m_AsyncEnabled;
98 using WorkloadFactoryWithMemoryManager =
99 std::pair<IBackendInternal::IWorkloadFactoryPtr, IBackendInternal::IMemoryManagerSharedPtr>;
101 using WorkloadFactoryMap = std::unordered_map<BackendId, WorkloadFactoryWithMemoryManager>;
103 void AllocateWorkingMemory(std::lock_guard<std::mutex>& lock);
104 void AllocateAndExecuteConstantWorkloads();
106 std::unordered_map<LayerGuid, ITensorHandle* > m_ConstantTensorHandles;
107 std::unordered_map<LayerGuid, std::unique_ptr<IWorkload> > m_ConstantWorkloads;
122 void ProcessExecPriorities(std::unique_ptr<IWorkingMemHandle> workingMemHandle);
124 bool Execute(std::unique_ptr<profiling::TimelineUtilityMethods>& timelineUtils,
127 void CreateThreadPool(std::size_t numThreads);
129 void TerminateThreadPool() noexcept;
133 using BackendPtrMap = std::unordered_map<BackendId, IBackendInternalUniquePtr>;
135 BackendPtrMap m_Backends;
136 WorkloadFactoryMap m_WorkloadFactories;
138 std::unique_ptr<IOptimizedNetwork> m_OptimizedNetwork;
139 std::shared_ptr<IProfiler> m_Profiler;
145 mutable std::mutex m_WorkingMemMutex;
147 bool m_IsWorkingMemAllocated =
false;
149 std::vector<std::unique_ptr<std::thread>> m_Threads;
150 std::stack<IWorkingMemHandle> m_WorkingMemHandles;
158 std::condition_variable m_ThreadPoolEvent;
159 std::mutex m_ThreadPoolMutex;
162 bool m_TerminatePool =
false;
std::vector< std::pair< LayerBindingId, class ConstTensor > > InputTensors
Copyright (c) 2021 ARM Limited and Contributors.
std::function< void(LayerGuid guid, unsigned int slotIndex, ITensorHandle *tensorHandle)> DebugCallbackFunction
Defines the type of callback for the Debug layer to call.
int LayerBindingId
Type of identifiers for bindable layers (inputs, outputs).
std::tuple< InputTensors, OutputTensors, std::shared_ptr< IAsyncExecutionCallback > > ExecutionTuple
A tensor defined by a TensorInfo (shape and data type) and a mutable backing store.
A tensor defined by a TensorInfo (shape and data type) and an immutable backing store.
std::vector< std::pair< LayerBindingId, class Tensor > > OutputTensors
std::vector< std::unique_ptr< IWorkload > > WorkloadQueue
const std::shared_ptr< IProfiler > & GetProfiler() const
std::queue< std::shared_ptr< ExecutionTuple > > ExecutionQueue