#include <condition_variable>
#include <unordered_map>

namespace experimental
std::vector<std::shared_ptr<IWorkingMemHandle>> memHandles);
void LoadMemHandles(std::vector<std::shared_ptr<IWorkingMemHandle>> memHandles);
std::shared_ptr<IAsyncExecutionCallback> cb);
using ExecutionTuple = std::tuple<
    NetworkId,
    std::shared_ptr<IAsyncExecutionCallback>>;

using ExecutionQueue = std::queue<std::shared_ptr<ExecutionTuple>>;
void ProcessExecPriorities(uint32_t index);
ExecutionQueue m_HighPriorityQueue;
ExecutionQueue m_MediumPriorityQueue;
ExecutionQueue m_LowPriorityQueue;
std::condition_variable m_ThreadPoolEvent;
std::mutex m_ThreadPoolMutex;
bool m_TerminatePool = false;
std::unordered_map<NetworkId, std::vector<std::shared_ptr<IWorkingMemHandle>>> m_WorkingMemHandleMap;
std::vector<std::unique_ptr<std::thread>> m_Threads;
void LoadMemHandles(std::vector<std::shared_ptr<IWorkingMemHandle>> memHandles)
std::vector<std::pair<LayerBindingId, class ConstTensor>> InputTensors
Copyright (c) 2021 ARM Limited and Contributors.
void TerminateThreadPool() noexcept
void Schedule(NetworkId networkId, const InputTensors &inputTensors, const OutputTensors &outputTensors, const QosExecPriority priority, std::shared_ptr<IAsyncExecutionCallback> cb)
Schedule an asynchronous execution on the loaded network.
std::vector<std::pair<LayerBindingId, class Tensor>> OutputTensors
Threadpool(std::size_t numThreads, IRuntime *runtimePtr, std::vector<std::shared_ptr<IWorkingMemHandle>> memHandles)
void UnloadMemHandles(NetworkId networkId)