5 #if !defined(ARMNN_DISABLE_THREADS)
17 #include <condition_variable>
18 #include <unordered_map>
23 namespace experimental
30 std::vector<std::shared_ptr<IWorkingMemHandle>> memHandles);
37 void LoadMemHandles(std::vector<std::shared_ptr<IWorkingMemHandle>> memHandles);
45 std::shared_ptr<IAsyncExecutionCallback> cb);
50 using ExecutionTuple = std::tuple<
51     NetworkId,
52     std::pair<InputTensors, OutputTensors>,
53     std::shared_ptr<IAsyncExecutionCallback>>;
55 using ExecutionQueue = std::queue<std::shared_ptr<ExecutionTuple>>;
57 void ProcessExecPriorities(uint32_t index);
61 ExecutionQueue m_HighPriorityQueue;
62 ExecutionQueue m_MediumPriorityQueue;
63 ExecutionQueue m_LowPriorityQueue;
67 std::condition_variable m_ThreadPoolEvent;
68 std::mutex m_ThreadPoolMutex;
71 bool m_TerminatePool = false;
73 std::unordered_map<NetworkId, std::vector<std::shared_ptr<IWorkingMemHandle>>> m_WorkingMemHandleMap;
74 std::vector<std::unique_ptr<std::thread>> m_Threads;
void LoadMemHandles(std::vector< std::shared_ptr< IWorkingMemHandle >> memHandles)
std::vector< std::pair< LayerBindingId, class ConstTensor > > InputTensors
Copyright (c) 2021 ARM Limited and Contributors.
void TerminateThreadPool() noexcept
void Schedule(NetworkId networkId, const InputTensors &inputTensors, const OutputTensors &outputTensors, const QosExecPriority priority, std::shared_ptr< IAsyncExecutionCallback > cb)
Schedule an asynchronous execution on the loaded network.
std::vector< std::pair< LayerBindingId, class Tensor > > OutputTensors
Threadpool(std::size_t numThreads, IRuntime *runtimePtr, std::vector< std::shared_ptr< IWorkingMemHandle >> memHandles)
void UnloadMemHandles(NetworkId networkId)