aboutsummaryrefslogtreecommitdiff
path: root/arm_compute/runtime/IScheduler.h
diff options
context:
space:
mode:
authorAnthony Barbier <anthony.barbier@arm.com>2018-05-25 13:32:10 +0100
committerAnthony Barbier <anthony.barbier@arm.com>2018-11-02 16:52:54 +0000
commit52ecb06b5627902a2f4514fba977e98454af4872 (patch)
tree7e66afcd6a81d2a5d7f886d5d2d0f4e27cc6c4d1 /arm_compute/runtime/IScheduler.h
parentdf473eab0ab8a52e6b58e0f6442b39ba4c1d68ea (diff)
downloadComputeLibrary-52ecb06b5627902a2f4514fba977e98454af4872.tar.gz
COMPMID-1180: Add support for bucket multi-threading (Part 1)
- Add an entry point to allow the user to parallelise an arbitrary queue of workloads (Will be used to interleave GEMM / BufferManager) - Added a ThreadFeeder which acts as a thread-safe work distributor Change-Id: I3a84fb7446c453cfcd337e21338c2ccf9f29f7b3 Note: This patch doesn't introduce any change in the default strategy, therefore it shouldn't have any impact on the performance Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/133058 Tested-by: Jenkins <bsgcomp@arm.com> Reviewed-by: Georgios Pinitas <georgios.pinitas@arm.com>
Diffstat (limited to 'arm_compute/runtime/IScheduler.h')
-rw-r--r--arm_compute/runtime/IScheduler.h12
1 file changed, 12 insertions, 0 deletions
diff --git a/arm_compute/runtime/IScheduler.h b/arm_compute/runtime/IScheduler.h
index a0bcada722..76ff5a3de0 100644
--- a/arm_compute/runtime/IScheduler.h
+++ b/arm_compute/runtime/IScheduler.h
@@ -26,6 +26,8 @@
#include "arm_compute/core/CPP/CPPTypes.h"
+#include <functional>
+
namespace arm_compute
{
class ICPPKernel;
@@ -34,6 +36,8 @@ class ICPPKernel;
class IScheduler
{
public:
+ /** Signature for the workloads to execute */
+ using Workload = std::function<void(const ThreadInfo &)>;
/** Default constructor. */
IScheduler();
@@ -59,6 +63,14 @@ public:
*/
virtual void schedule(ICPPKernel *kernel, unsigned int split_dimension) = 0;
+ /** Execute all the passed workloads
+ *
+ * @note There is no guarantee regarding the order in which the workloads will be executed, or whether they will be executed in parallel.
+ *
+ * @param[in] workloads Array of workloads to run
+ */
+ virtual void run_workloads(std::vector<Workload> &workloads) = 0;
+
/** Get CPU info.
*
* @return CPU info.