author    SiCong Li <sicong.li@arm.com>  2023-04-27 17:10:36 +0100
committer SiCong Li <sicong.li@arm.com>  2023-04-28 09:53:34 +0000
commit    eaae8999ac8027a5fb96162061ad8ccc490515cb (patch)
tree      2d0ae66bf8fbe5b4cda72459bf4a7df21bf5cc36
parent    99b6fd210d84511b5d746871b8510a482a2950b8 (diff)
download  ComputeLibrary-eaae8999ac8027a5fb96162061ad8ccc490515cb.tar.gz
Fix OMPScheduler run_workloads single thread issue
Resolves COMPMID-6032

Signed-off-by: SiCong Li <sicong.li@arm.com>
Change-Id: Icca60deac7308173fc3a8282af91434b4d1c0b06
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/9520
Reviewed-by: Jakub Sujak <jakub.sujak@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Benchmark: Arm Jenkins <bsgcomp@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
-rw-r--r--  src/runtime/OMP/OMPScheduler.cpp  | 11
1 file changed, 6 insertions(+), 5 deletions(-)
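
The fix is the early-return guard in OMPScheduler::run_workloads, shown in the last hunk below: the old condition (amount_of_work < 1 || num_threads_to_use == 1) also returned when exactly one thread would be used, so a lone workload was silently skipped; the new condition (num_threads_to_use < 1) only returns when there is no work at all. A minimal standalone C++ sketch of the two guards, using hypothetical run_workloads_old/run_workloads_new helpers and plain std::function workloads rather than the library's IScheduler::Workload and the OpenMP machinery:

#include <algorithm>
#include <cstdio>
#include <functional>
#include <vector>

using Workload = std::function<void(int /*thread id*/)>; // stand-in for the library type

// Old guard: also returns when num_threads_to_use == 1, so a single
// workload (or a single-core run) is dropped without being executed.
void run_workloads_old(std::vector<Workload> &workloads, unsigned int num_threads)
{
    const unsigned int amount_of_work     = static_cast<unsigned int>(workloads.size());
    const unsigned int num_threads_to_use = std::min(num_threads, amount_of_work);
    if(amount_of_work < 1 || num_threads_to_use == 1)
    {
        return;
    }
    for(unsigned int wid = 0; wid < amount_of_work; ++wid)
    {
        workloads[wid](static_cast<int>(wid));
    }
}

// New guard: only returns when there is genuinely nothing to run.
void run_workloads_new(std::vector<Workload> &workloads, unsigned int num_threads)
{
    const unsigned int amount_of_work     = static_cast<unsigned int>(workloads.size());
    const unsigned int num_threads_to_use = std::min(num_threads, amount_of_work);
    if(num_threads_to_use < 1)
    {
        return;
    }
    for(unsigned int wid = 0; wid < amount_of_work; ++wid)
    {
        workloads[wid](static_cast<int>(wid));
    }
}

int main()
{
    std::vector<Workload> work{[](int tid) { std::printf("workload executed by thread %d\n", tid); }};
    run_workloads_old(work, 1); // prints nothing: the only workload is lost
    run_workloads_new(work, 1); // prints once
}

With one workload the old guard returns before the loop ever runs, which is the single-thread issue the commit title refers to.
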
diff --git a/src/runtime/OMP/OMPScheduler.cpp b/src/runtime/OMP/OMPScheduler.cpp
index 7287f9f3f7..b0a553212a 100644
--- a/src/runtime/OMP/OMPScheduler.cpp
+++ b/src/runtime/OMP/OMPScheduler.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2022 Arm Limited.
+ * Copyright (c) 2017-2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -59,7 +59,7 @@ void OMPScheduler::schedule_op(ICPPKernel *kernel, const Hints &hints, const Win
ARM_COMPUTE_ERROR_ON_MSG(hints.strategy() == StrategyHint::DYNAMIC,
"Dynamic scheduling is not supported in OMPScheduler");
- const Window & max_window = window;
+ const Window &max_window = window;
const unsigned int num_iterations = max_window.num_iterations(hints.split_dimension());
const unsigned int num_threads = std::min(num_iterations, _num_threads);
@@ -76,7 +76,8 @@ void OMPScheduler::schedule_op(ICPPKernel *kernel, const Hints &hints, const Win
for(unsigned int t = 0; t < num_windows; t++)
{
//Capture 't' by copy, all the other variables by reference:
- workloads[t] = [t, &hints, &max_window, &num_windows, &kernel, &tensors](const ThreadInfo &info) {
+ workloads[t] = [t, &hints, &max_window, &num_windows, &kernel, &tensors](const ThreadInfo & info)
+ {
Window win = max_window.split_window(hints.split_dimension(), t, num_windows);
win.validate();
kernel->run_op(tensors, win, info);
@@ -91,7 +92,7 @@ void OMPScheduler::run_workloads(std::vector<arm_compute::IScheduler::Workload>
const unsigned int amount_of_work = static_cast<unsigned int>(workloads.size());
const unsigned int num_threads_to_use = std::min(_num_threads, amount_of_work);
- if(amount_of_work < 1 || num_threads_to_use == 1)
+ if(num_threads_to_use < 1)
{
return;
}
@@ -99,7 +100,7 @@ void OMPScheduler::run_workloads(std::vector<arm_compute::IScheduler::Workload>
ThreadInfo info;
info.cpu_info = &cpu_info();
info.num_threads = num_threads_to_use;
-#pragma omp parallel for firstprivate(info) num_threads(num_threads_to_use) default(shared) proc_bind(close) schedule(static, 1)
+ #pragma omp parallel for firstprivate(info) num_threads(num_threads_to_use) default(shared) proc_bind(close) schedule(static, 1)
for(unsigned int wid = 0; wid < amount_of_work; ++wid)
{
const int tid = omp_get_thread_num();
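
The hunk above only re-indents the pragma; its clauses are unchanged. For readers unfamiliar with the pattern, a minimal standalone sketch of the same OpenMP construct (a firstprivate copy of a per-thread info struct, static schedule with chunk size 1, thread id from omp_get_thread_num), assuming a simplified Info struct in place of the library's ThreadInfo:

#include <omp.h>
#include <cstdio>

struct Info
{
    int num_threads = 1;
    int thread_id   = 0;
};

int main()
{
    const unsigned int amount_of_work     = 8;
    const unsigned int num_threads_to_use = 4;

    Info info;
    info.num_threads = static_cast<int>(num_threads_to_use);

    // Each thread works on its own copy of 'info' (firstprivate) and fills in
    // its own id; schedule(static, 1) hands out iterations round-robin.
    #pragma omp parallel for firstprivate(info) num_threads(num_threads_to_use) default(shared) proc_bind(close) schedule(static, 1)
    for(unsigned int wid = 0; wid < amount_of_work; ++wid)
    {
        info.thread_id = omp_get_thread_num();
        std::printf("workload %u on thread %d of %d\n", wid, info.thread_id, info.num_threads);
    }
}

Build with an OpenMP-enabled compiler (e.g. -fopenmp); without it the pragma is ignored and the loop simply runs serially.
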