diff options
author | cfRod <crefeda.rodrigues@arm.com> | 2022-11-15 15:33:12 +0000 |
---|---|---|
committer | Crefeda Rodrigues <crefeda.rodrigues@arm.com> | 2022-11-18 16:57:04 +0000 |
commit | d2475c721ef892c6522d43edcc67a6710de8039b (patch) | |
tree | 7a76f19e198f8e73f39a2fd18b804575b9f538ab /src | |
parent | 38ac410b14678c90cf1a2e8922ab3572b42d1c77 (diff) | |
download | ComputeLibrary-d2475c721ef892c6522d43edcc67a6710de8039b.tar.gz |
Add num_threads_to_use to OMPScheduler based on workload size
Fixes benchdnn test failures in ONCPUML-1104 when num_threads
is greater than workload size.
Signed-off-by: Crefeda Rodrigues <crefeda.rodrigues@arm.com>
Change-Id: Ic351a3ab5b548aa1843042a053130b02d0f1d40e
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/8655
Benchmark: Arm Jenkins <bsgcomp@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Viet-Hoa Do <viet-hoa.do@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Diffstat (limited to 'src')
-rw-r--r-- | src/runtime/OMP/OMPScheduler.cpp | 18 |
1 file changed, 10 insertions(+), 8 deletions(-)
diff --git a/src/runtime/OMP/OMPScheduler.cpp b/src/runtime/OMP/OMPScheduler.cpp
index aad24b4f01..7287f9f3f7 100644
--- a/src/runtime/OMP/OMPScheduler.cpp
+++ b/src/runtime/OMP/OMPScheduler.cpp
@@ -59,7 +59,7 @@ void OMPScheduler::schedule_op(ICPPKernel *kernel, const Hints &hints, const Win
     ARM_COMPUTE_ERROR_ON_MSG(hints.strategy() == StrategyHint::DYNAMIC, "Dynamic scheduling is not supported in OMPScheduler");
 
-    const Window &max_window = window;
+    const Window & max_window = window;
     const unsigned int num_iterations = max_window.num_iterations(hints.split_dimension());
     const unsigned int num_threads    = std::min(num_iterations, _num_threads);
@@ -76,8 +76,7 @@ void OMPScheduler::schedule_op(ICPPKernel *kernel, const Hints &hints, const Win
     for(unsigned int t = 0; t < num_windows; t++)
     {
         //Capture 't' by copy, all the other variables by reference:
-        workloads[t] = [t, &hints, &max_window, &num_windows, &kernel, &tensors](const ThreadInfo & info)
-        {
+        workloads[t] = [t, &hints, &max_window, &num_windows, &kernel, &tensors](const ThreadInfo &info) {
             Window win = max_window.split_window(hints.split_dimension(), t, num_windows);
             win.validate();
             kernel->run_op(tensors, win, info);
@@ -89,19 +88,22 @@ void OMPScheduler::schedule_op(ICPPKernel *kernel, const Hints &hints, const Win
 #ifndef DOXYGEN_SKIP_THIS
 void OMPScheduler::run_workloads(std::vector<arm_compute::IScheduler::Workload> &workloads)
 {
-    const unsigned int amount_of_work = static_cast<unsigned int>(workloads.size());
-    if(amount_of_work < 1 || _num_threads == 1)
+    const unsigned int amount_of_work     = static_cast<unsigned int>(workloads.size());
+    const unsigned int num_threads_to_use = std::min(_num_threads, amount_of_work);
+
+    if(amount_of_work < 1 || num_threads_to_use == 1)
     {
         return;
     }
 
     ThreadInfo info;
     info.cpu_info = &cpu_info();
-    info.num_threads = _num_threads;
-    #pragma omp parallel for firstprivate(info) num_threads(_num_threads) default(shared) proc_bind(close) schedule(static, 1)
+    info.num_threads = num_threads_to_use;
+#pragma omp parallel for firstprivate(info) num_threads(num_threads_to_use) default(shared) proc_bind(close) schedule(static, 1)
     for(unsigned int wid = 0; wid < amount_of_work; ++wid)
     {
-        const int tid = omp_get_thread_num();
+        const int tid  = omp_get_thread_num();
+        info.thread_id = tid;
         workloads[wid](info);
     }