From d7154dbf0f4a347f2f35f2475a893f1631c5ee1a Mon Sep 17 00:00:00 2001 From: Dana Zlotnik Date: Wed, 10 Nov 2021 11:50:58 +0200 Subject: Implement 1D Adaptive Workload Splitting in CPPScheduler Resolves COMPMID-4649 Change-Id: I941d2f8a40737ff05c49f6695a42884731ef2dc9 Signed-off-by: Dana Zlotnik Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/6656 Tested-by: Arm Jenkins Reviewed-by: SiCong Li Comments-Addressed: Arm Jenkins --- arm_compute/core/CPP/ICPPKernel.h | 3 +- arm_compute/runtime/IScheduler.h | 13 ++++++++ src/core/NEON/kernels/NEPadLayerKernel.cpp | 13 ++++++-- src/cpu/kernels/CpuActivationKernel.cpp | 13 ++++++-- src/cpu/kernels/CpuAddKernel.cpp | 13 ++++++-- src/cpu/kernels/CpuIm2ColKernel.cpp | 13 ++++++-- src/cpu/kernels/CpuReshapeKernel.cpp | 13 ++++++-- .../assembly/CpuGemmAssemblyWrapperKernel.h | 15 ++++++--- .../CpuDepthwiseConv2dAssemblyWrapperKernel.cpp | 13 ++++++-- .../internal/CpuPool2dAssemblyWrapperKernel.cpp | 13 ++++++-- src/runtime/IScheduler.cpp | 37 ++++++++++++++++++++++ 11 files changed, 132 insertions(+), 27 deletions(-) diff --git a/arm_compute/core/CPP/ICPPKernel.h b/arm_compute/core/CPP/ICPPKernel.h index af4a896a6c..4697316379 100644 --- a/arm_compute/core/CPP/ICPPKernel.h +++ b/arm_compute/core/CPP/ICPPKernel.h @@ -38,8 +38,7 @@ class ITensor; class ICPPKernel : public IKernel { public: - static constexpr size_t default_mws = 128; /* Default minimum workload size value */ - static constexpr size_t small_network_mws = 256; /* Default Minimum workload size value for small networks */ + static constexpr size_t default_mws = 1; /* Default minimum workload size value - no impact */ /** Default destructor */ virtual ~ICPPKernel() = default; diff --git a/arm_compute/runtime/IScheduler.h b/arm_compute/runtime/IScheduler.h index 3759fee8a8..f67357dcfd 100644 --- a/arm_compute/runtime/IScheduler.h +++ b/arm_compute/runtime/IScheduler.h @@ -215,6 +215,19 @@ protected: */ void schedule_common(ICPPKernel *kernel, const 
Hints &hints, const Window &window, ITensorPack &tensors); + /** Adjust the number of windows to optimize performance + * (used for small workloads where smaller number of threads might improve the performance) + * + * @param[in] window Window to use for kernel execution + * @param[in] split_dimension Axis of dimension to split + * @param[in] init_num_windows Initial number of sub-windows to split + * @param[in] kernel Kernel to execute + * @param[in] cpu_info The CPU platform used to create the context. + * + * @return Adjusted number of windows + */ + std::size_t adjust_num_of_windows(const Window &window, std::size_t split_dimension, std::size_t init_num_windows, const ICPPKernel &kernel, const CPUInfo &cpu_info); + private: unsigned int _num_threads_hint = {}; }; diff --git a/src/core/NEON/kernels/NEPadLayerKernel.cpp b/src/core/NEON/kernels/NEPadLayerKernel.cpp index 60986812be..2e5e9f76be 100644 --- a/src/core/NEON/kernels/NEPadLayerKernel.cpp +++ b/src/core/NEON/kernels/NEPadLayerKernel.cpp @@ -261,9 +261,16 @@ void NEPadLayerKernel::run(const Window &window, const ThreadInfo &info) size_t NEPadLayerKernel::get_mws(const CPUInfo &platform, size_t thread_count) const { - ARM_COMPUTE_UNUSED(platform, thread_count); - - return ICPPKernel::small_network_mws; + ARM_COMPUTE_UNUSED(thread_count); + // Tuning results that gave optimized results in performance investigation + if (platform.get_cpu_model() == CPUModel::A73 ) + { + return 10240; + } + else + { + return 9216; + } } } // namespace arm_compute diff --git a/src/cpu/kernels/CpuActivationKernel.cpp b/src/cpu/kernels/CpuActivationKernel.cpp index 70ab06fc8a..4a6468d022 100644 --- a/src/cpu/kernels/CpuActivationKernel.cpp +++ b/src/cpu/kernels/CpuActivationKernel.cpp @@ -232,9 +232,16 @@ Status CpuActivationKernel::validate(const ITensorInfo *src, const ITensorInfo * size_t CpuActivationKernel::get_mws(const CPUInfo &platform, size_t thread_count) const { - ARM_COMPUTE_UNUSED(platform, thread_count); - - 
return ICPPKernel::small_network_mws; + ARM_COMPUTE_UNUSED(thread_count); + // Tuning results that gave optimized results in performance investigation + if (platform.get_cpu_model() == CPUModel::A73 ) + { + return 10240; + } + else + { + return 9216; + } } void CpuActivationKernel::run_op(ITensorPack &tensors, const Window &window, const ThreadInfo &info) diff --git a/src/cpu/kernels/CpuAddKernel.cpp b/src/cpu/kernels/CpuAddKernel.cpp index 77aa1ffb3f..73c1fda711 100644 --- a/src/cpu/kernels/CpuAddKernel.cpp +++ b/src/cpu/kernels/CpuAddKernel.cpp @@ -294,9 +294,16 @@ const char *CpuAddKernel::name() const size_t CpuAddKernel::get_mws(const CPUInfo &platform, size_t thread_count) const { - ARM_COMPUTE_UNUSED(platform, thread_count); - - return ICPPKernel::small_network_mws; + ARM_COMPUTE_UNUSED(thread_count); + // Tuning results that gave optimized results in performance investigation + if (platform.get_cpu_model() == CPUModel::A73 ) + { + return 10240; + } + else + { + return 9216; + } } } // namespace kernels diff --git a/src/cpu/kernels/CpuIm2ColKernel.cpp b/src/cpu/kernels/CpuIm2ColKernel.cpp index 5e3385d4ab..ecd1748a44 100644 --- a/src/cpu/kernels/CpuIm2ColKernel.cpp +++ b/src/cpu/kernels/CpuIm2ColKernel.cpp @@ -446,9 +446,16 @@ const char *CpuIm2ColKernel::name() const size_t CpuIm2ColKernel::get_mws(const CPUInfo &platform, size_t thread_count) const { - ARM_COMPUTE_UNUSED(platform, thread_count); - - return ICPPKernel::small_network_mws; + ARM_COMPUTE_UNUSED(thread_count); + // Tuning results that gave optimized results in performance investigation + if (platform.get_cpu_model() == CPUModel::A73 ) + { + return 10240; + } + else + { + return 9216; + } } } // namespace kernels } // namespace cpu diff --git a/src/cpu/kernels/CpuReshapeKernel.cpp b/src/cpu/kernels/CpuReshapeKernel.cpp index 91c549643f..e19707dd0c 100644 --- a/src/cpu/kernels/CpuReshapeKernel.cpp +++ b/src/cpu/kernels/CpuReshapeKernel.cpp @@ -137,9 +137,16 @@ const char *CpuReshapeKernel::name() 
const size_t CpuReshapeKernel::get_mws(const CPUInfo &platform, size_t thread_count) const { - ARM_COMPUTE_UNUSED(platform, thread_count); - - return ICPPKernel::small_network_mws; + ARM_COMPUTE_UNUSED(thread_count); + // Tuning results that gave optimized results in performance investigation + if (platform.get_cpu_model() == CPUModel::A73 ) + { + return 10240; + } + else + { + return 9216; + } } } // namespace kernels diff --git a/src/cpu/kernels/assembly/CpuGemmAssemblyWrapperKernel.h b/src/cpu/kernels/assembly/CpuGemmAssemblyWrapperKernel.h index ff8b0b143f..47548b2538 100644 --- a/src/cpu/kernels/assembly/CpuGemmAssemblyWrapperKernel.h +++ b/src/cpu/kernels/assembly/CpuGemmAssemblyWrapperKernel.h @@ -120,13 +120,20 @@ public: * @param[in] platform The CPU platform used to create the context. * @param[in] thread_count Number of threads in the execution. * - * @return[out] small_network_mws Minimum workload size for requsted configuration. + * @return[out] small_network_mws Minimum workload size for requested configuration. 
*/ size_t get_mws(const CPUInfo &platform, size_t thread_count) const override { - ARM_COMPUTE_UNUSED(platform, thread_count); - - return ICPPKernel::small_network_mws; + ARM_COMPUTE_UNUSED(thread_count); + // Tuning results that gave optimized results in performance investigation + if (platform.get_cpu_model() == CPUModel::A73 ) + { + return 3072; + } + else + { + return 4096; + } } private: diff --git a/src/cpu/kernels/internal/CpuDepthwiseConv2dAssemblyWrapperKernel.cpp b/src/cpu/kernels/internal/CpuDepthwiseConv2dAssemblyWrapperKernel.cpp index a71864c10c..934e38b054 100644 --- a/src/cpu/kernels/internal/CpuDepthwiseConv2dAssemblyWrapperKernel.cpp +++ b/src/cpu/kernels/internal/CpuDepthwiseConv2dAssemblyWrapperKernel.cpp @@ -357,9 +357,16 @@ const char *CpuDepthwiseConv2dAssemblyWrapperKernel::name() const size_t CpuDepthwiseConv2dAssemblyWrapperKernel::get_mws(const CPUInfo &platform, size_t thread_count) const { - ARM_COMPUTE_UNUSED(platform, thread_count); - - return ICPPKernel::small_network_mws; + ARM_COMPUTE_UNUSED(thread_count); + // Tuning results that gave optimized results in performance investigation + if (platform.get_cpu_model() == CPUModel::A73 ) + { + return 10240; + } + else + { + return 9216; + } } } // namespace kernels } // namespace cpu diff --git a/src/cpu/kernels/internal/CpuPool2dAssemblyWrapperKernel.cpp b/src/cpu/kernels/internal/CpuPool2dAssemblyWrapperKernel.cpp index f9c11fd4bd..78ac134604 100644 --- a/src/cpu/kernels/internal/CpuPool2dAssemblyWrapperKernel.cpp +++ b/src/cpu/kernels/internal/CpuPool2dAssemblyWrapperKernel.cpp @@ -277,9 +277,16 @@ void CpuPool2dAssemblyWrapperKernel::create_arm_pooling_requant(const ITensorInf size_t CpuPool2dAssemblyWrapperKernel::get_mws(const CPUInfo &platform, size_t thread_count) const { - ARM_COMPUTE_UNUSED(platform, thread_count); - - return ICPPKernel::small_network_mws; + ARM_COMPUTE_UNUSED(thread_count); + // Tuning results that gave optimized results in performance investigation + if 
(platform.get_cpu_model() == CPUModel::A73 ) + { + return 10240; + } + else + { + return 9216; + } } } // namespace kernels } // namespace cpu diff --git a/src/runtime/IScheduler.cpp b/src/runtime/IScheduler.cpp index 004b8a46b6..1d068c9b38 100644 --- a/src/runtime/IScheduler.cpp +++ b/src/runtime/IScheduler.cpp @@ -25,6 +25,7 @@ #include "arm_compute/core/CPP/ICPPKernel.h" #include "arm_compute/core/Error.h" +#include "arm_compute/core/Log.h" #include "arm_compute/core/Window.h" #include "src/common/cpuinfo/CpuInfo.h" #include "src/runtime/SchedulerUtils.h" @@ -138,6 +139,9 @@ void IScheduler::schedule_common(ICPPKernel *kernel, const Hints &hints, const W default: ARM_COMPUTE_ERROR("Unknown strategy"); } + // Make sure the smallest window is larger than minimum workload size + num_windows = adjust_num_of_windows(max_window, hints.split_dimension(), num_windows, *kernel, cpu_info()); + std::vector workloads(num_windows); for(unsigned int t = 0; t < num_windows; ++t) { @@ -171,4 +175,37 @@ void IScheduler::run_tagged_workloads(std::vector &workloads, const ch run_workloads(workloads); } +std::size_t IScheduler::adjust_num_of_windows(const Window &window, std::size_t split_dimension, std::size_t init_num_windows, const ICPPKernel &kernel, const CPUInfo &cpu_info) +{ + // Mitigation of the narrow split issue, which occurs when the split dimension is too small to split (hence "narrow"). + if(window.num_iterations(split_dimension) < init_num_windows ) + { + auto recommended_split_dim = Window::DimX; + for(std::size_t dims = Window::DimY; dims <= Window::DimW; ++dims) + { + if(window.num_iterations(recommended_split_dim) < window.num_iterations(dims)) + { + recommended_split_dim = dims; + } + } + ARM_COMPUTE_LOG_INFO_MSG_WITH_FORMAT_CORE("%lu dimension is not a suitable dimension to split the workload. 
Recommended: %lu recommended_split_dim", split_dimension, + recommended_split_dim); + } + + for(auto t = init_num_windows; t > 0; --t) // Trying the highest number of windows, init_num_windows, first + { + // Try splitting the workload into t, subject to each subworkload size >= mws. + if((window.num_iterations(split_dimension) / kernel.get_mws(cpu_info, t)) >= t) + { + if(t != init_num_windows) + { + ARM_COMPUTE_LOG_INFO_MSG_CORE("The scheduler is using a different thread count than the one assigned by the user."); + } + return t; + } + } + ARM_COMPUTE_LOG_INFO_MSG_CORE("The scheduler is using single thread instead of the thread count assigned by the user."); + return 1; // If the workload is so small that it can't be split, we should run a single thread +} + } // namespace arm_compute -- cgit v1.2.1