path: root/src
author     Dana Zlotnik <dankir01@e109190.kfn.arm.com>    2021-11-10 11:50:58 +0200
committer  Dana Zlotnik <dana.zlotnik@arm.com>             2021-11-16 11:54:34 +0000
commit     d7154dbf0f4a347f2f35f2475a893f1631c5ee1a (patch)
tree       2a9e3d6ef6eff030f6bccb43650884a7aa3c1941 /src
parent     e7a5b0e133b977ef80d31e86f4eb6e94eae5ba17 (diff)
download   ComputeLibrary-d7154dbf0f4a347f2f35f2475a893f1631c5ee1a.tar.gz
Implement 1D Adaptive Workload Splitting in CPPScheduler
Resolves COMPMID-4649

Change-Id: I941d2f8a40737ff05c49f6695a42884731ef2dc9
Signed-off-by: Dana Zlotnik <dana.zlotnik@arm.com>
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/6656
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: SiCong Li <sicong.li@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Diffstat (limited to 'src')
-rw-r--r--  src/core/NEON/kernels/NEPadLayerKernel.cpp                            | 13
-rw-r--r--  src/cpu/kernels/CpuActivationKernel.cpp                               | 13
-rw-r--r--  src/cpu/kernels/CpuAddKernel.cpp                                      | 13
-rw-r--r--  src/cpu/kernels/CpuIm2ColKernel.cpp                                   | 13
-rw-r--r--  src/cpu/kernels/CpuReshapeKernel.cpp                                  | 13
-rw-r--r--  src/cpu/kernels/assembly/CpuGemmAssemblyWrapperKernel.h               | 15
-rw-r--r--  src/cpu/kernels/internal/CpuDepthwiseConv2dAssemblyWrapperKernel.cpp  | 13
-rw-r--r--  src/cpu/kernels/internal/CpuPool2dAssemblyWrapperKernel.cpp           | 13
-rw-r--r--  src/runtime/IScheduler.cpp                                            | 37
9 files changed, 118 insertions(+), 25 deletions(-)
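
In outline, the change has two parts: each CPU kernel now overrides get_mws() to report a minimum workload size (MWS) tuned per CPU model, and IScheduler uses that value to cap how many windows a workload is split into. The sketch below is illustrative only and simplifies by assuming the MWS does not depend on the candidate thread count, whereas the library re-queries get_mws() for every candidate (see the IScheduler.cpp hunk further down); the name adaptive_num_windows is not part of the library.

#include <algorithm>
#include <cstddef>

// Illustrative only: cap the window count so that every window covers at
// least `mws` iterations along the split dimension, but never drop below one.
std::size_t adaptive_num_windows(std::size_t iterations, std::size_t mws, std::size_t requested)
{
    const std::size_t by_mws = (mws > 0) ? iterations / mws : requested;
    return std::max<std::size_t>(std::min(requested, by_mws), 1);
}
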
diff --git a/src/core/NEON/kernels/NEPadLayerKernel.cpp b/src/core/NEON/kernels/NEPadLayerKernel.cpp
index 60986812be..2e5e9f76be 100644
--- a/src/core/NEON/kernels/NEPadLayerKernel.cpp
+++ b/src/core/NEON/kernels/NEPadLayerKernel.cpp
@@ -261,9 +261,16 @@ void NEPadLayerKernel::run(const Window &window, const ThreadInfo &info)
size_t NEPadLayerKernel::get_mws(const CPUInfo &platform, size_t thread_count) const
{
- ARM_COMPUTE_UNUSED(platform, thread_count);
-
- return ICPPKernel::small_network_mws;
+ ARM_COMPUTE_UNUSED(thread_count);
+ // Tuning results that gave optimized results in performance investigation
+ if (platform.get_cpu_model() == CPUModel::A73 )
+ {
+ return 10240;
+ }
+ else
+ {
+ return 9216;
+ }
}
} // namespace arm_compute
diff --git a/src/cpu/kernels/CpuActivationKernel.cpp b/src/cpu/kernels/CpuActivationKernel.cpp
index 70ab06fc8a..4a6468d022 100644
--- a/src/cpu/kernels/CpuActivationKernel.cpp
+++ b/src/cpu/kernels/CpuActivationKernel.cpp
@@ -232,9 +232,16 @@ Status CpuActivationKernel::validate(const ITensorInfo *src, const ITensorInfo *
size_t CpuActivationKernel::get_mws(const CPUInfo &platform, size_t thread_count) const
{
- ARM_COMPUTE_UNUSED(platform, thread_count);
-
- return ICPPKernel::small_network_mws;
+ ARM_COMPUTE_UNUSED(thread_count);
+ // Tuning results that gave optimized results in performance investigation
+ if (platform.get_cpu_model() == CPUModel::A73 )
+ {
+ return 10240;
+ }
+ else
+ {
+ return 9216;
+ }
}
void CpuActivationKernel::run_op(ITensorPack &tensors, const Window &window, const ThreadInfo &info)
diff --git a/src/cpu/kernels/CpuAddKernel.cpp b/src/cpu/kernels/CpuAddKernel.cpp
index 77aa1ffb3f..73c1fda711 100644
--- a/src/cpu/kernels/CpuAddKernel.cpp
+++ b/src/cpu/kernels/CpuAddKernel.cpp
@@ -294,9 +294,16 @@ const char *CpuAddKernel::name() const
size_t CpuAddKernel::get_mws(const CPUInfo &platform, size_t thread_count) const
{
- ARM_COMPUTE_UNUSED(platform, thread_count);
-
- return ICPPKernel::small_network_mws;
+ ARM_COMPUTE_UNUSED(thread_count);
+ // Tuning results that gave optimized results in performance investigation
+ if (platform.get_cpu_model() == CPUModel::A73 )
+ {
+ return 10240;
+ }
+ else
+ {
+ return 9216;
+ }
}
} // namespace kernels
diff --git a/src/cpu/kernels/CpuIm2ColKernel.cpp b/src/cpu/kernels/CpuIm2ColKernel.cpp
index 5e3385d4ab..ecd1748a44 100644
--- a/src/cpu/kernels/CpuIm2ColKernel.cpp
+++ b/src/cpu/kernels/CpuIm2ColKernel.cpp
@@ -446,9 +446,16 @@ const char *CpuIm2ColKernel::name() const
size_t CpuIm2ColKernel::get_mws(const CPUInfo &platform, size_t thread_count) const
{
- ARM_COMPUTE_UNUSED(platform, thread_count);
-
- return ICPPKernel::small_network_mws;
+ ARM_COMPUTE_UNUSED(thread_count);
+ // Tuning results that gave optimized results in performance investigation
+ if (platform.get_cpu_model() == CPUModel::A73 )
+ {
+ return 10240;
+ }
+ else
+ {
+ return 9216;
+ }
}
} // namespace kernels
} // namespace cpu
diff --git a/src/cpu/kernels/CpuReshapeKernel.cpp b/src/cpu/kernels/CpuReshapeKernel.cpp
index 91c549643f..e19707dd0c 100644
--- a/src/cpu/kernels/CpuReshapeKernel.cpp
+++ b/src/cpu/kernels/CpuReshapeKernel.cpp
@@ -137,9 +137,16 @@ const char *CpuReshapeKernel::name() const
size_t CpuReshapeKernel::get_mws(const CPUInfo &platform, size_t thread_count) const
{
- ARM_COMPUTE_UNUSED(platform, thread_count);
-
- return ICPPKernel::small_network_mws;
+ ARM_COMPUTE_UNUSED(thread_count);
+ // Tuning results that gave optimized results in performance investigation
+ if (platform.get_cpu_model() == CPUModel::A73 )
+ {
+ return 10240;
+ }
+ else
+ {
+ return 9216;
+ }
}
} // namespace kernels
diff --git a/src/cpu/kernels/assembly/CpuGemmAssemblyWrapperKernel.h b/src/cpu/kernels/assembly/CpuGemmAssemblyWrapperKernel.h
index ff8b0b143f..47548b2538 100644
--- a/src/cpu/kernels/assembly/CpuGemmAssemblyWrapperKernel.h
+++ b/src/cpu/kernels/assembly/CpuGemmAssemblyWrapperKernel.h
@@ -120,13 +120,20 @@ public:
* @param[in] platform The CPU platform used to create the context.
* @param[in] thread_count Number of threads in the execution.
*
- * @return[out] small_network_mws Minimum workload size for requsted configuration.
+ * @return[out] small_network_mws Minimum workload size for requested configuration.
*/
size_t get_mws(const CPUInfo &platform, size_t thread_count) const override
{
- ARM_COMPUTE_UNUSED(platform, thread_count);
-
- return ICPPKernel::small_network_mws;
+ ARM_COMPUTE_UNUSED(thread_count);
+ // Tuning results that gave optimized results in performance investigation
+ if (platform.get_cpu_model() == CPUModel::A73 )
+ {
+ return 3072;
+ }
+ else
+ {
+ return 4096;
+ }
}
private:
diff --git a/src/cpu/kernels/internal/CpuDepthwiseConv2dAssemblyWrapperKernel.cpp b/src/cpu/kernels/internal/CpuDepthwiseConv2dAssemblyWrapperKernel.cpp
index a71864c10c..934e38b054 100644
--- a/src/cpu/kernels/internal/CpuDepthwiseConv2dAssemblyWrapperKernel.cpp
+++ b/src/cpu/kernels/internal/CpuDepthwiseConv2dAssemblyWrapperKernel.cpp
@@ -357,9 +357,16 @@ const char *CpuDepthwiseConv2dAssemblyWrapperKernel::name() const
size_t CpuDepthwiseConv2dAssemblyWrapperKernel::get_mws(const CPUInfo &platform, size_t thread_count) const
{
- ARM_COMPUTE_UNUSED(platform, thread_count);
-
- return ICPPKernel::small_network_mws;
+ ARM_COMPUTE_UNUSED(thread_count);
+ // Tuning results that gave optimized results in performance investigation
+ if (platform.get_cpu_model() == CPUModel::A73 )
+ {
+ return 10240;
+ }
+ else
+ {
+ return 9216;
+ }
}
} // namespace kernels
} // namespace cpu
diff --git a/src/cpu/kernels/internal/CpuPool2dAssemblyWrapperKernel.cpp b/src/cpu/kernels/internal/CpuPool2dAssemblyWrapperKernel.cpp
index f9c11fd4bd..78ac134604 100644
--- a/src/cpu/kernels/internal/CpuPool2dAssemblyWrapperKernel.cpp
+++ b/src/cpu/kernels/internal/CpuPool2dAssemblyWrapperKernel.cpp
@@ -277,9 +277,16 @@ void CpuPool2dAssemblyWrapperKernel::create_arm_pooling_requant(const ITensorInf
size_t CpuPool2dAssemblyWrapperKernel::get_mws(const CPUInfo &platform, size_t thread_count) const
{
- ARM_COMPUTE_UNUSED(platform, thread_count);
-
- return ICPPKernel::small_network_mws;
+ ARM_COMPUTE_UNUSED(thread_count);
+ // Tuning results that gave optimized results in performance investigation
+ if (platform.get_cpu_model() == CPUModel::A73 )
+ {
+ return 10240;
+ }
+ else
+ {
+ return 9216;
+ }
}
} // namespace kernels
} // namespace cpu
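
Each of the eight kernel hunks above makes the same change: thread_count is ignored and get_mws() returns a value picked per CPU model from the benchmarking the comments refer to. A hypothetical helper capturing that shared shape could look like the following (tuned_mws is not part of the library, and the arm_compute::CPUInfo / CPUModel types are assumed to be in scope via the headers the kernels already include):

// Hypothetical helper (not in the library): pick the Cortex-A73-specific
// value when running on an A73, otherwise fall back to the generic tuned value.
std::size_t tuned_mws(const arm_compute::CPUInfo &platform, std::size_t a73_mws, std::size_t default_mws)
{
    return platform.get_cpu_model() == arm_compute::CPUModel::A73 ? a73_mws : default_mws;
}

// e.g. CpuAddKernel::get_mws above would then amount to: return tuned_mws(platform, 10240, 9216);
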
diff --git a/src/runtime/IScheduler.cpp b/src/runtime/IScheduler.cpp
index 004b8a46b6..1d068c9b38 100644
--- a/src/runtime/IScheduler.cpp
+++ b/src/runtime/IScheduler.cpp
@@ -25,6 +25,7 @@
#include "arm_compute/core/CPP/ICPPKernel.h"
#include "arm_compute/core/Error.h"
+#include "arm_compute/core/Log.h"
#include "arm_compute/core/Window.h"
#include "src/common/cpuinfo/CpuInfo.h"
#include "src/runtime/SchedulerUtils.h"
@@ -138,6 +139,9 @@ void IScheduler::schedule_common(ICPPKernel *kernel, const Hints &hints, const W
default:
ARM_COMPUTE_ERROR("Unknown strategy");
}
+ // Make sure the smallest window is larger than the minimum workload size
+ num_windows = adjust_num_of_windows(max_window, hints.split_dimension(), num_windows, *kernel, cpu_info());
+
std::vector<IScheduler::Workload> workloads(num_windows);
for(unsigned int t = 0; t < num_windows; ++t)
{
@@ -171,4 +175,37 @@ void IScheduler::run_tagged_workloads(std::vector<Workload> &workloads, const ch
run_workloads(workloads);
}
+std::size_t IScheduler::adjust_num_of_windows(const Window &window, std::size_t split_dimension, std::size_t init_num_windows, const ICPPKernel &kernel, const CPUInfo &cpu_info)
+{
+ // Mitigation of the narrow split issue, which occurs when the split dimension is too small to split (hence "narrow").
+ if(window.num_iterations(split_dimension) < init_num_windows )
+ {
+ auto recommended_split_dim = Window::DimX;
+ for(std::size_t dims = Window::DimY; dims <= Window::DimW; ++dims)
+ {
+ if(window.num_iterations(recommended_split_dim) < window.num_iterations(dims))
+ {
+ recommended_split_dim = dims;
+ }
+ }
+ ARM_COMPUTE_LOG_INFO_MSG_WITH_FORMAT_CORE("%lu dimension is not a suitable dimension to split the workload. Recommended: %lu recommended_split_dim", split_dimension,
+ recommended_split_dim);
+ }
+
+ for(auto t = init_num_windows; t > 0; --t) // Trying the highest number of windows, init_num_windows, first
+ {
+ // Try splitting the workload into t, subject to each subworkload size <= mws.
+ if((window.num_iterations(split_dimension) / kernel.get_mws(cpu_info, t)) >= t)
+ {
+ if(t != init_num_windows)
+ {
+ ARM_COMPUTE_LOG_INFO_MSG_CORE("The scheduler is using a different thread count than the one assigned by the user.");
+ }
+ return t;
+ }
+ }
+ ARM_COMPUTE_LOG_INFO_MSG_CORE("The scheduler is using single thread instead of the thread count assigned by the user.");
+ return 1; // If the workload is so small that it can't be split, we should run a single thread
+}
+
} // namespace arm_compute
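
To see what adjust_num_of_windows does in practice, here is a small stand-alone walk-through with made-up numbers; mws_stub mirrors the non-A73 figure of 9216 used by most kernels above, but the stub, the function names, and the numbers are illustrative, not taken from the commit.

#include <cstddef>
#include <cstdio>

// Stand-in for kernel.get_mws(cpu_info, t); the real code asks the kernel and CPU.
static std::size_t mws_stub(std::size_t /*thread_count*/) { return 9216; }

// Mirrors the descending loop in IScheduler::adjust_num_of_windows above.
static std::size_t adjusted_windows(std::size_t iterations, std::size_t requested)
{
    for(std::size_t t = requested; t > 0; --t)
    {
        if(iterations / mws_stub(t) >= t)
        {
            return t;
        }
    }
    return 1; // too small to split: run single-threaded
}

int main()
{
    // 40000 iterations along the split dimension with 8 threads requested:
    // 40000 / 9216 = 4, so the loop settles on 4 windows instead of 8.
    std::printf("%zu\n", adjusted_windows(40000, 8));
    return 0;
}
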