aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorKevin May <kevin.may@arm.com>2021-05-26 16:01:08 +0100
committerKevin May <kevin.may@arm.com>2021-05-26 16:19:38 +0100
commit94dd4dbbb2b59f47e38cf2f1280c4b25be45ea64 (patch)
treeb179186bcac41484dad37bcee18a705a05e462f6
parentb4b3ac91990eb5deaffca2300319f2ddf7aa0886 (diff)
downloadarmnn-94dd4dbbb2b59f47e38cf2f1280c4b25be45ea64.tar.gz
IVGCVSW-6009 Enable creating thread pool with 1 thread
* Allow the user to create a thread pool with a single thread * This is in keeping with how the android-nn-driver was implemented * Add it to ExecuteNetwork thread pool creation Signed-off-by: Kevin May <kevin.may@arm.com> Change-Id: I05b8048a9e0e45ae11d2b585080af28d9d008d81
-rw-r--r--src/armnn/LoadedNetwork.cpp2
-rw-r--r--tests/ExecuteNetwork/ExecuteNetwork.cpp2
-rw-r--r--tests/ExecuteNetwork/ExecuteNetworkParams.cpp8
-rw-r--r--tests/ExecuteNetwork/ExecuteNetworkProgramOptions.cpp6
-rw-r--r--tests/InferenceModel.hpp2
5 files changed, 6 insertions, 14 deletions
diff --git a/src/armnn/LoadedNetwork.cpp b/src/armnn/LoadedNetwork.cpp
index 25f8ddf6e5..1a94828841 100644
--- a/src/armnn/LoadedNetwork.cpp
+++ b/src/armnn/LoadedNetwork.cpp
@@ -170,7 +170,7 @@ LoadedNetwork::LoadedNetwork(std::unique_ptr<IOptimizedNetwork> net,
// Create the thread pool which will have working memory handles assigned to each thread
// Should occur after factories are registered so that the WorkingMemHandles can be created
- if (m_NetworkProperties.m_NumThreads > 1 && networkProperties.m_AsyncEnabled)
+ if (m_NetworkProperties.m_NumThreads > 0 && networkProperties.m_AsyncEnabled)
{
CreateThreadPool(m_NetworkProperties.m_NumThreads);
}
diff --git a/tests/ExecuteNetwork/ExecuteNetwork.cpp b/tests/ExecuteNetwork/ExecuteNetwork.cpp
index cd760a8199..e8d5b1860c 100644
--- a/tests/ExecuteNetwork/ExecuteNetwork.cpp
+++ b/tests/ExecuteNetwork/ExecuteNetwork.cpp
@@ -440,7 +440,7 @@ int MainImpl(const ExecuteNetworkParams& params,
}
}
// Asynchronous execution using the Arm NN thread pool
- else if (params.m_ThreadPoolSize >= 2)
+ else if (params.m_ThreadPoolSize >= 1)
{
try
{
diff --git a/tests/ExecuteNetwork/ExecuteNetworkParams.cpp b/tests/ExecuteNetwork/ExecuteNetworkParams.cpp
index 189ece25a7..4002e89eba 100644
--- a/tests/ExecuteNetwork/ExecuteNetworkParams.cpp
+++ b/tests/ExecuteNetwork/ExecuteNetworkParams.cpp
@@ -137,14 +137,6 @@ void ExecuteNetworkParams::ValidateParams()
CheckModelFormat(m_ModelFormat);
- // Check number of simultaneous iterations
- // Testing std::launch::async with a single iteration is possible if concurrent is manually set
- if ((m_SimultaneousIterations <= 1 && m_ThreadPoolSize > 1) ||
- (m_SimultaneousIterations <= 1 && !m_Concurrent))
- {
- ARMNN_LOG(fatal) << "simultaneous-iterations cannot be less than 2.";
- }
-
// Check input tensor shapes
if ((m_InputTensorShapes.size() != 0) &&
(m_InputTensorShapes.size() != m_InputNames.size()))
diff --git a/tests/ExecuteNetwork/ExecuteNetworkProgramOptions.cpp b/tests/ExecuteNetwork/ExecuteNetworkProgramOptions.cpp
index 1f57f85252..25ddecf3ba 100644
--- a/tests/ExecuteNetwork/ExecuteNetworkProgramOptions.cpp
+++ b/tests/ExecuteNetwork/ExecuteNetworkProgramOptions.cpp
@@ -292,8 +292,8 @@ ProgramOptions::ProgramOptions() : m_CxxOptions{"ExecuteNetwork",
("thread-pool-size",
"Number of Arm NN threads to use when running the network asynchronously via the Arm NN thread pool. "
- "The default is set to 1",
- cxxopts::value<size_t>(m_ExNetParams.m_ThreadPoolSize)->default_value("1"));
+ "The default is set to 0",
+ cxxopts::value<size_t>(m_ExNetParams.m_ThreadPoolSize)->default_value("0"));
m_CxxOptions.add_options("c) Optimization")
("bf16-turbo-mode",
@@ -461,7 +461,7 @@ void ProgramOptions::ParseOptions(int ac, const char* av[])
}
// Set concurrent to true if the user expects to run inferences asynchronously
- if (m_ExNetParams.m_SimultaneousIterations > 1)
+ if (m_ExNetParams.m_SimultaneousIterations > 1 || m_ExNetParams.m_ThreadPoolSize > 0)
{
m_ExNetParams.m_Concurrent = true;
}
diff --git a/tests/InferenceModel.hpp b/tests/InferenceModel.hpp
index 7c51011a22..fd2ab5f6ba 100644
--- a/tests/InferenceModel.hpp
+++ b/tests/InferenceModel.hpp
@@ -121,7 +121,7 @@ struct Params
, m_NumberOfThreads(0)
, m_MLGOTuningFilePath("")
, m_AsyncEnabled(false)
- , m_ThreadPoolSize(1)
+ , m_ThreadPoolSize(0)
{}
};