diff options
-rw-r--r-- | src/armnn/LoadedNetwork.cpp                           | 2 | +-
-rw-r--r-- | tests/ExecuteNetwork/ExecuteNetwork.cpp               | 2 | +-
-rw-r--r-- | tests/ExecuteNetwork/ExecuteNetworkParams.cpp         | 8 | --------
-rw-r--r-- | tests/ExecuteNetwork/ExecuteNetworkProgramOptions.cpp | 6 | +++---
-rw-r--r-- | tests/InferenceModel.hpp                              | 2 | +-
5 files changed, 6 insertions, 14 deletions
diff --git a/src/armnn/LoadedNetwork.cpp b/src/armnn/LoadedNetwork.cpp index 25f8ddf6e5..1a94828841 100644 --- a/src/armnn/LoadedNetwork.cpp +++ b/src/armnn/LoadedNetwork.cpp @@ -170,7 +170,7 @@ LoadedNetwork::LoadedNetwork(std::unique_ptr<IOptimizedNetwork> net, // Create the thread pool which will have working memory handles assigned to each thread // Should occur after factories are registered so that the WorkingMemHandles can be created - if (m_NetworkProperties.m_NumThreads > 1 && networkProperties.m_AsyncEnabled) + if (m_NetworkProperties.m_NumThreads > 0 && networkProperties.m_AsyncEnabled) { CreateThreadPool(m_NetworkProperties.m_NumThreads); } diff --git a/tests/ExecuteNetwork/ExecuteNetwork.cpp b/tests/ExecuteNetwork/ExecuteNetwork.cpp index cd760a8199..e8d5b1860c 100644 --- a/tests/ExecuteNetwork/ExecuteNetwork.cpp +++ b/tests/ExecuteNetwork/ExecuteNetwork.cpp @@ -440,7 +440,7 @@ int MainImpl(const ExecuteNetworkParams& params, } } // Asynchronous execution using the Arm NN thread pool - else if (params.m_ThreadPoolSize >= 2) + else if (params.m_ThreadPoolSize >= 1) { try { diff --git a/tests/ExecuteNetwork/ExecuteNetworkParams.cpp b/tests/ExecuteNetwork/ExecuteNetworkParams.cpp index 189ece25a7..4002e89eba 100644 --- a/tests/ExecuteNetwork/ExecuteNetworkParams.cpp +++ b/tests/ExecuteNetwork/ExecuteNetworkParams.cpp @@ -137,14 +137,6 @@ void ExecuteNetworkParams::ValidateParams() CheckModelFormat(m_ModelFormat); - // Check number of simultaneous iterations - // Testing std::launch::async with a single iteration is possible if concurrent is manually set - if ((m_SimultaneousIterations <= 1 && m_ThreadPoolSize > 1) || - (m_SimultaneousIterations <= 1 && !m_Concurrent)) - { - ARMNN_LOG(fatal) << "simultaneous-iterations cannot be less than 2."; - } - // Check input tensor shapes if ((m_InputTensorShapes.size() != 0) && (m_InputTensorShapes.size() != m_InputNames.size())) diff --git a/tests/ExecuteNetwork/ExecuteNetworkProgramOptions.cpp 
b/tests/ExecuteNetwork/ExecuteNetworkProgramOptions.cpp index 1f57f85252..25ddecf3ba 100644 --- a/tests/ExecuteNetwork/ExecuteNetworkProgramOptions.cpp +++ b/tests/ExecuteNetwork/ExecuteNetworkProgramOptions.cpp @@ -292,8 +292,8 @@ ProgramOptions::ProgramOptions() : m_CxxOptions{"ExecuteNetwork", ("thread-pool-size", "Number of Arm NN threads to use when running the network asynchronously via the Arm NN thread pool. " - "The default is set to 1", - cxxopts::value<size_t>(m_ExNetParams.m_ThreadPoolSize)->default_value("1")); + "The default is set to 0", + cxxopts::value<size_t>(m_ExNetParams.m_ThreadPoolSize)->default_value("0")); m_CxxOptions.add_options("c) Optimization") ("bf16-turbo-mode", @@ -461,7 +461,7 @@ void ProgramOptions::ParseOptions(int ac, const char* av[]) } // Set concurrent to true if the user expects to run inferences asynchronously - if (m_ExNetParams.m_SimultaneousIterations > 1) + if (m_ExNetParams.m_SimultaneousIterations > 1 || m_ExNetParams.m_ThreadPoolSize > 0) { m_ExNetParams.m_Concurrent = true; } diff --git a/tests/InferenceModel.hpp b/tests/InferenceModel.hpp index 7c51011a22..fd2ab5f6ba 100644 --- a/tests/InferenceModel.hpp +++ b/tests/InferenceModel.hpp @@ -121,7 +121,7 @@ struct Params , m_NumberOfThreads(0) , m_MLGOTuningFilePath("") , m_AsyncEnabled(false) - , m_ThreadPoolSize(1) + , m_ThreadPoolSize(0) {} }; |