diff options
Diffstat (limited to 'tests')
-rw-r--r-- tests/ExecuteNetwork/ExecuteNetwork.cpp               | 1
-rw-r--r-- tests/ExecuteNetwork/ExecuteNetworkParams.hpp         | 1
-rw-r--r-- tests/ExecuteNetwork/ExecuteNetworkProgramOptions.cpp | 6
-rw-r--r-- tests/InferenceModel.hpp                              | 5
4 files changed, 12 insertions, 1 deletion
diff --git a/tests/ExecuteNetwork/ExecuteNetwork.cpp b/tests/ExecuteNetwork/ExecuteNetwork.cpp index 5df5dfbce7..c19f519c73 100644 --- a/tests/ExecuteNetwork/ExecuteNetwork.cpp +++ b/tests/ExecuteNetwork/ExecuteNetwork.cpp @@ -308,6 +308,7 @@ int MainImpl(const ExecuteNetworkParams& params, inferenceModelParams.m_EnableFastMath = params.m_EnableFastMath; inferenceModelParams.m_SaveCachedNetwork = params.m_SaveCachedNetwork; inferenceModelParams.m_CachedNetworkFilePath = params.m_CachedNetworkFilePath; + inferenceModelParams.m_NumberOfThreads = params.m_NumberOfThreads; for(const std::string& inputName: params.m_InputNames) { diff --git a/tests/ExecuteNetwork/ExecuteNetworkParams.hpp b/tests/ExecuteNetwork/ExecuteNetworkParams.hpp index 56d32907b8..830270adbc 100644 --- a/tests/ExecuteNetwork/ExecuteNetworkParams.hpp +++ b/tests/ExecuteNetwork/ExecuteNetworkParams.hpp @@ -34,6 +34,7 @@ struct ExecuteNetworkParams size_t m_Iterations; std::string m_ModelFormat; std::string m_ModelPath; + unsigned int m_NumberOfThreads; std::vector<std::string> m_OutputNames; std::vector<std::string> m_OutputTensorFiles; std::vector<std::string> m_OutputTypes; diff --git a/tests/ExecuteNetwork/ExecuteNetworkProgramOptions.cpp b/tests/ExecuteNetwork/ExecuteNetworkProgramOptions.cpp index a080e57d0c..73da1f1d1d 100644 --- a/tests/ExecuteNetwork/ExecuteNetworkProgramOptions.cpp +++ b/tests/ExecuteNetwork/ExecuteNetworkProgramOptions.cpp @@ -276,6 +276,12 @@ ProgramOptions::ProgramOptions() : m_CxxOptions{"ExecuteNetwork", "performance improvements but may result in reduced or different precision.", cxxopts::value<bool>(m_ExNetParams.m_EnableFastMath)->default_value("false")->implicit_value("true")) + ("number-of-threads", + "Assign the number of threads used by the CpuAcc backend. " + "Input value must be between 1 and 64. " + "Default is set to 0 (Backend will decide number of threads to use).", + cxxopts::value<unsigned int>(m_ExNetParams.m_NumberOfThreads)->default_value("0")) + ("save-cached-network", "Enables saving of the cached network to a file given with the cached-network-filepath option. " "See also --cached-network-filepath", diff --git a/tests/InferenceModel.hpp b/tests/InferenceModel.hpp index 936d0bf9ea..d20bb2271f 100644 --- a/tests/InferenceModel.hpp +++ b/tests/InferenceModel.hpp @@ -99,6 +99,7 @@ struct Params bool m_EnableFastMath; bool m_SaveCachedNetwork; std::string m_CachedNetworkFilePath; + unsigned int m_NumberOfThreads; Params() : m_ComputeDevices{} @@ -113,6 +114,7 @@ struct Params , m_EnableFastMath(false) , m_SaveCachedNetwork(false) , m_CachedNetworkFilePath("") + , m_NumberOfThreads(0) {} }; @@ -436,7 +438,8 @@ public: }); armnn::BackendOptions cpuAcc("CpuAcc", { - { "FastMathEnabled", params.m_EnableFastMath } + { "FastMathEnabled", params.m_EnableFastMath }, + { "NumberOfThreads", params.m_NumberOfThreads } }); options.m_ModelOptions.push_back(gpuAcc); options.m_ModelOptions.push_back(cpuAcc);