diff options
author | Finn Williams <Finn.Williams@arm.com> | 2021-06-11 15:04:02 +0100 |
---|---|---|
committer | Finn Williams <Finn.Williams@arm.com> | 2021-06-23 13:21:01 +0100 |
commit | ca3a3e0fd86a07de9e073ba31dc2b42d6ca84536 (patch) | |
tree | 755d270b8c743a24e7ead37dc84f6ca69ad783c4 /ArmnnPreparedModel_1_3.hpp | |
parent | dc873f6309784d5fd6914ca5432d32ae6c3de0c2 (diff) | |
download | android-nn-driver-ca3a3e0fd86a07de9e073ba31dc2b42d6ca84536.tar.gz |
IVGCVSW-6062 Rework the async threadpool
!armnn:5801
Signed-off-by: Finn Williams <Finn.Williams@arm.com>
Change-Id: I9964d0899ce752441f380edddbd974010257b2dd
Diffstat (limited to 'ArmnnPreparedModel_1_3.hpp')
-rw-r--r-- | ArmnnPreparedModel_1_3.hpp | 30 |
1 file changed, 6 insertions, 24 deletions
diff --git a/ArmnnPreparedModel_1_3.hpp b/ArmnnPreparedModel_1_3.hpp
index 11299cc4..46798cde 100644
--- a/ArmnnPreparedModel_1_3.hpp
+++ b/ArmnnPreparedModel_1_3.hpp
@@ -12,6 +12,8 @@
 #include <NeuralNetworks.h>
 #include <armnn/ArmNN.hpp>
+#include <armnn/Threadpool.hpp>
+
 #include <string>
 #include <vector>
@@ -52,7 +54,8 @@ public:
                            const std::string& requestInputsAndOutputsDumpDir,
                            const bool gpuProfilingEnabled,
                            V1_3::Priority priority = V1_3::Priority::MEDIUM,
-                           const bool asyncModelExecutionEnabled = false);
+                           const bool asyncModelExecutionEnabled = false,
+                           const unsigned int numberOfThreads = 1);

    virtual ~ArmnnPreparedModel_1_3();
@@ -131,28 +134,6 @@ private:
        void Notify(armnn::Status status, armnn::InferenceTimingPair timeTaken) override;

-        // Retrieve the Arm NN Status from the AsyncExecutionCallback that has been notified
-        virtual armnn::Status GetStatus() const override
-        {
-            return armnn::Status::Success;
-        }
-
-        // Block the calling thread until the AsyncExecutionCallback object allows it to proceed
-        virtual void Wait() const override
-        {}
-
-        // Retrieve the start time before executing the inference
-        virtual armnn::HighResolutionClock GetStartTime() const override
-        {
-            return std::chrono::high_resolution_clock::now();
-        }
-
-        // Retrieve the time after executing the inference
-        virtual armnn::HighResolutionClock GetEndTime() const override
-        {
-            return std::chrono::high_resolution_clock::now();
-        }
-
        ArmnnPreparedModel_1_3<HalVersion>* m_Model;
        std::shared_ptr<std::vector<::android::nn::RunTimePoolInfo>> m_MemPools;
        std::vector<V1_2::OutputShape> m_OutputShapes;
@@ -196,6 +177,7 @@ private:
    armnn::NetworkId m_NetworkId;
    armnn::IRuntime* m_Runtime;
+    std::unique_ptr<armnn::Threadpool> m_Threadpool;
    V1_3::Model m_Model;
    // There must be a single RequestThread for all ArmnnPreparedModel objects to ensure serial execution of workloads
    // It is specific to this class, so it is declared as static here
@@ -205,7 +187,7 @@ private:
    const bool m_GpuProfilingEnabled;
    V1_3::Priority m_ModelPriority;
-    std::unique_ptr<IWorkingMemHandle> m_WorkingMemHandle;
+    std::shared_ptr<IWorkingMemHandle> m_WorkingMemHandle;
    const bool m_AsyncModelExecutionEnabled;
};