From d8fb540568b29fd1d81a1cca667a1ad3e33ef5a1 Mon Sep 17 00:00:00 2001
From: Finn Williams
Date: Wed, 19 May 2021 20:52:00 +0100
Subject: IVGCVSW-5781 Add Async Support to Android-NN-Driver

Signed-off-by: Finn Williams
Change-Id: I1f13d04100fdb119495b9e3054425bf3babc59f1
---
 ArmnnPreparedModel_1_2.hpp | 65 +++++++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 64 insertions(+), 1 deletion(-)

(limited to 'ArmnnPreparedModel_1_2.hpp')

diff --git a/ArmnnPreparedModel_1_2.hpp b/ArmnnPreparedModel_1_2.hpp
index 13d7494e..6c630c56 100644
--- a/ArmnnPreparedModel_1_2.hpp
+++ b/ArmnnPreparedModel_1_2.hpp
@@ -44,7 +44,8 @@ public:
                            armnn::IRuntime* runtime,
                            const HalModel& model,
                            const std::string& requestInputsAndOutputsDumpDir,
-                           const bool gpuProfilingEnabled);
+                           const bool gpuProfilingEnabled,
+                           const bool asyncModelExecutionEnabled = false);
 
     virtual ~ArmnnPreparedModel_1_2();
 
@@ -76,6 +77,57 @@ public:
 
     bool ExecuteWithDummyInputs();
 
 private:
+
+    template <typename CallbackContext>
+    class ArmnnThreadPoolCallback_1_2 : public armnn::IAsyncExecutionCallback
+    {
+    public:
+        ArmnnThreadPoolCallback_1_2(ArmnnPreparedModel_1_2<HalVersion>* model,
+                                    std::shared_ptr<std::vector<::android::nn::RunTimePoolInfo>>& pMemPools,
+                                    std::vector<V1_2::OutputShape> outputShapes,
+                                    std::shared_ptr<armnn::InputTensors>& inputTensors,
+                                    std::shared_ptr<armnn::OutputTensors>& outputTensors,
+                                    CallbackContext callbackContext) :
+            m_Model(model),
+            m_MemPools(pMemPools),
+            m_OutputShapes(outputShapes),
+            m_InputTensors(inputTensors),
+            m_OutputTensors(outputTensors),
+            m_CallbackContext(callbackContext)
+        {}
+
+        void Notify(armnn::Status status, armnn::InferenceTimingPair timeTaken) override;
+
+        // Retrieve the Arm NN Status from the AsyncExecutionCallback that has been notified
+        virtual armnn::Status GetStatus() const override
+        {
+            return armnn::Status::Success;
+        }
+
+        // Block the calling thread until the AsyncExecutionCallback object allows it to proceed
+        virtual void Wait() const override
+        {}
+
+        // Retrieve the start time before executing the inference
+        virtual armnn::HighResolutionClock GetStartTime() const override
+        {
+            return std::chrono::high_resolution_clock::now();
+        }
+
+        // Retrieve the time after executing the inference
+        virtual armnn::HighResolutionClock GetEndTime() const override
+        {
+            return std::chrono::high_resolution_clock::now();
+        }
+
+        ArmnnPreparedModel_1_2<HalVersion>* m_Model;
+        std::shared_ptr<std::vector<::android::nn::RunTimePoolInfo>> m_MemPools;
+        std::vector<V1_2::OutputShape> m_OutputShapes;
+        std::shared_ptr<armnn::InputTensors> m_InputTensors;
+        std::shared_ptr<armnn::OutputTensors> m_OutputTensors;
+        CallbackContext m_CallbackContext;
+    };
+
     Return<V1_0::ErrorStatus> Execute(const V1_0::Request& request,
                                       V1_2::MeasureTiming measureTiming,
                                       CallbackAsync_1_2 callback);
@@ -101,6 +153,14 @@ private:
     template <typename TensorBindingCollection>
     void DumpTensorsIfRequired(char const* tensorNamePrefix, const TensorBindingCollection& tensorBindings);
 
+    /// schedule the graph prepared from the request for execution
+    template<typename CallbackContext>
+    void ScheduleGraphForExecution(
+            std::shared_ptr<std::vector<::android::nn::RunTimePoolInfo>>& pMemPools,
+            std::shared_ptr<armnn::InputTensors>& inputTensors,
+            std::shared_ptr<armnn::OutputTensors>& outputTensors,
+            CallbackContext m_CallbackContext);
+
     armnn::NetworkId m_NetworkId;
     armnn::IRuntime* m_Runtime;
     V1_2::Model m_Model;
@@ -112,6 +172,9 @@ private:
     uint32_t m_RequestCount;
     const std::string& m_RequestInputsAndOutputsDumpDir;
     const bool m_GpuProfilingEnabled;
+
+    std::unique_ptr<armnn::IWorkingMemHandle> m_WorkingMemHandle;
+    const bool m_AsyncModelExecutionEnabled;
 };
 
 }
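Editor's note: the ArmnnThreadPoolCallback_1_2 above deliberately stubs out Wait(), GetStatus(), GetStartTime() and GetEndTime(), because the driver reports completion back through Notify() and the HAL callback rather than by blocking. For readers implementing armnn::IAsyncExecutionCallback themselves, a minimal blocking variant might look like the sketch below. Only the virtual signatures are taken from the interface as it appears in this patch; the BlockingCallback name, the include path and the condition-variable plumbing are illustrative assumptions, not part of this change.

// Illustrative sketch only -- not part of this patch.
// Assumes armnn::HighResolutionClock is a std::chrono::high_resolution_clock::time_point
// and armnn::InferenceTimingPair is a std::pair of two such time points, matching the
// usage in the callback above.
#include <condition_variable>
#include <mutex>

#include <armnn/IAsyncExecutionCallback.hpp> // assumed header location

class BlockingCallback : public armnn::IAsyncExecutionCallback
{
public:
    // Invoked by the execution thread once the inference has finished.
    void Notify(armnn::Status status, armnn::InferenceTimingPair timeTaken) override
    {
        {
            std::lock_guard<std::mutex> lock(m_Mutex);
            m_Status    = status;
            m_StartTime = timeTaken.first;
            m_EndTime   = timeTaken.second;
            m_Notified  = true;
        }
        m_Condition.notify_all();
    }

    // Block the calling thread until Notify() has run.
    void Wait() const override
    {
        std::unique_lock<std::mutex> lock(m_Mutex);
        m_Condition.wait(lock, [this] { return m_Notified; });
    }

    // The getters wait for completion before reading the recorded results.
    armnn::Status GetStatus() const override
    {
        Wait();
        return m_Status;
    }

    armnn::HighResolutionClock GetStartTime() const override { Wait(); return m_StartTime; }
    armnn::HighResolutionClock GetEndTime()   const override { Wait(); return m_EndTime; }

private:
    mutable std::mutex              m_Mutex;
    mutable std::condition_variable m_Condition;
    bool                            m_Notified  = false;
    armnn::Status                   m_Status    = armnn::Status::Failure;
    armnn::HighResolutionClock      m_StartTime = {};
    armnn::HighResolutionClock      m_EndTime   = {};
};

Paired with the new asyncModelExecutionEnabled constructor flag and the ScheduleGraphForExecution() helper declared above, this is the general shape a caller would use to wait on an asynchronous inference and then read back its status and timing.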