about summary refs log tree commit diff
path: root/ArmnnPreparedModel_1_2.hpp
diff options
context:
space:
mode:
author    Finn Williams <Finn.Williams@arm.com>    2021-06-11 15:04:02 +0100
committer Finn Williams <Finn.Williams@arm.com>    2021-06-23 13:21:01 +0100
commit    ca3a3e0fd86a07de9e073ba31dc2b42d6ca84536 (patch)
tree      755d270b8c743a24e7ead37dc84f6ca69ad783c4 /ArmnnPreparedModel_1_2.hpp
parent    dc873f6309784d5fd6914ca5432d32ae6c3de0c2 (diff)
download  android-nn-driver-ca3a3e0fd86a07de9e073ba31dc2b42d6ca84536.tar.gz
IVGCVSW-6062 Rework the async threadpool
!armnn:5801 Signed-off-by: Finn Williams <Finn.Williams@arm.com> Change-Id: I9964d0899ce752441f380edddbd974010257b2dd
Diffstat (limited to 'ArmnnPreparedModel_1_2.hpp')
-rw-r--r--    ArmnnPreparedModel_1_2.hpp    46
1 files changed, 13 insertions, 33 deletions
diff --git a/ArmnnPreparedModel_1_2.hpp b/ArmnnPreparedModel_1_2.hpp
index 6c630c56..4ee2b817 100644
--- a/ArmnnPreparedModel_1_2.hpp
+++ b/ArmnnPreparedModel_1_2.hpp
@@ -12,6 +12,7 @@
#include <NeuralNetworks.h>
#include <armnn/ArmNN.hpp>
+#include <armnn/Threadpool.hpp>
#include <string>
#include <vector>
@@ -45,7 +46,8 @@ public:
const HalModel& model,
const std::string& requestInputsAndOutputsDumpDir,
const bool gpuProfilingEnabled,
- const bool asyncModelExecutionEnabled = false);
+ const bool asyncModelExecutionEnabled = false,
+ const unsigned int numberOfThreads = 1);
virtual ~ArmnnPreparedModel_1_2();
@@ -98,28 +100,6 @@ private:
void Notify(armnn::Status status, armnn::InferenceTimingPair timeTaken) override;
- // Retrieve the Arm NN Status from the AsyncExecutionCallback that has been notified
- virtual armnn::Status GetStatus() const override
- {
- return armnn::Status::Success;
- }
-
- // Block the calling thread until the AsyncExecutionCallback object allows it to proceed
- virtual void Wait() const override
- {}
-
- // Retrieve the start time before executing the inference
- virtual armnn::HighResolutionClock GetStartTime() const override
- {
- return std::chrono::high_resolution_clock::now();
- }
-
- // Retrieve the time after executing the inference
- virtual armnn::HighResolutionClock GetEndTime() const override
- {
- return std::chrono::high_resolution_clock::now();
- }
-
ArmnnPreparedModel_1_2<HalVersion>* m_Model;
std::shared_ptr<std::vector<::android::nn::RunTimePoolInfo>> m_MemPools;
std::vector<V1_2::OutputShape> m_OutputShapes;
@@ -161,20 +141,20 @@ private:
std::shared_ptr<armnn::OutputTensors>& outputTensors,
CallbackContext m_CallbackContext);
- armnn::NetworkId m_NetworkId;
- armnn::IRuntime* m_Runtime;
- V1_2::Model m_Model;
+ armnn::NetworkId m_NetworkId;
+ armnn::IRuntime* m_Runtime;
+ std::unique_ptr<armnn::Threadpool> m_Threadpool;
+ V1_2::Model m_Model;
// There must be a single RequestThread for all ArmnnPreparedModel objects to ensure serial execution of workloads
// It is specific to this class, so it is declared as static here
static RequestThread<ArmnnPreparedModel_1_2,
HalVersion,
- CallbackContext_1_2> m_RequestThread;
- uint32_t m_RequestCount;
- const std::string& m_RequestInputsAndOutputsDumpDir;
- const bool m_GpuProfilingEnabled;
-
- std::unique_ptr<IWorkingMemHandle> m_WorkingMemHandle;
- const bool m_AsyncModelExecutionEnabled;
+ CallbackContext_1_2> m_RequestThread;
+ uint32_t m_RequestCount;
+ const std::string& m_RequestInputsAndOutputsDumpDir;
+ const bool m_GpuProfilingEnabled;
+ std::shared_ptr<IWorkingMemHandle> m_WorkingMemHandle;
+ const bool m_AsyncModelExecutionEnabled;
};
}