Diffstat (limited to '1.2/ArmnnDriverImpl.cpp')
 -rw-r--r--  1.2/ArmnnDriverImpl.cpp  10
 1 file changed, 8 insertions(+), 2 deletions(-)
diff --git a/1.2/ArmnnDriverImpl.cpp b/1.2/ArmnnDriverImpl.cpp
index ccf82d0e..3eae526f 100644
--- a/1.2/ArmnnDriverImpl.cpp
+++ b/1.2/ArmnnDriverImpl.cpp
@@ -188,9 +188,14 @@ Return<V1_0::ErrorStatus> ArmnnDriverImpl::prepareArmnnModel_1_2(
// Load it into the runtime.
armnn::NetworkId netId = 0;
+ std::string msg;
+ armnn::INetworkProperties networkProperties(options.isAsyncModelExecutionEnabled(),
+ MemorySource::Undefined,
+ MemorySource::Undefined,
+ options.getNoOfArmnnThreads());
try
{
- if (runtime->LoadNetwork(netId, move(optNet)) != armnn::Status::Success)
+ if (runtime->LoadNetwork(netId, move(optNet), msg, networkProperties) != armnn::Status::Success)
{
return FailPrepareModel(V1_0::ErrorStatus::GENERAL_FAILURE, "Network could not be loaded", cb);
}
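For readers following the API change: the patch switches from the two-argument LoadNetwork overload to the four-argument one, which reports a failure message through a string and accepts an armnn::INetworkProperties describing async execution, the memory import/export sources, and the worker-thread count. Below is a minimal standalone sketch of the same call pattern; the helper name LoadWithProperties and the surrounding framing are illustrative, not part of the driver.

    // Minimal sketch of the call pattern introduced above, assuming a stock
    // Arm NN runtime of this era. The helper name LoadWithProperties is
    // hypothetical; only the INetworkProperties/LoadNetwork usage mirrors
    // the patch.
    #include <armnn/ArmNN.hpp>

    #include <iostream>
    #include <string>
    #include <utility>

    armnn::NetworkId LoadWithProperties(armnn::IRuntime* runtime,
                                        armnn::IOptimizedNetworkPtr optNet,
                                        bool asyncEnabled,
                                        size_t numThreads)
    {
        // MemorySource::Undefined disables zero-copy import/export, as in
        // the patch; the thread count feeds the async execution path.
        armnn::INetworkProperties networkProperties(asyncEnabled,
                                                    armnn::MemorySource::Undefined,
                                                    armnn::MemorySource::Undefined,
                                                    numThreads);

        armnn::NetworkId netId = 0;
        std::string errorMessage;

        // Unlike the two-argument overload, this one explains why loading
        // failed via errorMessage rather than only returning a status code.
        if (runtime->LoadNetwork(netId, std::move(optNet), errorMessage, networkProperties)
            != armnn::Status::Success)
        {
            std::cerr << "LoadNetwork failed: " << errorMessage << std::endl;
        }
        return netId;
    }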
@@ -216,7 +221,8 @@ Return<V1_0::ErrorStatus> ArmnnDriverImpl::prepareArmnnModel_1_2(
runtime.get(),
model,
options.GetRequestInputsAndOutputsDumpDir(),
- options.IsGpuProfilingEnabled()));
+ options.IsGpuProfilingEnabled(),
+ options.isAsyncModelExecutionEnabled()));
// Run a single 'dummy' inference of the model. This means that CL kernels will get compiled (and tuned if
// this is enabled) before the first 'real' inference which removes the overhead of the first inference.
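The warm-up that comment describes can be pictured with the following hedged sketch (not part of the patch): a single inference on zero-filled buffers forces OpenCL kernel compilation, and tuning if enabled, so the first real request does not pay that cost. The single input/output pair with binding id 0 is an illustrative assumption.

    // Hedged sketch of a warm-up ('dummy') inference. Binding ids and the
    // single-input/single-output shape are assumptions for illustration.
    #include <armnn/ArmNN.hpp>

    #include <cstdint>
    #include <vector>

    void WarmUp(armnn::IRuntime* runtime, armnn::NetworkId netId)
    {
        const armnn::TensorInfo inputInfo  = runtime->GetInputTensorInfo(netId, 0);
        const armnn::TensorInfo outputInfo = runtime->GetOutputTensorInfo(netId, 0);

        // Zero-filled buffers: the result of this inference is discarded;
        // only the side effect of compiling (and tuning) kernels matters.
        std::vector<uint8_t> inputData(inputInfo.GetNumBytes(), 0);
        std::vector<uint8_t> outputData(outputInfo.GetNumBytes(), 0);

        armnn::InputTensors inputs
        {
            { 0, armnn::ConstTensor(inputInfo, inputData.data()) }
        };
        armnn::OutputTensors outputs
        {
            { 0, armnn::Tensor(outputInfo, outputData.data()) }
        };

        runtime->EnqueueWorkload(netId, inputs, outputs);
    }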