path: root/ArmnnDriverImpl.cpp
author:    Narumol Prangnawarat <narumol.prangnawarat@arm.com>  2022-02-07 13:12:24 +0000
committer: David Monahan <david.monahan@arm.com>                2022-02-16 14:42:00 +0000
commit:    d1a947fd487eb7674271f759e43507d10e932ab0 (patch)
tree:      17ad41e29935a657c5025485173b4ded85c4a47c /ArmnnDriverImpl.cpp
parent:    29520c4c785fff938586ec231cbf5e51dadbf614 (diff)
download:  android-nn-driver-d1a947fd487eb7674271f759e43507d10e932ab0.tar.gz
Revert "Revert "IVGCVSW-6700 Enable import aligned host memory in android-nn-driver""
This reverts commit 8069603dc44b7673b356f66517cd8b25af8080f0.

* Reason for revert: try to re-enable import of aligned host memory in android-nn-driver
* Added a check to ArmnnDriverImpl.cpp so that ExecuteWithDummyInputs is only called when GpuAcc is in the backend list
* Added new android-nn-driver driver options to enable / disable Import and Export
* Import is disabled by default for now due to conv2d issues
* Export is enabled by default

!armnn:7147

Change-Id: I91110c58ebb3931d1c458e3774944e55c1250dd8
Signed-off-by: David Monahan <David.Monahan@arm.com>
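For context on the new Import/Export options: the sketch below shows one plausible way the two booleans end up steering ArmNN's memory import when the optimized network is loaded. This is an illustration, not code from this commit; the LoadWithImportOptions helper is hypothetical, and the INetworkProperties constructor arguments shown are an assumption that varies between ArmNN releases, so check the header shipped with the release in use.

    // Hypothetical helper (not from this commit): translating the driver's
    // import/export toggles into armnn::INetworkProperties at load time.
    // MemorySource::Malloc requests zero-copy import of suitably aligned
    // host memory; MemorySource::Undefined keeps the default copy behaviour.
    #include <string>
    #include <utility>
    #include <armnn/ArmNN.hpp>

    armnn::Status LoadWithImportOptions(armnn::IRuntime& runtime,
                                        armnn::NetworkId& netIdOut,
                                        armnn::IOptimizedNetworkPtr network,
                                        bool importEnabled,  // cf. DriverOptions::isImportEnabled()
                                        bool exportEnabled)  // cf. DriverOptions::isExportEnabled()
    {
        using armnn::MemorySource;
        std::string errorMessage;
        // Assumed argument order (asyncEnabled, inputSource, outputSource);
        // later ArmNN releases add further defaulted parameters.
        armnn::INetworkProperties properties(
            /*asyncEnabled=*/false,
            importEnabled ? MemorySource::Malloc : MemorySource::Undefined,
            exportEnabled ? MemorySource::Malloc : MemorySource::Undefined);
        return runtime.LoadNetwork(netIdOut, std::move(network), errorMessage, properties);
    }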
Diffstat (limited to 'ArmnnDriverImpl.cpp')
-rw-r--r--  ArmnnDriverImpl.cpp | 40
1 file changed, 23 insertions(+), 17 deletions(-)
diff --git a/ArmnnDriverImpl.cpp b/ArmnnDriverImpl.cpp
index 0b3b9191..89fa54fc 100644
--- a/ArmnnDriverImpl.cpp
+++ b/ArmnnDriverImpl.cpp
@@ -202,30 +202,36 @@ Return<V1_0::ErrorStatus> ArmnnDriverImpl<HalPolicy>::prepareModel(
             options.GetRequestInputsAndOutputsDumpDir(),
             options.IsGpuProfilingEnabled(),
             options.isAsyncModelExecutionEnabled(),
-            options.getNoOfArmnnThreads()));
+            options.getNoOfArmnnThreads(),
+            options.isImportEnabled(),
+            options.isExportEnabled()));
 
-    // Run a single 'dummy' inference of the model. This means that CL kernels will get compiled (and tuned if
-    // this is enabled) before the first 'real' inference which removes the overhead of the first inference.
-    if (!preparedModel->ExecuteWithDummyInputs())
+    if (std::find(options.GetBackends().begin(),
+                  options.GetBackends().end(),
+                  armnn::Compute::GpuAcc) != options.GetBackends().end())
     {
-        return FailPrepareModel(V1_0::ErrorStatus::GENERAL_FAILURE, "Network could not be executed", cb);
-    }
-
-    if (clTunedParameters &&
-        options.GetClTunedParametersMode() == armnn::IGpuAccTunedParameters::Mode::UpdateTunedParameters)
-    {
-        // Now that we've done one inference the CL kernel parameters will have been tuned, so save the updated file.
-        try
+        // Run a single 'dummy' inference of the model. This means that CL kernels will get compiled (and tuned if
+        // this is enabled) before the first 'real' inference which removes the overhead of the first inference.
+        if (!preparedModel->ExecuteWithDummyInputs())
         {
-            clTunedParameters->Save(options.GetClTunedParametersFile().c_str());
+            return FailPrepareModel(V1_0::ErrorStatus::GENERAL_FAILURE, "Network could not be executed", cb);
         }
-        catch (std::exception& error)
+
+        if (clTunedParameters &&
+            options.GetClTunedParametersMode() == armnn::IGpuAccTunedParameters::Mode::UpdateTunedParameters)
         {
-            ALOGE("ArmnnDriverImpl::prepareModel: Failed to save CL tuned parameters file '%s': %s",
-                  options.GetClTunedParametersFile().c_str(), error.what());
+            // Now that we've done one inference the CL kernel parameters will have been tuned, so save the updated file
+            try
+            {
+                clTunedParameters->Save(options.GetClTunedParametersFile().c_str());
+            }
+            catch (std::exception& error)
+            {
+                ALOGE("ArmnnDriverImpl::prepareModel: Failed to save CL tuned parameters file '%s': %s",
+                      options.GetClTunedParametersFile().c_str(), error.what());
+            }
         }
     }
-
     NotifyCallbackAndCheck(cb, V1_0::ErrorStatus::NONE, preparedModel);
     return V1_0::ErrorStatus::NONE;
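
A note on the guard introduced above: CL kernel compilation and tuning only happen on the GpuAcc backend, so the warm-up run (and the tuned-parameter save that depends on it) can be skipped whenever GpuAcc is absent from the backend list. Below is a standalone sketch of the membership test; the HasGpuAcc helper is hypothetical, and it assumes the backend list is a std::vector<armnn::BackendId>, as DriverOptions::GetBackends() provides in this driver.

    // Hypothetical helper mirroring the std::find guard in the hunk above.
    #include <algorithm>
    #include <vector>
    #include <armnn/BackendId.hpp>

    bool HasGpuAcc(const std::vector<armnn::BackendId>& backends)
    {
        // armnn::Compute::GpuAcc converts to a BackendId, so std::find can
        // compare it against every entry in the list.
        return std::find(backends.begin(), backends.end(),
                         armnn::Compute::GpuAcc) != backends.end();
    }

With a helper of that shape, the body of prepareModel would read as if (HasGpuAcc(options.GetBackends())) { ... }, keeping the warm-up policy in one testable place.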