author    | Narumol Prangnawarat <narumol.prangnawarat@arm.com> | 2022-02-07 13:12:24 +0000
committer | Colm Donelan <colm.donelan@arm.com> | 2022-02-16 21:37:18 +0000
commit    | 558a1d4ed904f5f7d04781bc3405ee77669563d0 (patch)
tree      | 17ad41e29935a657c5025485173b4ded85c4a47c /ArmnnDriverImpl.cpp
parent    | f368fbcd023b636d060ca34eb693742490189e98 (diff)
download  | android-nn-driver-branches/android-nn-driver_22_02.tar.gz
Revert "Revert "IVGCVSW-6700 Enable import aligned host memory in android-nn-driver""v22.02branches/android-nn-driver_22_02
This reverts commit 8069603dc44b7673b356f66517cd8b25af8080f0.
* Reason for revert: try to re-enable import of aligned host memory in android-nn-driver
* Added a check to ArmnnDriverImpl.cpp so that ExecuteWithDummyInputs is only called when GpuAcc is among the selected backends
* Added new android-nn-driver driver options to enable/disable Import and Export (a sketch of the defaults follows this message)
* Import is disabled by default for now due to conv2d issues
* Export is enabled by default
!armnn:7147
Change-Id: I91110c58ebb3931d1c458e3774944e55c1250dd8
Signed-off-by: David Monahan <David.Monahan@arm.com>
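
The commit message describes two new boolean driver options with asymmetric defaults (Import off, Export on). As a minimal, hypothetical sketch of that shape: only the getter names `isImportEnabled()` and `isExportEnabled()` appear in the diff below; the class layout, member names, and flag strings here are invented for illustration and are not the actual DriverOptions implementation.

```cpp
// Hypothetical sketch of the two new options. Getter names match the diff;
// everything else (flag strings, members, parsing) is an assumption.
#include <cstring>

class DriverOptions
{
public:
    // Parse invented "--enable-import" / "--disable-export" style flags.
    DriverOptions(int argc, char** argv)
    {
        for (int i = 1; i < argc; ++i)
        {
            if (std::strcmp(argv[i], "--enable-import") == 0)  { m_ImportEnabled = true;  }
            if (std::strcmp(argv[i], "--disable-export") == 0) { m_ExportEnabled = false; }
        }
    }

    bool isImportEnabled() const { return m_ImportEnabled; }
    bool isExportEnabled() const { return m_ExportEnabled; }

private:
    bool m_ImportEnabled = false; // Import disabled by default (conv2d issues)
    bool m_ExportEnabled = true;  // Export enabled by default
};
```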
Diffstat (limited to 'ArmnnDriverImpl.cpp')
-rw-r--r-- | ArmnnDriverImpl.cpp | 40
1 file changed, 23 insertions, 17 deletions
```diff
diff --git a/ArmnnDriverImpl.cpp b/ArmnnDriverImpl.cpp
index 0b3b9191..89fa54fc 100644
--- a/ArmnnDriverImpl.cpp
+++ b/ArmnnDriverImpl.cpp
@@ -202,30 +202,36 @@ Return<V1_0::ErrorStatus> ArmnnDriverImpl<HalPolicy>::prepareModel(
             options.GetRequestInputsAndOutputsDumpDir(),
             options.IsGpuProfilingEnabled(),
             options.isAsyncModelExecutionEnabled(),
-            options.getNoOfArmnnThreads()));
+            options.getNoOfArmnnThreads(),
+            options.isImportEnabled(),
+            options.isExportEnabled()));
 
-    // Run a single 'dummy' inference of the model. This means that CL kernels will get compiled (and tuned if
-    // this is enabled) before the first 'real' inference which removes the overhead of the first inference.
-    if (!preparedModel->ExecuteWithDummyInputs())
+    if (std::find(options.GetBackends().begin(),
+                  options.GetBackends().end(),
+                  armnn::Compute::GpuAcc) != options.GetBackends().end())
     {
-        return FailPrepareModel(V1_0::ErrorStatus::GENERAL_FAILURE, "Network could not be executed", cb);
-    }
-
-    if (clTunedParameters &&
-        options.GetClTunedParametersMode() == armnn::IGpuAccTunedParameters::Mode::UpdateTunedParameters)
-    {
-        // Now that we've done one inference the CL kernel parameters will have been tuned, so save the updated file.
-        try
+        // Run a single 'dummy' inference of the model. This means that CL kernels will get compiled (and tuned if
+        // this is enabled) before the first 'real' inference which removes the overhead of the first inference.
+        if (!preparedModel->ExecuteWithDummyInputs())
         {
-            clTunedParameters->Save(options.GetClTunedParametersFile().c_str());
+            return FailPrepareModel(V1_0::ErrorStatus::GENERAL_FAILURE, "Network could not be executed", cb);
         }
-        catch (std::exception& error)
+
+        if (clTunedParameters &&
+            options.GetClTunedParametersMode() == armnn::IGpuAccTunedParameters::Mode::UpdateTunedParameters)
         {
-            ALOGE("ArmnnDriverImpl::prepareModel: Failed to save CL tuned parameters file '%s': %s",
-                  options.GetClTunedParametersFile().c_str(), error.what());
+            // Now that we've done one inference the CL kernel parameters will have been tuned, so save the updated file
+            try
+            {
+                clTunedParameters->Save(options.GetClTunedParametersFile().c_str());
+            }
+            catch (std::exception& error)
+            {
+                ALOGE("ArmnnDriverImpl::prepareModel: Failed to save CL tuned parameters file '%s': %s",
+                      options.GetClTunedParametersFile().c_str(), error.what());
+            }
         }
     }
-
     NotifyCallbackAndCheck(cb, V1_0::ErrorStatus::NONE, preparedModel);
 
     return V1_0::ErrorStatus::NONE;
```
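
The behavioural change in the hunk above is that the warm-up ('dummy') inference is now gated on GpuAcc being among the selected backends. The following self-contained sketch reproduces that `std::find` check; the `Compute` enum here is a stand-in to keep the example compilable on its own (the real enum lives in ArmNN's headers), and this is illustrative code, not driver code.

```cpp
// Standalone illustration of the backend check added in the hunk above.
#include <algorithm>
#include <iostream>
#include <vector>

// Stand-in for armnn::Compute, assumed here only to keep the sketch self-contained.
enum class Compute { CpuRef, CpuAcc, GpuAcc };

// Returns true when GpuAcc is among the requested backends, mirroring the
// std::find condition that now gates the dummy warm-up inference.
bool HasGpuAccBackend(const std::vector<Compute>& backends)
{
    return std::find(backends.begin(), backends.end(), Compute::GpuAcc) != backends.end();
}

int main()
{
    std::vector<Compute> backends = { Compute::CpuAcc, Compute::GpuAcc };
    if (HasGpuAccBackend(backends))
    {
        // Only worth doing for GpuAcc: the dummy run exists to compile
        // (and optionally tune) CL kernels before the first real inference.
        std::cout << "GpuAcc present: run warm-up inference\n";
    }
    return 0;
}
```

Skipping the warm-up when GpuAcc is absent is a sensible design choice: the dummy run exists purely to pre-compile and optionally tune OpenCL kernels, so CPU-only backend lists gain nothing from it and only pay its cost.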