From d1a947fd487eb7674271f759e43507d10e932ab0 Mon Sep 17 00:00:00 2001
From: Narumol Prangnawarat
Date: Mon, 7 Feb 2022 13:12:24 +0000
Subject: Revert "Revert "IVGCVSW-6700 Enable import aligned host memory in android-nn-driver""

This reverts commit 8069603dc44b7673b356f66517cd8b25af8080f0.

* Reason for revert: Try reenable import aligned host memory in android-nn-driver
* Added a check to ArmnnDriverImpl.cpp to not call ExecuteWithDummyInputs with GpuAcc
* Added new android-nn-driver driver options to enable / disable Import and Export
* Import is disabled by default for now due to conv2d issues
* Export is enabled by default

!armnn:7147

Change-Id: I91110c58ebb3931d1c458e3774944e55c1250dd8
Signed-off-by: David Monahan
---
 1.3/ArmnnDriverImpl.cpp | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

(limited to '1.3')

diff --git a/1.3/ArmnnDriverImpl.cpp b/1.3/ArmnnDriverImpl.cpp
index c8b1d968..474e1c1f 100644
--- a/1.3/ArmnnDriverImpl.cpp
+++ b/1.3/ArmnnDriverImpl.cpp
@@ -281,7 +281,9 @@ Return<V1_3::ErrorStatus> ArmnnDriverImpl::prepareArmnnModel_1_3(
                           options.IsGpuProfilingEnabled(),
                           priority,
                           options.isAsyncModelExecutionEnabled(),
-                          options.getNoOfArmnnThreads()));
+                          options.getNoOfArmnnThreads(),
+                          options.isImportEnabled(),
+                          options.isExportEnabled()));
 
     // Run a single 'dummy' inference of the model. This means that CL kernels will get compiled (and tuned if
     // this is enabled) before the first 'real' inference which removes the overhead of the first inference.
@@ -645,6 +647,8 @@ Return<V1_3::ErrorStatus> ArmnnDriverImpl::prepareModelFromCache_1_3(
                           V1_3::Priority::MEDIUM,
                           options.isAsyncModelExecutionEnabled(),
                           options.getNoOfArmnnThreads(),
+                          options.isImportEnabled(),
+                          options.isExportEnabled(),
                           true));
 
     NotifyCallbackAndCheck(cb, V1_3::ErrorStatus::NONE, preparedModel.release());
-- 
cgit v1.2.1
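
Editor's note: the DriverOptions changes that introduce isImportEnabled() / isExportEnabled() are not visible in this view (the diff is limited to '1.3'). The following is only a minimal sketch of how such options could be defaulted, based on the commit message (Import off by default due to conv2d issues, Export on by default); the member names and flag parsing below are assumptions, not the actual patch.

// Hypothetical sketch: accessor names match the diff above, everything else
// (member names, flag strings) is assumed for illustration only.
#include <string>

class DriverOptions
{
public:
    DriverOptions() = default;

    // Import of aligned host memory: disabled by default for now because of
    // the conv2d issues noted in the commit message.
    bool isImportEnabled() const { return m_EnableImport; }

    // Export of aligned host memory: enabled by default.
    bool isExportEnabled() const { return m_EnableExport; }

    // Assumed command-line handling, e.g. "enable-import" / "enable-export".
    void SetBoolOption(const std::string& name, bool value)
    {
        if (name == "enable-import") { m_EnableImport = value; }
        if (name == "enable-export") { m_EnableExport = value; }
    }

private:
    bool m_EnableImport = false; // assumption: off until conv2d issues are fixed
    bool m_EnableExport = true;  // assumption: on by default
};

With defaults like these, the two new constructor arguments passed to the prepared model in the diff above would evaluate to false and true respectively unless overridden by the corresponding driver options.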