From d1a947fd487eb7674271f759e43507d10e932ab0 Mon Sep 17 00:00:00 2001
From: Narumol Prangnawarat
Date: Mon, 7 Feb 2022 13:12:24 +0000
Subject: Revert "Revert "IVGCVSW-6700 Enable import aligned host memory in
 android-nn-driver""

This reverts commit 8069603dc44b7673b356f66517cd8b25af8080f0.

* Reason for revert: try to re-enable import of aligned host memory in android-nn-driver
* Added a check to ArmNNDriverImpl.cpp to not call ExecuteWithDummyInputs with GpuAcc
* Added new android-nn-driver driver options to enable/disable Import and Export
* Import is disabled by default for now due to conv2d issues
* Export is enabled by default

!armnn:7147

Change-Id: I91110c58ebb3931d1c458e3774944e55c1250dd8
Signed-off-by: David Monahan
---
 1.2/ArmnnDriverImpl.cpp | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/1.2/ArmnnDriverImpl.cpp b/1.2/ArmnnDriverImpl.cpp
index 3274a8ab..1c31384a 100644
--- a/1.2/ArmnnDriverImpl.cpp
+++ b/1.2/ArmnnDriverImpl.cpp
@@ -267,7 +267,9 @@ Return<V1_0::ErrorStatus> ArmnnDriverImpl::prepareArmnnModel_1_2(
                 options.GetRequestInputsAndOutputsDumpDir(),
                 options.IsGpuProfilingEnabled(),
                 options.isAsyncModelExecutionEnabled(),
-                options.getNoOfArmnnThreads()));
+                options.getNoOfArmnnThreads(),
+                options.isImportEnabled(),
+                options.isExportEnabled()));
 
     // Run a single 'dummy' inference of the model. This means that CL kernels will get compiled (and tuned if
     // this is enabled) before the first 'real' inference which removes the overhead of the first inference.
@@ -630,6 +632,8 @@ Return<V1_0::ErrorStatus> ArmnnDriverImpl::prepareModelFromCache(
             options.IsGpuProfilingEnabled(),
            options.isAsyncModelExecutionEnabled(),
             options.getNoOfArmnnThreads(),
+            options.isImportEnabled(),
+            options.isExportEnabled(),
             true));
 
     NotifyCallbackAndCheck(cb, V1_0::ErrorStatus::NONE, preparedModel.release());
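
For context, the new isImportEnabled()/isExportEnabled() driver options typically end up selecting an armnn::MemorySource when the prepared model loads the network into the ArmNN runtime. Below is a minimal sketch of that mapping, assuming only ArmNN's public INetworkProperties API; MakeNetworkProperties is a hypothetical helper for illustration, not code from this patch.

    // Sketch (not from this patch): mapping the driver's boolean
    // import/export options onto ArmNN memory sources for network loading.
    #include <armnn/IRuntime.hpp> // armnn::INetworkProperties
    #include <armnn/Types.hpp>    // armnn::MemorySource

    // Hypothetical helper: choose memory sources from the two driver options.
    armnn::INetworkProperties MakeNetworkProperties(bool asyncEnabled,
                                                    bool importEnabled,
                                                    bool exportEnabled)
    {
        // MemorySource::Malloc lets ArmNN import/export suitably aligned host
        // memory zero-copy; MemorySource::Undefined falls back to copying.
        const armnn::MemorySource inputSource =
            importEnabled ? armnn::MemorySource::Malloc : armnn::MemorySource::Undefined;
        const armnn::MemorySource outputSource =
            exportEnabled ? armnn::MemorySource::Malloc : armnn::MemorySource::Undefined;

        return armnn::INetworkProperties(asyncEnabled, inputSource, outputSource);
    }

With the defaults described in the commit message (Import off, Export on), a mapping like this would copy input buffers but export output buffers zero-copy, consistent with keeping import disabled until the conv2d issues are resolved.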