about summary refs log tree commit diff
path: root/ArmnnPreparedModel.cpp
diff options
context:
space:
mode:
authorNarumol Prangnawarat <narumol.prangnawarat@arm.com>2022-02-07 13:12:24 +0000
committerDavid Monahan <david.monahan@arm.com>2022-02-16 14:42:00 +0000
commitd1a947fd487eb7674271f759e43507d10e932ab0 (patch)
tree17ad41e29935a657c5025485173b4ded85c4a47c /ArmnnPreparedModel.cpp
parent29520c4c785fff938586ec231cbf5e51dadbf614 (diff)
downloadandroid-nn-driver-d1a947fd487eb7674271f759e43507d10e932ab0.tar.gz
Revert "Revert "IVGCVSW-6700 Enable import aligned host memory in android-nn-driver""
This reverts commit 8069603dc44b7673b356f66517cd8b25af8080f0.

* Reason for revert: Try reenable import aligned host memory in android-nn-driver
* Added a check to ArmNNDriverImpl.cpp to not call ExecuteWithDummyInputs with GpuAcc
* Added new android-nn-driver driver options to enable / disable Import and Export
* Import is disabled by default for now due to conv2d issues
* Export is enabled by default

!armnn:7147

Change-Id: I91110c58ebb3931d1c458e3774944e55c1250dd8
Signed-off-by: David Monahan <David.Monahan@arm.com>
Diffstat (limited to 'ArmnnPreparedModel.cpp')
-rw-r--r--  ArmnnPreparedModel.cpp | 36
1 file changed, 33 insertions(+), 3 deletions(-)
diff --git a/ArmnnPreparedModel.cpp b/ArmnnPreparedModel.cpp
index 38f1bc20..326351c0 100644
--- a/ArmnnPreparedModel.cpp
+++ b/ArmnnPreparedModel.cpp
@@ -8,6 +8,8 @@
#include "ArmnnPreparedModel.hpp"
#include "Utils.hpp"
+#include <armnn/Types.hpp>
+
#include <log/log.h>
#include <OperationsUtils.h>
#include <ValidateHal.h>
@@ -116,7 +118,9 @@ ArmnnPreparedModel<HalVersion>::ArmnnPreparedModel(armnn::NetworkId networkId,
const std::string& requestInputsAndOutputsDumpDir,
const bool gpuProfilingEnabled,
const bool asyncModelExecutionEnabled,
- const unsigned int numberOfThreads)
+ const unsigned int numberOfThreads,
+ const bool importEnabled,
+ const bool exportEnabled)
: m_NetworkId(networkId)
, m_Runtime(runtime)
, m_Model(model)
@@ -124,6 +128,8 @@ ArmnnPreparedModel<HalVersion>::ArmnnPreparedModel(armnn::NetworkId networkId,
, m_RequestInputsAndOutputsDumpDir(requestInputsAndOutputsDumpDir)
, m_GpuProfilingEnabled(gpuProfilingEnabled)
, m_AsyncModelExecutionEnabled(asyncModelExecutionEnabled)
+ , m_EnableImport(importEnabled)
+ , m_EnableExport(exportEnabled)
{
// Enable profiling if required.
m_Runtime->GetProfiler(m_NetworkId)->EnableProfiling(m_GpuProfilingEnabled);
@@ -308,7 +314,19 @@ void ArmnnPreparedModel<HalVersion>::ExecuteGraph(
else
{
ALOGW("ArmnnPreparedModel::ExecuteGraph m_AsyncModelExecutionEnabled false");
- status = m_Runtime->EnqueueWorkload(m_NetworkId, inputTensors, outputTensors);
+ // Create a vector of Input and Output Ids which can be imported. An empty vector means all will be copied.
+ std::vector<armnn::ImportedInputId> importedInputIds;
+ if (m_EnableImport)
+ {
+ importedInputIds = m_Runtime->ImportInputs(m_NetworkId, inputTensors, armnn::MemorySource::Malloc);
+ }
+ std::vector<armnn::ImportedOutputId> importedOutputIds;
+ if (m_EnableExport)
+ {
+ importedOutputIds = m_Runtime->ImportOutputs(m_NetworkId, outputTensors, armnn::MemorySource::Malloc);
+ }
+ status = m_Runtime->EnqueueWorkload(m_NetworkId, inputTensors, outputTensors,
+ importedInputIds, importedOutputIds);
}
if (status != armnn::Status::Success)
@@ -389,7 +407,19 @@ bool ArmnnPreparedModel<HalVersion>::ExecuteWithDummyInputs()
else
{
ALOGW("ArmnnPreparedModel::ExecuteGraph m_AsyncModelExecutionEnabled false");
- status = m_Runtime->EnqueueWorkload(m_NetworkId, inputTensors, outputTensors);
+ // Create a vector of Input and Output Ids which can be imported. An empty vector means all will be copied.
+ std::vector<armnn::ImportedInputId> importedInputIds;
+ if (m_EnableImport)
+ {
+ importedInputIds = m_Runtime->ImportInputs(m_NetworkId, inputTensors, armnn::MemorySource::Malloc);
+ }
+ std::vector<armnn::ImportedOutputId> importedOutputIds;
+ if (m_EnableExport)
+ {
+ importedOutputIds = m_Runtime->ImportOutputs(m_NetworkId, outputTensors, armnn::MemorySource::Malloc);
+ }
+ status = m_Runtime->EnqueueWorkload(m_NetworkId, inputTensors, outputTensors,
+ importedInputIds, importedOutputIds);
}
if (status != armnn::Status::Success)
{