From 8509422e1e7d629f88d6a5e4e4bded8a682f435b Mon Sep 17 00:00:00 2001
From: Narumol Prangnawarat
Date: Fri, 28 Jan 2022 23:26:07 +0000
Subject: IVGCVSW-6700 Enable import aligned host memory in android-nn-driver

 !armnn:7025

Signed-off-by: Narumol Prangnawarat
Change-Id: Iaa6112fb52d0f2942e6c52f90d96a8dc96000518
---
 ArmnnPreparedModel.cpp     | 16 ++++++++++++++--
 ArmnnPreparedModel_1_2.cpp |  9 ++++++++-
 ArmnnPreparedModel_1_3.cpp |  9 ++++++++-
 3 files changed, 30 insertions(+), 4 deletions(-)

diff --git a/ArmnnPreparedModel.cpp b/ArmnnPreparedModel.cpp
index 563abd44..ea48c0cc 100644
--- a/ArmnnPreparedModel.cpp
+++ b/ArmnnPreparedModel.cpp
@@ -8,6 +8,8 @@
 #include "ArmnnPreparedModel.hpp"
 #include "Utils.hpp"
 
+#include <armnn/Types.hpp>
+
 #include
 #include
 #include
@@ -305,7 +307,12 @@ void ArmnnPreparedModel<HalVersion>::ExecuteGraph(
     else
     {
         ALOGW("ArmnnPreparedModel::ExecuteGraph m_AsyncModelExecutionEnabled false");
-        status = m_Runtime->EnqueueWorkload(m_NetworkId, inputTensors, outputTensors);
+        std::vector<armnn::ImportedInputId> importedInputIds =
+            m_Runtime->ImportInputs(m_NetworkId, inputTensors, armnn::MemorySource::Malloc);
+        std::vector<armnn::ImportedOutputId> importedOutputIds =
+            m_Runtime->ImportOutputs(m_NetworkId, outputTensors, armnn::MemorySource::Malloc);
+        status = m_Runtime->EnqueueWorkload(m_NetworkId, inputTensors, outputTensors,
+                                            importedInputIds, importedOutputIds);
     }
 
     if (status != armnn::Status::Success)
@@ -386,7 +393,12 @@ bool ArmnnPreparedModel<HalVersion>::ExecuteWithDummyInputs()
     else
     {
         ALOGW("ArmnnPreparedModel::ExecuteGraph m_AsyncModelExecutionEnabled false");
-        status = m_Runtime->EnqueueWorkload(m_NetworkId, inputTensors, outputTensors);
+        std::vector<armnn::ImportedInputId> importedInputIds =
+            m_Runtime->ImportInputs(m_NetworkId, inputTensors, armnn::MemorySource::Malloc);
+        std::vector<armnn::ImportedOutputId> importedOutputIds =
+            m_Runtime->ImportOutputs(m_NetworkId, outputTensors, armnn::MemorySource::Malloc);
+        status = m_Runtime->EnqueueWorkload(m_NetworkId, inputTensors, outputTensors,
+                                            importedInputIds, importedOutputIds);
     }
     if (status != armnn::Status::Success)
     {
diff --git a/ArmnnPreparedModel_1_2.cpp b/ArmnnPreparedModel_1_2.cpp
index 29055a70..08790d38 100644
--- a/ArmnnPreparedModel_1_2.cpp
+++ b/ArmnnPreparedModel_1_2.cpp
@@ -9,6 +9,8 @@
 #include "Utils.hpp"
 
+#include <armnn/Types.hpp>
+
 #include
 #include
 #include
@@ -528,7 +530,12 @@ bool ArmnnPreparedModel_1_2<HalVersion>::ExecuteGraph(
     else
     {
         ALOGW("ArmnnPreparedModel_1_2::ExecuteGraph m_AsyncModelExecutionEnabled false");
-        status = m_Runtime->EnqueueWorkload(m_NetworkId, inputTensors, outputTensors);
+        std::vector<armnn::ImportedInputId> importedInputIds =
+            m_Runtime->ImportInputs(m_NetworkId, inputTensors, armnn::MemorySource::Malloc);
+        std::vector<armnn::ImportedOutputId> importedOutputIds =
+            m_Runtime->ImportOutputs(m_NetworkId, outputTensors, armnn::MemorySource::Malloc);
+        status = m_Runtime->EnqueueWorkload(m_NetworkId, inputTensors, outputTensors,
+                                            importedInputIds, importedOutputIds);
     }
 
     if (cb.ctx.measureTimings == V1_2::MeasureTiming::YES)
diff --git a/ArmnnPreparedModel_1_3.cpp b/ArmnnPreparedModel_1_3.cpp
index abd135e8..236c4f27 100644
--- a/ArmnnPreparedModel_1_3.cpp
+++ b/ArmnnPreparedModel_1_3.cpp
@@ -12,6 +12,8 @@
 #include "ArmnnPreparedModel_1_3.hpp"
 #include "Utils.hpp"
 
+#include <armnn/Types.hpp>
+
 #include
 #include
 #include
@@ -821,7 +823,12 @@ Return<V1_3::ErrorStatus> ArmnnPreparedModel_1_3<HalVersion>::ExecuteGraph(
     else
     {
         ALOGW("ArmnnPreparedModel_1_3::ExecuteGraph m_AsyncModelExecutionEnabled false");
-        status = m_Runtime->EnqueueWorkload(m_NetworkId, inputTensors, outputTensors);
+        std::vector<armnn::ImportedInputId> importedInputIds =
+            m_Runtime->ImportInputs(m_NetworkId, inputTensors, armnn::MemorySource::Malloc);
+        std::vector<armnn::ImportedOutputId> importedOutputIds =
+            m_Runtime->ImportOutputs(m_NetworkId, outputTensors, armnn::MemorySource::Malloc);
+        status = m_Runtime->EnqueueWorkload(m_NetworkId, inputTensors, outputTensors,
+                                            importedInputIds, importedOutputIds);
     }
 
     if (cb.ctx.measureTimings == V1_2::MeasureTiming::YES)
--
cgit v1.2.1
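For context, a minimal standalone sketch of the import-then-enqueue pattern the patch introduces, written against the armnn::IRuntime calls visible in the diff (ImportInputs, ImportOutputs, and the EnqueueWorkload overload taking imported IDs). The helper name RunWithImportedIO and its parameters are illustrative and not part of the patch; it assumes the caller's input/output buffers are host (malloc) memory with alignment acceptable to the backend, which is what the commit title refers to.

    #include <armnn/IRuntime.hpp>
    #include <armnn/Types.hpp>
    #include <vector>

    // Illustrative helper (not from the patch): import pre-allocated, suitably
    // aligned host buffers and run inference using the imported IDs.
    armnn::Status RunWithImportedIO(armnn::IRuntime& runtime,
                                    armnn::NetworkId networkId,
                                    const armnn::InputTensors& inputTensors,
                                    const armnn::OutputTensors& outputTensors)
    {
        // Import the host-side buffers so backends that support importing can
        // read/write them directly instead of copying per inference.
        std::vector<armnn::ImportedInputId> importedInputIds =
            runtime.ImportInputs(networkId, inputTensors, armnn::MemorySource::Malloc);
        std::vector<armnn::ImportedOutputId> importedOutputIds =
            runtime.ImportOutputs(networkId, outputTensors, armnn::MemorySource::Malloc);

        // Pass the imported IDs alongside the tensors, as the patched driver does.
        return runtime.EnqueueWorkload(networkId, inputTensors, outputTensors,
                                       importedInputIds, importedOutputIds);
    }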