From 8069603dc44b7673b356f66517cd8b25af8080f0 Mon Sep 17 00:00:00 2001
From: David Monahan
Date: Wed, 2 Feb 2022 12:17:46 +0000
Subject: Revert "IVGCVSW-6700 Enable import aligned host memory in android-nn-driver"

This reverts commit 8509422e1e7d629f88d6a5e4e4bded8a682f435b.

Reason for revert: Reverting as this might be the cause of several segfaults on CI

Change-Id: I902b4ddaec23dc46a2459f8512ec458e1aa722f3
---
 ArmnnPreparedModel.cpp     | 16 ++--------------
 ArmnnPreparedModel_1_2.cpp |  9 +--------
 ArmnnPreparedModel_1_3.cpp |  9 +--------
 3 files changed, 4 insertions(+), 30 deletions(-)

diff --git a/ArmnnPreparedModel.cpp b/ArmnnPreparedModel.cpp
index ea48c0cc..563abd44 100644
--- a/ArmnnPreparedModel.cpp
+++ b/ArmnnPreparedModel.cpp
@@ -8,8 +8,6 @@
 #include "ArmnnPreparedModel.hpp"
 #include "Utils.hpp"
 
-#include 
-
 #include 
 #include 
 #include 
@@ -307,12 +305,7 @@ void ArmnnPreparedModel::ExecuteGraph(
         else
         {
             ALOGW("ArmnnPreparedModel::ExecuteGraph m_AsyncModelExecutionEnabled false");
-            std::vector<armnn::ImportedInputId> importedInputIds =
-                m_Runtime->ImportInputs(m_NetworkId, inputTensors, armnn::MemorySource::Malloc);
-            std::vector<armnn::ImportedOutputId> importedOutputIds =
-                m_Runtime->ImportOutputs(m_NetworkId, outputTensors, armnn::MemorySource::Malloc);
-            status = m_Runtime->EnqueueWorkload(m_NetworkId, inputTensors, outputTensors,
-                                                importedInputIds, importedOutputIds);
+            status = m_Runtime->EnqueueWorkload(m_NetworkId, inputTensors, outputTensors);
         }
 
         if (status != armnn::Status::Success)
@@ -393,12 +386,7 @@ bool ArmnnPreparedModel::ExecuteWithDummyInputs()
     else
     {
         ALOGW("ArmnnPreparedModel::ExecuteGraph m_AsyncModelExecutionEnabled false");
-        std::vector<armnn::ImportedInputId> importedInputIds =
-            m_Runtime->ImportInputs(m_NetworkId, inputTensors, armnn::MemorySource::Malloc);
-        std::vector<armnn::ImportedOutputId> importedOutputIds =
-            m_Runtime->ImportOutputs(m_NetworkId, outputTensors, armnn::MemorySource::Malloc);
-        status = m_Runtime->EnqueueWorkload(m_NetworkId, inputTensors, outputTensors,
-                                            importedInputIds, importedOutputIds);
+        status = m_Runtime->EnqueueWorkload(m_NetworkId, inputTensors, outputTensors);
     }
     if (status != armnn::Status::Success)
     {
diff --git a/ArmnnPreparedModel_1_2.cpp b/ArmnnPreparedModel_1_2.cpp
index 08790d38..29055a70 100644
--- a/ArmnnPreparedModel_1_2.cpp
+++ b/ArmnnPreparedModel_1_2.cpp
@@ -9,8 +9,6 @@
 
 #include "Utils.hpp"
 
-#include 
-
 #include 
 #include 
 #include 
@@ -530,12 +528,7 @@ bool ArmnnPreparedModel_1_2::ExecuteGraph(
         else
         {
             ALOGW("ArmnnPreparedModel_1_2::ExecuteGraph m_AsyncModelExecutionEnabled false");
-            std::vector<armnn::ImportedInputId> importedInputIds =
-                m_Runtime->ImportInputs(m_NetworkId, inputTensors, armnn::MemorySource::Malloc);
-            std::vector<armnn::ImportedOutputId> importedOutputIds =
-                m_Runtime->ImportOutputs(m_NetworkId, outputTensors, armnn::MemorySource::Malloc);
-            status = m_Runtime->EnqueueWorkload(m_NetworkId, inputTensors, outputTensors,
-                                                importedInputIds, importedOutputIds);
+            status = m_Runtime->EnqueueWorkload(m_NetworkId, inputTensors, outputTensors);
         }
 
         if (cb.ctx.measureTimings == V1_2::MeasureTiming::YES)
diff --git a/ArmnnPreparedModel_1_3.cpp b/ArmnnPreparedModel_1_3.cpp
index 236c4f27..abd135e8 100644
--- a/ArmnnPreparedModel_1_3.cpp
+++ b/ArmnnPreparedModel_1_3.cpp
@@ -12,8 +12,6 @@
 #include "ArmnnPreparedModel_1_3.hpp"
 #include "Utils.hpp"
 
-#include 
-
 #include 
 #include 
 #include 
@@ -823,12 +821,7 @@ Return ArmnnPreparedModel_1_3::ExecuteGraph(
         else
         {
             ALOGW("ArmnnPreparedModel_1_3::ExecuteGraph m_AsyncModelExecutionEnabled false");
-            std::vector<armnn::ImportedInputId> importedInputIds =
-                m_Runtime->ImportInputs(m_NetworkId, inputTensors, armnn::MemorySource::Malloc);
-            std::vector<armnn::ImportedOutputId> importedOutputIds =
-                m_Runtime->ImportOutputs(m_NetworkId, outputTensors, armnn::MemorySource::Malloc);
-            status = m_Runtime->EnqueueWorkload(m_NetworkId, inputTensors, outputTensors,
-                                                importedInputIds, importedOutputIds);
+            status = m_Runtime->EnqueueWorkload(m_NetworkId, inputTensors, outputTensors);
         }
 
         if (cb.ctx.measureTimings == V1_2::MeasureTiming::YES)
-- 
cgit v1.2.1
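
For reference, the two call paths touched by this revert can be sketched as follows, using only the calls visible in the hunks above. This is a minimal sketch, assuming an armnn::IRuntime that already has a network loaded under netId and caller-populated inputTensors/outputTensors (setup omitted); the helper names RunWithImport and RunWithoutImport are illustrative only, not part of the driver.

#include <armnn/ArmNN.hpp>
#include <armnn/Types.hpp>

#include <vector>

// Reverted path: import the caller's malloc-backed buffers first, then pass the
// imported IDs to EnqueueWorkload alongside the tensors.
armnn::Status RunWithImport(armnn::IRuntime& runtime,
                            armnn::NetworkId netId,
                            const armnn::InputTensors& inputTensors,
                            const armnn::OutputTensors& outputTensors)
{
    std::vector<armnn::ImportedInputId> importedInputIds =
        runtime.ImportInputs(netId, inputTensors, armnn::MemorySource::Malloc);
    std::vector<armnn::ImportedOutputId> importedOutputIds =
        runtime.ImportOutputs(netId, outputTensors, armnn::MemorySource::Malloc);
    return runtime.EnqueueWorkload(netId, inputTensors, outputTensors,
                                   importedInputIds, importedOutputIds);
}

// Restored path: EnqueueWorkload with the tensors alone; the runtime handles the
// input/output buffers itself (typically by copying them).
armnn::Status RunWithoutImport(armnn::IRuntime& runtime,
                               armnn::NetworkId netId,
                               const armnn::InputTensors& inputTensors,
                               const armnn::OutputTensors& outputTensors)
{
    return runtime.EnqueueWorkload(netId, inputTensors, outputTensors);
}

Both EnqueueWorkload signatures and the ImportInputs/ImportOutputs calls appear verbatim in the diff; only the wrapper functions and parameter passing are assumed for illustration.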