about summary refs log tree commit diff
diff options
context:
space:
mode:
authorMatthew Bentham <matthew.bentham@arm.com>2019-04-01 17:17:58 +0100
committerMatthew Bentham <matthew.bentham@arm.com>2019-04-02 10:22:16 +0100
commit16196e267833178dae62926ff090c50ec6813ad4 (patch)
tree03c0f7cd2490a1e68a63f71ef08c6a6c591f363c
parenta6542c53e929172a36b4d3787686c13a7c569386 (diff)
downloadandroid-nn-driver-16196e267833178dae62926ff090c50ec6813ad4.tar.gz
MLCE-110 Propagate error from armnn EnqueueWorkload
Change-Id: Ic53b1cdbdd3a7d656932651c74911940affc09b6
Signed-off-by: Matthew Bentham <matthew.bentham@arm.com>
-rw-r--r--ArmnnDriverImpl.cpp5
-rw-r--r--ArmnnPreparedModel.cpp19
-rw-r--r--ArmnnPreparedModel.hpp3
3 files changed, 22 insertions, 5 deletions
diff --git a/ArmnnDriverImpl.cpp b/ArmnnDriverImpl.cpp
index a3c2e10f..f6456ee1 100644
--- a/ArmnnDriverImpl.cpp
+++ b/ArmnnDriverImpl.cpp
@@ -208,7 +208,10 @@ Return<ErrorStatus> ArmnnDriverImpl<HalPolicy>::prepareModel(
// Run a single 'dummy' inference of the model. This means that CL kernels will get compiled (and tuned if
// this is enabled) before the first 'real' inference which removes the overhead of the first inference.
- preparedModel->ExecuteWithDummyInputs();
+ if (!preparedModel->ExecuteWithDummyInputs())
+ {
+ return FailPrepareModel(ErrorStatus::GENERAL_FAILURE, "Network could not be executed", cb);
+ }
if (clTunedParameters &&
options.GetClTunedParametersMode() == armnn::IGpuAccTunedParameters::Mode::UpdateTunedParameters)
diff --git a/ArmnnPreparedModel.cpp b/ArmnnPreparedModel.cpp
index d7f727f5..edb1c934 100644
--- a/ArmnnPreparedModel.cpp
+++ b/ArmnnPreparedModel.cpp
@@ -239,7 +239,13 @@ void ArmnnPreparedModel<HalVersion>::ExecuteGraph(
// run it
try
{
- m_Runtime->EnqueueWorkload(m_NetworkId, *pInputTensors, *pOutputTensors);
+ armnn::Status status = m_Runtime->EnqueueWorkload(m_NetworkId, *pInputTensors, *pOutputTensors);
+ if (status != armnn::Status::Success)
+ {
+ ALOGW("EnqueueWorkload failed");
+ NotifyCallbackAndCheck(callback, ErrorStatus::GENERAL_FAILURE, "ArmnnPreparedModel::ExecuteGraph");
+ return;
+ }
}
catch (armnn::Exception& e)
{
@@ -262,7 +268,7 @@ void ArmnnPreparedModel<HalVersion>::ExecuteGraph(
}
template<typename HalVersion>
-void ArmnnPreparedModel<HalVersion>::ExecuteWithDummyInputs()
+bool ArmnnPreparedModel<HalVersion>::ExecuteWithDummyInputs()
{
std::vector<std::vector<char>> storage;
armnn::InputTensors inputTensors;
@@ -287,12 +293,19 @@ void ArmnnPreparedModel<HalVersion>::ExecuteWithDummyInputs()
try
{
- m_Runtime->EnqueueWorkload(m_NetworkId, inputTensors, outputTensors);
+ armnn::Status status = m_Runtime->EnqueueWorkload(m_NetworkId, inputTensors, outputTensors);
+ if (status != armnn::Status::Success)
+ {
+ ALOGW("ExecuteWithDummyInputs: EnqueueWorkload failed");
+ return false;
+ }
}
catch (armnn::Exception& e)
{
ALOGW("ExecuteWithDummyInputs: armnn::Exception caught from EnqueueWorkload: %s", e.what());
+ return false;
}
+ return true;
}
///
diff --git a/ArmnnPreparedModel.hpp b/ArmnnPreparedModel.hpp
index 3c4b32b7..f6008b80 100644
--- a/ArmnnPreparedModel.hpp
+++ b/ArmnnPreparedModel.hpp
@@ -42,7 +42,8 @@ public:
const ::android::sp<IExecutionCallback>& callback);
/// Executes this model with dummy inputs (e.g. all zeroes).
- void ExecuteWithDummyInputs();
+ /// \return false on failure, otherwise true
+ bool ExecuteWithDummyInputs();
private:
template <typename TensorBindingCollection>