author     Matthew Bentham <matthew.bentham@arm.com>  2019-04-01 17:17:58 +0100
committer  Matthew Bentham <matthew.bentham@arm.com>  2019-04-02 10:22:16 +0100
commit     16196e267833178dae62926ff090c50ec6813ad4 (patch)
tree       03c0f7cd2490a1e68a63f71ef08c6a6c591f363c /ArmnnDriverImpl.cpp
parent     a6542c53e929172a36b4d3787686c13a7c569386 (diff)
MLCE-110 Propagate error from armnn EnqueueWorkload
Change-Id: Ic53b1cdbdd3a7d656932651c74911940affc09b6
Signed-off-by: Matthew Bentham <matthew.bentham@arm.com>
Diffstat (limited to 'ArmnnDriverImpl.cpp')
-rw-r--r--  ArmnnDriverImpl.cpp  5
1 file changed, 4 insertions(+), 1 deletion(-)
diff --git a/ArmnnDriverImpl.cpp b/ArmnnDriverImpl.cpp
index a3c2e10f..f6456ee1 100644
--- a/ArmnnDriverImpl.cpp
+++ b/ArmnnDriverImpl.cpp
@@ -208,7 +208,10 @@ Return<ErrorStatus> ArmnnDriverImpl<HalPolicy>::prepareModel(
     // Run a single 'dummy' inference of the model. This means that CL kernels will get compiled (and tuned if
     // this is enabled) before the first 'real' inference which removes the overhead of the first inference.
-    preparedModel->ExecuteWithDummyInputs();
+    if (!preparedModel->ExecuteWithDummyInputs())
+    {
+        return FailPrepareModel(ErrorStatus::GENERAL_FAILURE, "Network could not be executed", cb);
+    }
     if (clTunedParameters &&
         options.GetClTunedParametersMode() == armnn::IGpuAccTunedParameters::Mode::UpdateTunedParameters)
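
For context, a minimal sketch of the error path this commit propagates: armnn's IRuntime::EnqueueWorkload() reports failure through its armnn::Status return value rather than by throwing, so the status has to be checked and surfaced to the caller. The helper name RunDummyInference and its parameters below are illustrative assumptions, not the driver's actual ExecuteWithDummyInputs() implementation.

#include <armnn/ArmNN.hpp>

// Illustrative sketch (assumed helper, not the driver source): run a single
// inference and convert the armnn::Status returned by EnqueueWorkload into a
// bool, which is the shape of result the ExecuteWithDummyInputs() check above
// relies on.
bool RunDummyInference(armnn::IRuntime& runtime,
                       armnn::NetworkId networkId,
                       const armnn::InputTensors& dummyInputs,
                       const armnn::OutputTensors& dummyOutputs)
{
    // EnqueueWorkload signals failure via its return value, so it must be
    // checked explicitly; silently dropping it is what this change fixes on
    // the caller side.
    armnn::Status status = runtime.EnqueueWorkload(networkId, dummyInputs, dummyOutputs);
    return status == armnn::Status::Success;
}

Presumably ExecuteWithDummyInputs() performs an equivalent check internally; the hunk above then turns its boolean result into a GENERAL_FAILURE reported through the prepareModel callback rather than ignoring the failed dummy inference.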