about · summary · refs · log · tree · commit · diff
diff options
context:
space:
mode:
authorNarumol Prangnawarat <narumol.prangnawarat@arm.com>2023-05-09 16:01:12 +0100
committerNarumol Prangnawarat <narumol.prangnawarat@arm.com>2023-05-09 16:01:41 +0100
commitfe1827dbbecb64043f403267aeaa48da5965bd9f (patch)
treefca72a7133393c46b41e5a5bb0e1be56e836149d
parentf658554c25d91d530226b8f5e5c5fc02b3275ef0 (diff)
downloadarmnn-fe1827dbbecb64043f403267aeaa48da5965bd9f.tar.gz
IVGCVSW-7626 Change sequence of Interpreter Building
Signed-off-by: Narumol Prangnawarat <narumol.prangnawarat@arm.com> Change-Id: I3f0e224c90a4eea9945183028c9de1b61e75e510
-rw-r--r-- tests/ExecuteNetwork/TfliteExecutor.cpp | 22
1 file changed, 14 insertions(+), 8 deletions(-)
diff --git a/tests/ExecuteNetwork/TfliteExecutor.cpp b/tests/ExecuteNetwork/TfliteExecutor.cpp
index 04f6ddb72a..8412750951 100644
--- a/tests/ExecuteNetwork/TfliteExecutor.cpp
+++ b/tests/ExecuteNetwork/TfliteExecutor.cpp
@@ -23,14 +23,6 @@ TfLiteExecutor::TfLiteExecutor(const ExecuteNetworkParams& params, armnn::IRunti
tflite::ops::builtin::BuiltinOpResolver resolver;
tflite::InterpreterBuilder builder(*m_Model, resolver);
- if (builder(&m_TfLiteInterpreter) != kTfLiteOk)
- {
- LogAndThrow("Error loading the model into the TfLiteInterpreter.");
- }
- if (m_TfLiteInterpreter->AllocateTensors() != kTfLiteOk)
- {
- LogAndThrow("Failed to allocate tensors in the TfLiteInterpreter.");
- }
if (m_Params.m_TfLiteExecutor == ExecuteNetworkParams::TfLiteExecutor::ArmNNTfLiteOpaqueDelegate)
{
@@ -51,6 +43,11 @@ TfLiteExecutor::TfLiteExecutor(const ExecuteNetworkParams& params, armnn::IRunti
// Add Delegate to the builder
builder.AddDelegate(armnnDelegate.get());
+ if (builder(&m_TfLiteInterpreter) != kTfLiteOk)
+ {
+ LogAndThrow("Error loading the model into the TfLiteInterpreter.");
+ }
+
#else
LogAndThrow("Not built with Arm NN Tensorflow-Lite opaque delegate support.");
#endif
@@ -58,6 +55,10 @@ TfLiteExecutor::TfLiteExecutor(const ExecuteNetworkParams& params, armnn::IRunti
else if (m_Params.m_TfLiteExecutor == ExecuteNetworkParams::TfLiteExecutor::ArmNNTfLiteDelegate)
{
#if defined(ARMNN_TFLITE_DELEGATE)
+ if (builder(&m_TfLiteInterpreter) != kTfLiteOk)
+ {
+ LogAndThrow("Error loading the model into the TfLiteInterpreter.");
+ }
// Create the Armnn Delegate
// Populate a DelegateOptions from the ExecuteNetworkParams.
armnnDelegate::DelegateOptions delegateOptions = m_Params.ToDelegateOptions();
@@ -79,6 +80,11 @@ TfLiteExecutor::TfLiteExecutor(const ExecuteNetworkParams& params, armnn::IRunti
std::cout << "Running on TfLite without ArmNN delegate\n";
}
+ if (m_TfLiteInterpreter->AllocateTensors() != kTfLiteOk)
+ {
+ LogAndThrow("Failed to allocate tensors in the TfLiteInterpreter.");
+ }
+
const size_t numInputs = m_TfLiteInterpreter->inputs().size();
for(unsigned int inputIndex = 0; inputIndex < numInputs; ++inputIndex)