aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorTeresa Charlin <teresa.charlinreyes@arm.com>2022-08-05 13:57:04 +0100
committerTeresaARM <teresa.charlinreyes@arm.com>2022-08-05 13:22:55 +0000
commit7dbaaa57148c061396c241911936d2e40fd68b7a (patch)
tree0fc372058f3c7115e36470930e16f65bd4a48b47
parent2803c8d09dd380b361f92dd57fc897eab5033a95 (diff)
downloadarmnn-7dbaaa57148c061396c241911936d2e40fd68b7a.tar.gz
IVGCVSW-7179 Segfault running ssd_mobilenet_v2_int8 with ExNet and delegate.
* model was declared in the TfLiteExecutor constructor, instead of initializing m_Model * Working with this model that has 4 outputs, we saw that the output names were not correct; this got fixed too Signed-off-by: Teresa Charlin <teresa.charlinreyes@arm.com> Change-Id: I48f194ad4ba6af43d43e6eea336eb87ffee02dcc
-rw-r--r--tests/ExecuteNetwork/TfliteExecutor.cpp7
1 file changed, 3 insertions, 4 deletions
diff --git a/tests/ExecuteNetwork/TfliteExecutor.cpp b/tests/ExecuteNetwork/TfliteExecutor.cpp
index 59c69f9d6a..d750fcccc7 100644
--- a/tests/ExecuteNetwork/TfliteExecutor.cpp
+++ b/tests/ExecuteNetwork/TfliteExecutor.cpp
@@ -7,13 +7,12 @@
TfLiteExecutor::TfLiteExecutor(const ExecuteNetworkParams& params) : m_Params(params)
{
- std::unique_ptr<tflite::FlatBufferModel> model =
- tflite::FlatBufferModel::BuildFromFile(m_Params.m_ModelPath.c_str());
+ m_Model = tflite::FlatBufferModel::BuildFromFile(m_Params.m_ModelPath.c_str());
m_TfLiteInterpreter = std::make_unique<Interpreter>();
tflite::ops::builtin::BuiltinOpResolver resolver;
- tflite::InterpreterBuilder builder(*model, resolver);
+ tflite::InterpreterBuilder builder(*m_Model, resolver);
builder(&m_TfLiteInterpreter);
m_TfLiteInterpreter->AllocateTensors();
@@ -144,7 +143,7 @@ std::vector<const void *> TfLiteExecutor::Execute()
outputSize *= outputDims->data[dim];
}
- std::cout << m_Params.m_OutputNames[outputIndex] << ": ";
+ std::cout << m_TfLiteInterpreter->tensor(tfLiteDelegateOutputId)->name << ": ";
results.push_back(m_TfLiteInterpreter->tensor(tfLiteDelegateOutputId)->allocation);
switch (m_TfLiteInterpreter->tensor(tfLiteDelegateOutputId)->type)