| author | Teresa Charlin <teresa.charlinreyes@arm.com> | 2022-08-05 13:57:04 +0100 |
|---|---|---|
| committer | Nikhil Raj <nikhil.raj@arm.com> | 2022-08-05 16:34:06 +0100 |
| commit | c814e8031dfe9565e94cf89ef193b6af944c73b1 (patch) | |
| tree | 0fc372058f3c7115e36470930e16f65bd4a48b47 /tests/ExecuteNetwork/TfliteExecutor.cpp | |
| parent | d4d1c31c077d3b73b625465a00926ac16ad90f73 (diff) | |
| download | armnn-c814e8031dfe9565e94cf89ef193b6af944c73b1.tar.gz | |
IVGCVSW-7179 Segfault running ssd_mobilenet_v2_int8 with ExNet and delegate.
* The model was declared as a local variable in the TfLiteExecutor constructor instead of initializing m_Model, so it was destroyed before the interpreter used it.
* Working with this model, which has 4 outputs, we also saw that the printed output names were not correct; this is fixed too.
Signed-off-by: Teresa Charlin <teresa.charlinreyes@arm.com>
Change-Id: I48f194ad4ba6af43d43e6eea336eb87ffee02dcc
Diffstat (limited to 'tests/ExecuteNetwork/TfliteExecutor.cpp')
-rw-r--r-- | tests/ExecuteNetwork/TfliteExecutor.cpp | 7 |
1 file changed, 3 insertions(+), 4 deletions(-)
diff --git a/tests/ExecuteNetwork/TfliteExecutor.cpp b/tests/ExecuteNetwork/TfliteExecutor.cpp
index 59c69f9d6a..d750fcccc7 100644
--- a/tests/ExecuteNetwork/TfliteExecutor.cpp
+++ b/tests/ExecuteNetwork/TfliteExecutor.cpp
@@ -7,13 +7,12 @@
 TfLiteExecutor::TfLiteExecutor(const ExecuteNetworkParams& params) : m_Params(params)
 {
-    std::unique_ptr<tflite::FlatBufferModel> model =
-        tflite::FlatBufferModel::BuildFromFile(m_Params.m_ModelPath.c_str());
+    m_Model = tflite::FlatBufferModel::BuildFromFile(m_Params.m_ModelPath.c_str());

     m_TfLiteInterpreter = std::make_unique<Interpreter>();
     tflite::ops::builtin::BuiltinOpResolver resolver;

-    tflite::InterpreterBuilder builder(*model, resolver);
+    tflite::InterpreterBuilder builder(*m_Model, resolver);
     builder(&m_TfLiteInterpreter);
     m_TfLiteInterpreter->AllocateTensors();
@@ -144,7 +143,7 @@ std::vector<const void *> TfLiteExecutor::Execute()
             outputSize *= outputDims->data[dim];
         }

-        std::cout << m_Params.m_OutputNames[outputIndex] << ": ";
+        std::cout << m_TfLiteInterpreter->tensor(tfLiteDelegateOutputId)->name << ": ";
         results.push_back(m_TfLiteInterpreter->tensor(tfLiteDelegateOutputId)->allocation);

         switch (m_TfLiteInterpreter->tensor(tfLiteDelegateOutputId)->type)
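Why the local variable segfaulted: tflite::InterpreterBuilder and the interpreter it produces keep references into the FlatBufferModel's buffer rather than copying it, so a model owned only by a local std::unique_ptr in the constructor is freed as soon as the constructor returns, and the first inference through the delegate then touches freed memory. The sketch below illustrates the corrected ownership pattern and how output names can be read from the interpreter itself; it is a minimal standalone example under those assumptions, not ExecuteNetwork code, and the class name MinimalTfLiteRunner and its constructor parameter are illustrative only.

```cpp
// A minimal, self-contained sketch (not the ExecuteNetwork sources). It assumes the
// TensorFlow Lite C++ headers and a valid .tflite file path; the class name
// MinimalTfLiteRunner is hypothetical.
#include <iostream>
#include <memory>
#include <stdexcept>
#include <string>

#include "tensorflow/lite/interpreter.h"
#include "tensorflow/lite/kernels/register.h"
#include "tensorflow/lite/model.h"

class MinimalTfLiteRunner
{
public:
    explicit MinimalTfLiteRunner(const std::string& modelPath)
    {
        // Keep the FlatBufferModel in a member: the InterpreterBuilder and the
        // interpreter it creates reference the model's buffer rather than copying
        // it. A local std::unique_ptr here would be destroyed when the constructor
        // returns, leaving the interpreter pointing at freed memory.
        m_Model = tflite::FlatBufferModel::BuildFromFile(modelPath.c_str());
        if (!m_Model)
        {
            throw std::runtime_error("Failed to load model: " + modelPath);
        }

        tflite::ops::builtin::BuiltinOpResolver resolver;
        tflite::InterpreterBuilder builder(*m_Model, resolver);
        if (builder(&m_Interpreter) != kTfLiteOk || !m_Interpreter)
        {
            throw std::runtime_error("Failed to build interpreter");
        }
        m_Interpreter->AllocateTensors();
    }

    void PrintOutputNames() const
    {
        // Take output names from the interpreter's own tensors instead of a
        // user-supplied list, so they stay correct for models with several outputs.
        for (int outputId : m_Interpreter->outputs())
        {
            std::cout << m_Interpreter->tensor(outputId)->name << "\n";
        }
    }

private:
    // Declared before m_Interpreter so the interpreter is destroyed first.
    std::unique_ptr<tflite::FlatBufferModel> m_Model;
    std::unique_ptr<tflite::Interpreter> m_Interpreter;
};
```

Declaring m_Model before m_Interpreter also gives a safe destruction order: members are destroyed in reverse declaration order, so the interpreter is torn down before the model buffer it references.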