From 4869c543e57680e798e98643c98bb1816092956d Mon Sep 17 00:00:00 2001
From: Cathal Corbett <cathal.corbett@arm.com>
Date: Thu, 4 Aug 2022 17:58:09 +0100
Subject: IVGCVSW-7157 ExNet. interpreter chooses a different input type.

Signed-off-by: Cathal Corbett <cathal.corbett@arm.com>
Change-Id: If00d8dab2846c484a1969fb152cb9f8bd16e1b3e
---
 tests/ExecuteNetwork/TfliteExecutor.cpp | 26 +++++++++++++-------------
 1 file changed, 13 insertions(+), 13 deletions(-)

diff --git a/tests/ExecuteNetwork/TfliteExecutor.cpp b/tests/ExecuteNetwork/TfliteExecutor.cpp
index 98b6c9dad1..59c69f9d6a 100644
--- a/tests/ExecuteNetwork/TfliteExecutor.cpp
+++ b/tests/ExecuteNetwork/TfliteExecutor.cpp
@@ -30,7 +30,7 @@ TfLiteExecutor::TfLiteExecutor(const ExecuteNetworkParams& params) : m_Params(pa
                                  armnnDelegate::TfLiteArmnnDelegateDelete);
         // Register armnn_delegate to TfLiteInterpreter
         status = m_TfLiteInterpreter->ModifyGraphWithDelegate(std::move(theArmnnDelegate));
-        if (status == kTfLiteError)
+        if (status != kTfLiteOk)
         {
             LogAndThrow("Could not register ArmNN TfLite Delegate to TfLiteInterpreter");
         }
@@ -40,14 +40,14 @@ TfLiteExecutor::TfLiteExecutor(const ExecuteNetworkParams& params) : m_Params(pa
         std::cout << "Running on TfLite without ArmNN delegate\n";
     }
 
-    armnn::Optional<std::string> dataFile = m_Params.m_GenerateTensorData
-                                            ? armnn::EmptyOptional()
-                                            : armnn::MakeOptional<std::string>(m_Params.m_InputTensorDataFilePaths[0]);
-
     const size_t numInputs = m_Params.m_InputNames.size();
 
     for(unsigned int inputIndex = 0; inputIndex < numInputs; ++inputIndex)
     {
+        armnn::Optional<std::string> dataFile = m_Params.m_GenerateTensorData
+                                                ? armnn::EmptyOptional()
+                                                : armnn::MakeOptional<std::string>(m_Params.m_InputTensorDataFilePaths[inputIndex]);
+
         int input = m_TfLiteInterpreter->inputs()[inputIndex];
 
         TfLiteIntArray* inputDims = m_TfLiteInterpreter->tensor(input)->dims;
@@ -58,39 +58,39 @@ TfLiteExecutor::TfLiteExecutor(const ExecuteNetworkParams& params) : m_Params(pa
             inputSize *= inputDims->data[dim];
         }
 
-        const auto& inputName = m_TfLiteInterpreter->input_tensor(input)->name;
-        const auto& dataType = m_TfLiteInterpreter->input_tensor(input)->type;
+        const auto& inputName = m_TfLiteInterpreter->tensor(input)->name;
+        const auto& dataType = m_TfLiteInterpreter->tensor(input)->type;
 
         switch (dataType)
         {
             case kTfLiteFloat32:
             {
                 auto inputData = m_TfLiteInterpreter->typed_tensor<float>(input);
-                PopulateTensorWithData(inputData, inputSize, dataFile, inputName);
+                PopulateTensorWithData<float>(inputData, inputSize, dataFile, inputName);
                 break;
             }
             case kTfLiteInt32:
             {
-                auto inputData = m_TfLiteInterpreter->typed_tensor<int>(input);
-                PopulateTensorWithData(inputData, inputSize, dataFile, inputName);
+                auto inputData = m_TfLiteInterpreter->typed_tensor<int32_t>(input);
+                PopulateTensorWithData<int32_t>(inputData, inputSize, dataFile, inputName);
                 break;
             }
             case kTfLiteUInt8:
             {
                 auto inputData = m_TfLiteInterpreter->typed_tensor<uint8_t>(input);
-                PopulateTensorWithData(inputData, inputSize, dataFile, inputName);
+                PopulateTensorWithData<uint8_t>(inputData, inputSize, dataFile, inputName);
                 break;
             }
             case kTfLiteInt16:
             {
                 auto inputData = m_TfLiteInterpreter->typed_tensor<int16_t>(input);
-                PopulateTensorWithData(inputData, inputSize, dataFile, inputName);
+                PopulateTensorWithData<int16_t>(inputData, inputSize, dataFile, inputName);
                 break;
             }
             case kTfLiteInt8:
             {
                 auto inputData = m_TfLiteInterpreter->typed_tensor<int8_t>(input);
-                PopulateTensorWithData(inputData, inputSize, dataFile, inputName);
+                PopulateTensorWithData<int8_t>(inputData, inputSize, dataFile, inputName);
                 break;
             }
             default:
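
A minimal sketch (not part of the patch; the helper name, includes, and output formatting are assumptions) of the tflite::Interpreter indexing distinction this change relies on: inputs() yields tensor ids, tensor(id) looks a tensor up by id, while input_tensor(i) expects the input position i. Passing a tensor id to input_tensor(), as the removed lines did, can return an unrelated tensor and therefore report the wrong name and data type.

    #include <cstdio>
    #include <tensorflow/lite/interpreter.h>

    // Hypothetical standalone helper, not part of ExecuteNetwork: reads the name
    // and type of the inputIndex-th model input the way the patched code does.
    void PrintInputInfo(tflite::Interpreter& interpreter, size_t inputIndex)
    {
        // inputs() returns the tensor ids of the model inputs.
        int input = interpreter.inputs()[inputIndex];

        // tensor(id) looks the tensor up by id, so name and type belong to the
        // tensor that will actually be populated. input_tensor(input) would treat
        // the id as an input position and could pick a different tensor.
        const TfLiteTensor* tensorInfo = interpreter.tensor(input);

        std::printf("input %zu: name=%s type=%d\n",
                    inputIndex, tensorInfo->name, static_cast<int>(tensorInfo->type));
    }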