From 0ea0a08aad80d3c4aaf41bcb8916e14e8f88edeb Mon Sep 17 00:00:00 2001 From: Teresa Charlin Date: Tue, 2 Aug 2022 14:17:39 +0100 Subject: Bug Fixes for refactor of the ExecuteNetwork. * dot file to be generated when -v is given. It was previously only generated when using the delegate as the executor. * output name read from m_Params.m_OutputNames instead of m_TfLiteInterpreter. * typo: "delage" corrected to "delegate". * QAsymmS8 templated as int8 instead of uint8. Change-Id: Ie13ae0f7e6395c0ebcb5ecda32e72082dee8aa6c Signed-off-by: Teresa Charlin Change-Id: Iac97a23927ba42290ebeb3446bbd36da15045e07 --- tests/ExecuteNetwork/ArmNNExecutor.cpp | 11 ++++++++++- tests/ExecuteNetwork/ExecuteNetworkParams.cpp | 8 -------- tests/ExecuteNetwork/TfliteExecutor.cpp | 18 +++++++++--------- 3 files changed, 19 insertions(+), 18 deletions(-) diff --git a/tests/ExecuteNetwork/ArmNNExecutor.cpp b/tests/ExecuteNetwork/ArmNNExecutor.cpp index 5be3383061..d1892f9d42 100644 --- a/tests/ExecuteNetwork/ArmNNExecutor.cpp +++ b/tests/ExecuteNetwork/ArmNNExecutor.cpp @@ -533,6 +533,15 @@ armnn::IOptimizedNetworkPtr ArmNNExecutor::OptimizeNetwork(armnn::INetwork* netw LogAndThrow("Optimize returned nullptr"); } + // If v,visualize-optimized-model is enabled then construct a file name for the dot file. 
+ if (m_Params.m_EnableLayerDetails) + { + fs::path filename = m_Params.m_ModelPath; + filename.replace_extension("dot"); + std::fstream file(filename.c_str(), std::ios_base::out); + optNet->SerializeToDot(file); + } + return optNet; } @@ -683,12 +692,12 @@ void ArmNNExecutor::CompareAndPrintResult(std::vector otherOutput) break; } case armnn::DataType::QSymmS8: + case armnn::DataType::QAsymmS8: { result = ComputeRMSE(outputTensor.second.GetMemoryArea(), otherOutput[index++], size); break; } case armnn::DataType::QAsymmU8: - case armnn::DataType::QAsymmS8: { result = ComputeRMSE(outputTensor.second.GetMemoryArea(), otherOutput[index++], size); break; diff --git a/tests/ExecuteNetwork/ExecuteNetworkParams.cpp b/tests/ExecuteNetwork/ExecuteNetworkParams.cpp index f341c30738..17c08717e4 100644 --- a/tests/ExecuteNetwork/ExecuteNetworkParams.cpp +++ b/tests/ExecuteNetwork/ExecuteNetworkParams.cpp @@ -171,14 +171,6 @@ armnnDelegate::DelegateOptions ExecuteNetworkParams::ToDelegateOptions() const } delegateOptions.SetOptimizerOptions(options); - // If v,visualize-optimized-model is enabled then construct a file name for the dot file. 
- if (m_EnableLayerDetails) - { - fs::path filename = m_ModelPath; - filename.replace_extension("dot"); - delegateOptions.SetSerializeToDot(filename); - } - return delegateOptions; } diff --git a/tests/ExecuteNetwork/TfliteExecutor.cpp b/tests/ExecuteNetwork/TfliteExecutor.cpp index dc495be5c3..98b6c9dad1 100644 --- a/tests/ExecuteNetwork/TfliteExecutor.cpp +++ b/tests/ExecuteNetwork/TfliteExecutor.cpp @@ -144,7 +144,7 @@ std::vector TfLiteExecutor::Execute() outputSize *= outputDims->data[dim]; } - std::cout << m_TfLiteInterpreter->tensor(tfLiteDelegateOutputId)->name << ": "; + std::cout << m_Params.m_OutputNames[outputIndex] << ": "; results.push_back(m_TfLiteInterpreter->tensor(tfLiteDelegateOutputId)->allocation); switch (m_TfLiteInterpreter->tensor(tfLiteDelegateOutputId)->type) @@ -152,38 +152,38 @@ std::vector TfLiteExecutor::Execute() case kTfLiteFloat32: { - auto tfLiteDelageOutputData = m_TfLiteInterpreter->typed_tensor(tfLiteDelegateOutputId); + auto tfLiteDelegateOutputData = m_TfLiteInterpreter->typed_tensor(tfLiteDelegateOutputId); for (int i = 0; i < outputSize; ++i) { - fprintf(outputTensorFile, "%f ", tfLiteDelageOutputData[i]); + fprintf(outputTensorFile, "%f ", tfLiteDelegateOutputData[i]); } break; } case kTfLiteInt32: { - auto tfLiteDelageOutputData = m_TfLiteInterpreter->typed_tensor(tfLiteDelegateOutputId); + auto tfLiteDelegateOutputData = m_TfLiteInterpreter->typed_tensor(tfLiteDelegateOutputId); for (int i = 0; i < outputSize; ++i) { - fprintf(outputTensorFile, "%d ", tfLiteDelageOutputData[i]); + fprintf(outputTensorFile, "%d ", tfLiteDelegateOutputData[i]); } break; } case kTfLiteUInt8: { - auto tfLiteDelageOutputData = m_TfLiteInterpreter->typed_tensor(tfLiteDelegateOutputId); + auto tfLiteDelegateOutputData = m_TfLiteInterpreter->typed_tensor(tfLiteDelegateOutputId); for (int i = 0; i < outputSize; ++i) { - fprintf(outputTensorFile, "%u ", tfLiteDelageOutputData[i]); + fprintf(outputTensorFile, "%u ", tfLiteDelegateOutputData[i]); } 
break; } case kTfLiteInt8: { - auto tfLiteDelageOutputData = m_TfLiteInterpreter->typed_tensor(tfLiteDelegateOutputId); + auto tfLiteDelegateOutputData = m_TfLiteInterpreter->typed_tensor(tfLiteDelegateOutputId); for (int i = 0; i < outputSize; ++i) { - fprintf(outputTensorFile, "%d ", tfLiteDelageOutputData[i]); + fprintf(outputTensorFile, "%d ", tfLiteDelegateOutputData[i]); } break; } -- cgit v1.2.1