diff options
author | Teresa Charlin <teresa.charlinreyes@arm.com> | 2022-08-02 14:17:39 +0100 |
---|---|---|
committer | Nikhil Raj <nikhil.raj@arm.com> | 2022-08-05 15:53:06 +0100 |
commit | 98d3fd80fa821d4db74c559718f7b6be0fa0d309 (patch) | |
tree | 86285d8c09a5e95a532a0b45699df15b4b25a4da /tests/ExecuteNetwork | |
parent | c4988e9077e364ce9b4e4a8901424ccc90653ba1 (diff) | |
download | armnn-98d3fd80fa821d4db74c559718f7b6be0fa0d309.tar.gz |
Bug fixes for the refactor of ExecuteNetwork.
* dot file is now generated when -v is given; previously it was only generated when using the delegate as executor
* output name is read from m_Params.m_OutputNames instead of m_TfLiteInterpreter
* fixed typo: "delage" corrected to "delegate"
* QAsymmS8 is now templated as int8 instead of uint8
Change-Id: Ie13ae0f7e6395c0ebcb5ecda32e72082dee8aa6c
Signed-off-by: Teresa Charlin <teresa.charlinreyes@arm.com>
Change-Id: Iac97a23927ba42290ebeb3446bbd36da15045e07
Diffstat (limited to 'tests/ExecuteNetwork')
-rw-r--r-- | tests/ExecuteNetwork/ArmNNExecutor.cpp | 11 | ||||
-rw-r--r-- | tests/ExecuteNetwork/ExecuteNetworkParams.cpp | 8 | ||||
-rw-r--r-- | tests/ExecuteNetwork/TfliteExecutor.cpp | 18 |
3 files changed, 19 insertions, 18 deletions
diff --git a/tests/ExecuteNetwork/ArmNNExecutor.cpp b/tests/ExecuteNetwork/ArmNNExecutor.cpp index 5be3383061..d1892f9d42 100644 --- a/tests/ExecuteNetwork/ArmNNExecutor.cpp +++ b/tests/ExecuteNetwork/ArmNNExecutor.cpp @@ -533,6 +533,15 @@ armnn::IOptimizedNetworkPtr ArmNNExecutor::OptimizeNetwork(armnn::INetwork* netw LogAndThrow("Optimize returned nullptr"); } + // If v,visualize-optimized-model is enabled then construct a file name for the dot file. + if (m_Params.m_EnableLayerDetails) + { + fs::path filename = m_Params.m_ModelPath; + filename.replace_extension("dot"); + std::fstream file(filename.c_str(), std::ios_base::out); + optNet->SerializeToDot(file); + } + return optNet; } @@ -683,12 +692,12 @@ void ArmNNExecutor::CompareAndPrintResult(std::vector<const void*> otherOutput) break; } case armnn::DataType::QSymmS8: + case armnn::DataType::QAsymmS8: { result = ComputeRMSE<int8_t>(outputTensor.second.GetMemoryArea(), otherOutput[index++], size); break; } case armnn::DataType::QAsymmU8: - case armnn::DataType::QAsymmS8: { result = ComputeRMSE<uint8_t>(outputTensor.second.GetMemoryArea(), otherOutput[index++], size); break; diff --git a/tests/ExecuteNetwork/ExecuteNetworkParams.cpp b/tests/ExecuteNetwork/ExecuteNetworkParams.cpp index f341c30738..17c08717e4 100644 --- a/tests/ExecuteNetwork/ExecuteNetworkParams.cpp +++ b/tests/ExecuteNetwork/ExecuteNetworkParams.cpp @@ -171,14 +171,6 @@ armnnDelegate::DelegateOptions ExecuteNetworkParams::ToDelegateOptions() const } delegateOptions.SetOptimizerOptions(options); - // If v,visualize-optimized-model is enabled then construct a file name for the dot file. 
- if (m_EnableLayerDetails) - { - fs::path filename = m_ModelPath; - filename.replace_extension("dot"); - delegateOptions.SetSerializeToDot(filename); - } - return delegateOptions; } diff --git a/tests/ExecuteNetwork/TfliteExecutor.cpp b/tests/ExecuteNetwork/TfliteExecutor.cpp index dc495be5c3..98b6c9dad1 100644 --- a/tests/ExecuteNetwork/TfliteExecutor.cpp +++ b/tests/ExecuteNetwork/TfliteExecutor.cpp @@ -144,7 +144,7 @@ std::vector<const void *> TfLiteExecutor::Execute() outputSize *= outputDims->data[dim]; } - std::cout << m_TfLiteInterpreter->tensor(tfLiteDelegateOutputId)->name << ": "; + std::cout << m_Params.m_OutputNames[outputIndex] << ": "; results.push_back(m_TfLiteInterpreter->tensor(tfLiteDelegateOutputId)->allocation); switch (m_TfLiteInterpreter->tensor(tfLiteDelegateOutputId)->type) @@ -152,38 +152,38 @@ std::vector<const void *> TfLiteExecutor::Execute() case kTfLiteFloat32: { - auto tfLiteDelageOutputData = m_TfLiteInterpreter->typed_tensor<float>(tfLiteDelegateOutputId); + auto tfLiteDelegateOutputData = m_TfLiteInterpreter->typed_tensor<float>(tfLiteDelegateOutputId); for (int i = 0; i < outputSize; ++i) { - fprintf(outputTensorFile, "%f ", tfLiteDelageOutputData[i]); + fprintf(outputTensorFile, "%f ", tfLiteDelegateOutputData[i]); } break; } case kTfLiteInt32: { - auto tfLiteDelageOutputData = m_TfLiteInterpreter->typed_tensor<int32_t>(tfLiteDelegateOutputId); + auto tfLiteDelegateOutputData = m_TfLiteInterpreter->typed_tensor<int32_t>(tfLiteDelegateOutputId); for (int i = 0; i < outputSize; ++i) { - fprintf(outputTensorFile, "%d ", tfLiteDelageOutputData[i]); + fprintf(outputTensorFile, "%d ", tfLiteDelegateOutputData[i]); } break; } case kTfLiteUInt8: { - auto tfLiteDelageOutputData = m_TfLiteInterpreter->typed_tensor<uint8_t>(tfLiteDelegateOutputId); + auto tfLiteDelegateOutputData = m_TfLiteInterpreter->typed_tensor<uint8_t>(tfLiteDelegateOutputId); for (int i = 0; i < outputSize; ++i) { - fprintf(outputTensorFile, "%u ", 
tfLiteDelageOutputData[i]); + fprintf(outputTensorFile, "%u ", tfLiteDelegateOutputData[i]); } break; } case kTfLiteInt8: { - auto tfLiteDelageOutputData = m_TfLiteInterpreter->typed_tensor<int8_t>(tfLiteDelegateOutputId); + auto tfLiteDelegateOutputData = m_TfLiteInterpreter->typed_tensor<int8_t>(tfLiteDelegateOutputId); for (int i = 0; i < outputSize; ++i) { - fprintf(outputTensorFile, "%d ", tfLiteDelageOutputData[i]); + fprintf(outputTensorFile, "%d ", tfLiteDelegateOutputData[i]); } break; } |