author    Teresa Charlin <teresa.charlinreyes@arm.com>  2022-08-02 14:17:39 +0100
committer TeresaARM <teresa.charlinreyes@arm.com>       2022-08-03 11:05:52 +0000
commit    0ea0a08aad80d3c4aaf41bcb8916e14e8f88edeb (patch)
tree      86285d8c09a5e95a532a0b45699df15b4b25a4da
parent    a8d919d11c9b5c8bc6b4a8d9c07fac5d8f1a12b4 (diff)
download  armnn-0ea0a08aad80d3c4aaf41bcb8916e14e8f88edeb.tar.gz
Bug fixes for the refactor of ExecuteNetwork.

* Generate the dot file whenever -v is given; it was previously only
  generated when using the delegate as the executor.
* Read output names from m_Params.m_OutputNames instead of
  m_TfLiteInterpreter.
* Fix typo: "delage" instead of "delegate".
* Template QAsymmS8 as int8, instead of uint8.

Change-Id: Ie13ae0f7e6395c0ebcb5ecda32e72082dee8aa6c
Signed-off-by: Teresa Charlin <teresa.charlinreyes@arm.com>
Change-Id: Iac97a23927ba42290ebeb3446bbd36da15045e07
-rw-r--r--  tests/ExecuteNetwork/ArmNNExecutor.cpp        | 11
-rw-r--r--  tests/ExecuteNetwork/ExecuteNetworkParams.cpp |  8
-rw-r--r--  tests/ExecuteNetwork/TfliteExecutor.cpp       | 18
3 files changed, 19 insertions(+), 18 deletions(-)
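A note on the QAsymmS8 fix before the diff: QAsymmS8 is a signed 8-bit
quantized type, so reading its output buffer as uint8_t misinterprets every
value with the top bit set and skews the RMSE comparison. A standalone sketch
of the difference (illustrative only, not code from this patch):

    #include <cstdint>
    #include <iostream>

    int main()
    {
        // One quantized byte as stored in an output buffer.
        const unsigned char raw = 0x80;
        // Same bits, different values: signed (QAsymmS8) vs unsigned (QAsymmU8).
        std::cout << static_cast<int>(static_cast<int8_t>(raw))   // -128
                  << " vs "
                  << static_cast<int>(static_cast<uint8_t>(raw))  // 128
                  << std::endl;
        return 0;
    }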
diff --git a/tests/ExecuteNetwork/ArmNNExecutor.cpp b/tests/ExecuteNetwork/ArmNNExecutor.cpp
index 5be3383061..d1892f9d42 100644
--- a/tests/ExecuteNetwork/ArmNNExecutor.cpp
+++ b/tests/ExecuteNetwork/ArmNNExecutor.cpp
@@ -533,6 +533,15 @@ armnn::IOptimizedNetworkPtr ArmNNExecutor::OptimizeNetwork(armnn::INetwork* netw
LogAndThrow("Optimize returned nullptr");
}
+ // If v,visualize-optimized-model is enabled then construct a file name for the dot file.
+ if (m_Params.m_EnableLayerDetails)
+ {
+ fs::path filename = m_Params.m_ModelPath;
+ filename.replace_extension("dot");
+ std::fstream file(filename.c_str(), std::ios_base::out);
+ optNet->SerializeToDot(file);
+ }
+
return optNet;
}
@@ -683,12 +692,12 @@ void ArmNNExecutor::CompareAndPrintResult(std::vector<const void*> otherOutput)
break;
}
case armnn::DataType::QSymmS8:
+ case armnn::DataType::QAsymmS8:
{
result = ComputeRMSE<int8_t>(outputTensor.second.GetMemoryArea(), otherOutput[index++], size);
break;
}
case armnn::DataType::QAsymmU8:
- case armnn::DataType::QAsymmS8:
{
result = ComputeRMSE<uint8_t>(outputTensor.second.GetMemoryArea(), otherOutput[index++], size);
break;
diff --git a/tests/ExecuteNetwork/ExecuteNetworkParams.cpp b/tests/ExecuteNetwork/ExecuteNetworkParams.cpp
index f341c30738..17c08717e4 100644
--- a/tests/ExecuteNetwork/ExecuteNetworkParams.cpp
+++ b/tests/ExecuteNetwork/ExecuteNetworkParams.cpp
@@ -171,14 +171,6 @@ armnnDelegate::DelegateOptions ExecuteNetworkParams::ToDelegateOptions() const
}
delegateOptions.SetOptimizerOptions(options);
- // If v,visualize-optimized-model is enabled then construct a file name for the dot file.
- if (m_EnableLayerDetails)
- {
- fs::path filename = m_ModelPath;
- filename.replace_extension("dot");
- delegateOptions.SetSerializeToDot(filename);
- }
-
return delegateOptions;
}
diff --git a/tests/ExecuteNetwork/TfliteExecutor.cpp b/tests/ExecuteNetwork/TfliteExecutor.cpp
index dc495be5c3..98b6c9dad1 100644
--- a/tests/ExecuteNetwork/TfliteExecutor.cpp
+++ b/tests/ExecuteNetwork/TfliteExecutor.cpp
@@ -144,7 +144,7 @@ std::vector<const void *> TfLiteExecutor::Execute()
outputSize *= outputDims->data[dim];
}
- std::cout << m_TfLiteInterpreter->tensor(tfLiteDelegateOutputId)->name << ": ";
+ std::cout << m_Params.m_OutputNames[outputIndex] << ": ";
results.push_back(m_TfLiteInterpreter->tensor(tfLiteDelegateOutputId)->allocation);
switch (m_TfLiteInterpreter->tensor(tfLiteDelegateOutputId)->type)
@@ -152,38 +152,38 @@ std::vector<const void *> TfLiteExecutor::Execute()
case kTfLiteFloat32:
{
- auto tfLiteDelageOutputData = m_TfLiteInterpreter->typed_tensor<float>(tfLiteDelegateOutputId);
+ auto tfLiteDelegateOutputData = m_TfLiteInterpreter->typed_tensor<float>(tfLiteDelegateOutputId);
for (int i = 0; i < outputSize; ++i)
{
- fprintf(outputTensorFile, "%f ", tfLiteDelageOutputData[i]);
+ fprintf(outputTensorFile, "%f ", tfLiteDelegateOutputData[i]);
}
break;
}
case kTfLiteInt32:
{
- auto tfLiteDelageOutputData = m_TfLiteInterpreter->typed_tensor<int32_t>(tfLiteDelegateOutputId);
+ auto tfLiteDelegateOutputData = m_TfLiteInterpreter->typed_tensor<int32_t>(tfLiteDelegateOutputId);
for (int i = 0; i < outputSize; ++i)
{
- fprintf(outputTensorFile, "%d ", tfLiteDelageOutputData[i]);
+ fprintf(outputTensorFile, "%d ", tfLiteDelegateOutputData[i]);
}
break;
}
case kTfLiteUInt8:
{
- auto tfLiteDelageOutputData = m_TfLiteInterpreter->typed_tensor<uint8_t>(tfLiteDelegateOutputId);
+ auto tfLiteDelegateOutputData = m_TfLiteInterpreter->typed_tensor<uint8_t>(tfLiteDelegateOutputId);
for (int i = 0; i < outputSize; ++i)
{
- fprintf(outputTensorFile, "%u ", tfLiteDelageOutputData[i]);
+ fprintf(outputTensorFile, "%u ", tfLiteDelegateOutputData[i]);
}
break;
}
case kTfLiteInt8:
{
- auto tfLiteDelageOutputData = m_TfLiteInterpreter->typed_tensor<int8_t>(tfLiteDelegateOutputId);
+ auto tfLiteDelegateOutputData = m_TfLiteInterpreter->typed_tensor<int8_t>(tfLiteDelegateOutputId);
for (int i = 0; i < outputSize; ++i)
{
- fprintf(outputTensorFile, "%d ", tfLiteDelageOutputData[i]);
+ fprintf(outputTensorFile, "%d ", tfLiteDelegateOutputData[i]);
}
break;
}
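For context, the dot-file block added to ArmNNExecutor::OptimizeNetwork above
reduces to the following. A minimal sketch, assuming an already-optimized
network; the helper name and example path are ours, while SerializeToDot is
the Arm NN API the patch calls:

    #include <armnn/INetwork.hpp>
    #include <filesystem>
    #include <fstream>

    namespace fs = std::filesystem;

    // Hypothetical helper mirroring the added block: derive <model>.dot from
    // the model path and dump the optimized graph in Graphviz dot format.
    void WriteDotFile(armnn::IOptimizedNetwork& optNet, const fs::path& modelPath)
    {
        fs::path filename = modelPath;       // e.g. "model.tflite"
        filename.replace_extension("dot");   // -> "model.dot"
        std::fstream file(filename.c_str(), std::ios_base::out);
        optNet.SerializeToDot(file);
    }

Because this now runs in ArmNNExecutor::OptimizeNetwork rather than being set
through the delegate options (the block removed from ExecuteNetworkParams.cpp),
-v produces the dot file regardless of which executor runs the model.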