-rw-r--r--   tests/ExecuteNetwork/ArmNNExecutor.cpp        | 11
-rw-r--r--   tests/ExecuteNetwork/ExecuteNetworkParams.cpp |  8
-rw-r--r--   tests/ExecuteNetwork/TfliteExecutor.cpp       | 18
3 files changed, 19 insertions, 18 deletions
diff --git a/tests/ExecuteNetwork/ArmNNExecutor.cpp b/tests/ExecuteNetwork/ArmNNExecutor.cpp
index 5be3383061..d1892f9d42 100644
--- a/tests/ExecuteNetwork/ArmNNExecutor.cpp
+++ b/tests/ExecuteNetwork/ArmNNExecutor.cpp
@@ -533,6 +533,15 @@ armnn::IOptimizedNetworkPtr ArmNNExecutor::OptimizeNetwork(armnn::INetwork* netw
         LogAndThrow("Optimize returned nullptr");
     }
 
+    // If v,visualize-optimized-model is enabled then construct a file name for the dot file.
+    if (m_Params.m_EnableLayerDetails)
+    {
+        fs::path filename = m_Params.m_ModelPath;
+        filename.replace_extension("dot");
+        std::fstream file(filename.c_str(), std::ios_base::out);
+        optNet->SerializeToDot(file);
+    }
+
     return optNet;
 }
 
@@ -683,12 +692,12 @@ void ArmNNExecutor::CompareAndPrintResult(std::vector<const void*> otherOutput)
                 break;
             }
             case armnn::DataType::QSymmS8:
+            case armnn::DataType::QAsymmS8:
             {
                 result = ComputeRMSE<int8_t>(outputTensor.second.GetMemoryArea(), otherOutput[index++], size);
                 break;
             }
             case armnn::DataType::QAsymmU8:
-            case armnn::DataType::QAsymmS8:
             {
                 result = ComputeRMSE<uint8_t>(outputTensor.second.GetMemoryArea(), otherOutput[index++], size);
                 break;
diff --git a/tests/ExecuteNetwork/ExecuteNetworkParams.cpp b/tests/ExecuteNetwork/ExecuteNetworkParams.cpp
index f341c30738..17c08717e4 100644
--- a/tests/ExecuteNetwork/ExecuteNetworkParams.cpp
+++ b/tests/ExecuteNetwork/ExecuteNetworkParams.cpp
@@ -171,14 +171,6 @@ armnnDelegate::DelegateOptions ExecuteNetworkParams::ToDelegateOptions() const
     }
     delegateOptions.SetOptimizerOptions(options);
 
-    // If v,visualize-optimized-model is enabled then construct a file name for the dot file.
-    if (m_EnableLayerDetails)
-    {
-        fs::path filename = m_ModelPath;
-        filename.replace_extension("dot");
-        delegateOptions.SetSerializeToDot(filename);
-    }
-
     return delegateOptions;
 }
 
diff --git a/tests/ExecuteNetwork/TfliteExecutor.cpp b/tests/ExecuteNetwork/TfliteExecutor.cpp
index dc495be5c3..98b6c9dad1 100644
--- a/tests/ExecuteNetwork/TfliteExecutor.cpp
+++ b/tests/ExecuteNetwork/TfliteExecutor.cpp
@@ -144,7 +144,7 @@ std::vector<const void *> TfLiteExecutor::Execute()
             outputSize *= outputDims->data[dim];
         }
 
-        std::cout << m_TfLiteInterpreter->tensor(tfLiteDelegateOutputId)->name << ": ";
+        std::cout << m_Params.m_OutputNames[outputIndex] << ": ";
         results.push_back(m_TfLiteInterpreter->tensor(tfLiteDelegateOutputId)->allocation);
 
         switch (m_TfLiteInterpreter->tensor(tfLiteDelegateOutputId)->type)
@@ -152,38 +152,38 @@ std::vector<const void *> TfLiteExecutor::Execute()
             case kTfLiteFloat32:
             {
-                auto tfLiteDelageOutputData = m_TfLiteInterpreter->typed_tensor<float>(tfLiteDelegateOutputId);
+                auto tfLiteDelegateOutputData = m_TfLiteInterpreter->typed_tensor<float>(tfLiteDelegateOutputId);
                 for (int i = 0; i < outputSize; ++i)
                 {
-                    fprintf(outputTensorFile, "%f ", tfLiteDelageOutputData[i]);
+                    fprintf(outputTensorFile, "%f ", tfLiteDelegateOutputData[i]);
                 }
                 break;
             }
             case kTfLiteInt32:
             {
-                auto tfLiteDelageOutputData = m_TfLiteInterpreter->typed_tensor<int32_t>(tfLiteDelegateOutputId);
+                auto tfLiteDelegateOutputData = m_TfLiteInterpreter->typed_tensor<int32_t>(tfLiteDelegateOutputId);
                 for (int i = 0; i < outputSize; ++i)
                 {
-                    fprintf(outputTensorFile, "%d ", tfLiteDelageOutputData[i]);
+                    fprintf(outputTensorFile, "%d ", tfLiteDelegateOutputData[i]);
                 }
                 break;
             }
             case kTfLiteUInt8:
             {
-                auto tfLiteDelageOutputData = m_TfLiteInterpreter->typed_tensor<uint8_t>(tfLiteDelegateOutputId);
+                auto tfLiteDelegateOutputData = m_TfLiteInterpreter->typed_tensor<uint8_t>(tfLiteDelegateOutputId);
                 for (int i = 0; i < outputSize; ++i)
                 {
-                    fprintf(outputTensorFile, "%u ", tfLiteDelageOutputData[i]);
+                    fprintf(outputTensorFile, "%u ", tfLiteDelegateOutputData[i]);
                 }
                 break;
             }
             case kTfLiteInt8:
             {
-                auto tfLiteDelageOutputData = m_TfLiteInterpreter->typed_tensor<int8_t>(tfLiteDelegateOutputId);
+                auto tfLiteDelegateOutputData = m_TfLiteInterpreter->typed_tensor<int8_t>(tfLiteDelegateOutputId);
                 for (int i = 0; i < outputSize; ++i)
                 {
-                    fprintf(outputTensorFile, "%d ", tfLiteDelageOutputData[i]);
+                    fprintf(outputTensorFile, "%d ", tfLiteDelegateOutputData[i]);
                 }
                 break;
             }
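
For reference, the serialization step that moved into ArmNNExecutor::OptimizeNetwork can be exercised in isolation. Below is a minimal, self-contained sketch of the same logic, assuming std::filesystem is aliased as fs as in the executor; the helper name SerializeOptimizedGraph is hypothetical and introduced only for illustration:

    // Hypothetical helper mirroring the block added to OptimizeNetwork above:
    // derive "<model>.dot" from the model path and dump the optimized graph.
    #include <armnn/INetwork.hpp>
    #include <filesystem>
    #include <fstream>
    #include <string>

    namespace fs = std::filesystem;

    void SerializeOptimizedGraph(const armnn::IOptimizedNetworkPtr& optNet,
                                 const std::string& modelPath)
    {
        // e.g. "mobilenet.tflite" becomes "mobilenet.dot" alongside the model.
        fs::path filename = modelPath;
        filename.replace_extension("dot");

        std::fstream file(filename.c_str(), std::ios_base::out);
        optNet->SerializeToDot(file);
    }

The resulting file is ordinary Graphviz input and can be rendered with, for example, dot -Tsvg mobilenet.dot -o mobilenet.svg. The "v,visualize-optimized-model" in the comment is the option spec, i.e. ExecuteNetwork's -v / --visualize-optimized-model switch.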
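
The case-label move in CompareAndPrintResult is a correctness fix: QAsymmS8 tensors hold signed bytes, so computing the RMSE through the uint8_t instantiation misreads negative values. A minimal standalone sketch (not ArmNN code) of the distortion this avoids:

    // Two QAsymmS8 values one quantization step apart, straddling zero.
    #include <cstdint>
    #include <cstdlib>
    #include <iostream>

    int main()
    {
        int8_t a = -1; // e.g. a value from the ArmNN output buffer
        int8_t b = 0;  // the corresponding value from the other runtime

        // Signed view (the fixed behaviour): difference of 1.
        int signedDiff = std::abs(static_cast<int>(a) - static_cast<int>(b));

        // Unsigned view (the old behaviour): 0xFF vs 0x00 reads as 255 vs 0.
        int unsignedDiff = std::abs(static_cast<int>(static_cast<uint8_t>(a)) -
                                    static_cast<int>(static_cast<uint8_t>(b)));

        std::cout << "signed diff: " << signedDiff        // prints 1
                  << ", unsigned diff: " << unsignedDiff  // prints 255
                  << "\n";
        return 0;
    }

A difference of one quantization step is reported as 255 under the unsigned reinterpretation, so the old grouping could grossly inflate the RMSE for any QAsymmS8 output containing negative values.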