From d047262b7fb68ad1fe0a2273ee79ab7952c72a6e Mon Sep 17 00:00:00 2001
From: Colm Donelan
Date: Mon, 6 Mar 2023 12:34:54 +0000
Subject: Fixing compare output feature in ExecuteNetwork

The -A, -B and -C options in ExecuteNetwork were attempting to calculate the
RMS error over the output tensors, but the calculation was mixing tensor
elements and bytes.

This patch changes the calculation to a per byte RMS error.

Signed-off-by: Colm Donelan
Change-Id: If30230a16cfed1a8804b4d54ed1abcd371f26664
---
 tests/ExecuteNetwork/ArmNNExecutor.cpp              | 40 ++------
 .../ExecuteNetworkProgramOptions.cpp                | 12 +++---
 tests/ExecuteNetwork/TfliteExecutor.cpp             | 44 ++----
 3 files changed, 14 insertions(+), 82 deletions(-)

(limited to 'tests/ExecuteNetwork')

diff --git a/tests/ExecuteNetwork/ArmNNExecutor.cpp b/tests/ExecuteNetwork/ArmNNExecutor.cpp
index 730c072836..29ef4c5186 100644
--- a/tests/ExecuteNetwork/ArmNNExecutor.cpp
+++ b/tests/ExecuteNetwork/ArmNNExecutor.cpp
@@ -707,48 +707,14 @@ void ArmNNExecutor::PrintOutputTensors(const armnn::OutputTensors* outputTensors
 void ArmNNExecutor::CompareAndPrintResult(std::vector<const void*> otherOutput)
 {
     unsigned int index = 0;
-
+    std::string typeString;
     for (const auto& outputTensors: m_OutputTensorsVec)
     {
         for (const auto& outputTensor: outputTensors)
         {
-            float result = 0;
             size_t size = outputTensor.second.GetNumBytes();
-
-            switch (outputTensor.second.GetDataType())
-            {
-                case armnn::DataType::Float32:
-                {
-                    result = ComputeRMSE<float>(outputTensor.second.GetMemoryArea(), otherOutput[index++], size);
-                    break;
-                }
-                case armnn::DataType::Signed32:
-                {
-                    result = ComputeRMSE<int32_t>(outputTensor.second.GetMemoryArea(), otherOutput[index++], size);
-                    break;
-                }
-                case armnn::DataType::QSymmS16:
-                {
-                    result = ComputeRMSE<int16_t>(outputTensor.second.GetMemoryArea(), otherOutput[index++], size);
-                    break;
-                }
-                case armnn::DataType::QSymmS8:
-                case armnn::DataType::QAsymmS8:
-                {
-                    result = ComputeRMSE<int8_t>(outputTensor.second.GetMemoryArea(), otherOutput[index++], size);
-                    break;
-                }
-                case armnn::DataType::QAsymmU8:
-                {
-                    result = ComputeRMSE<uint8_t>(outputTensor.second.GetMemoryArea(), otherOutput[index++], size);
-                    break;
-                }
-                default:
-                {
-                    LogAndThrow("Unexpected DataType");
-                }
-            }
-            std::cout << "RMSE: of " << result << "\n";
+            double result = ComputeByteLevelRMSE(outputTensor.second.GetMemoryArea(), otherOutput[index++], size);
+            std::cout << "Byte level root mean square error: " << result << "\n";
         }
     }
 }
diff --git a/tests/ExecuteNetwork/ExecuteNetworkProgramOptions.cpp b/tests/ExecuteNetwork/ExecuteNetworkProgramOptions.cpp
index cba6748b45..007f81890e 100644
--- a/tests/ExecuteNetwork/ExecuteNetworkProgramOptions.cpp
+++ b/tests/ExecuteNetwork/ExecuteNetworkProgramOptions.cpp
@@ -359,17 +359,19 @@ ProgramOptions::ProgramOptions() : m_CxxOptions{"ExecuteNetwork",
                 cxxopts::value<std::string>()->default_value("parser"))
 
         ("C, compare-output",
-                "Compare the output of the network with an output file that has been previously "
-                "produced by running a network through ExecuteNetwork. See --write-outputs-to-file "
-                "to produce an output file for an execution.",
+                "Perform a per byte root mean square error calculation of the inference output with an output"
+                " file that has been previously produced by running a network through ExecuteNetwork."
+                " See --write-outputs-to-file to produce an output file for an execution.",
                 cxxopts::value<std::string>(m_ExNetParams.m_ComparisonFile))
 
         ("B, compare-output-with-backend",
-                "Compare the output of the network with a different backend.",
+                "Perform a per byte root mean square error calculation of the output of the inference with a"
+                " different backend.",
                 cxxopts::value<std::vector<std::string>>())
 
         ("A, compare-with-tflite",
-                "Compare the output of the network with the tflite ref model.",
+                "Perform a per byte root mean square error calculation of the output of the inference with"
+                " the tflite ref model.",
                 cxxopts::value<bool>(m_ExNetParams.m_CompareWithTflite)->default_value("false")
                 ->implicit_value("true"));
 
diff --git a/tests/ExecuteNetwork/TfliteExecutor.cpp b/tests/ExecuteNetwork/TfliteExecutor.cpp
index fc9c21a559..f365623d62 100644
--- a/tests/ExecuteNetwork/TfliteExecutor.cpp
+++ b/tests/ExecuteNetwork/TfliteExecutor.cpp
@@ -230,45 +230,9 @@ void TfLiteExecutor::CompareAndPrintResult(std::vector<const void*> otherOutput)
     for (unsigned int outputIndex = 0; outputIndex < m_TfLiteInterpreter->outputs().size(); ++outputIndex)
     {
         auto tfLiteDelegateOutputId = m_TfLiteInterpreter->outputs()[outputIndex];
-        float result = 0;
-        switch (m_TfLiteInterpreter->tensor(tfLiteDelegateOutputId)->type)
-        {
-            case kTfLiteFloat32:
-            {
-                result = ComputeRMSE<float>(m_TfLiteInterpreter->tensor(tfLiteDelegateOutputId)->allocation,
-                                            otherOutput[outputIndex],
-                                            m_TfLiteInterpreter->tensor(tfLiteDelegateOutputId)->bytes);
-
-                break;
-            }
-            case kTfLiteInt32:
-            {
-                result = ComputeRMSE<int32_t>(m_TfLiteInterpreter->tensor(tfLiteDelegateOutputId)->allocation,
-                                              otherOutput[outputIndex],
-                                              m_TfLiteInterpreter->tensor(tfLiteDelegateOutputId)->bytes);
-                break;
-            }
-            case kTfLiteUInt8:
-            {
-                result = ComputeRMSE<uint8_t>(m_TfLiteInterpreter->tensor(tfLiteDelegateOutputId)->allocation,
-                                              otherOutput[outputIndex],
-                                              m_TfLiteInterpreter->tensor(tfLiteDelegateOutputId)->bytes);
-                break;
-            }
-            case kTfLiteInt8:
-            {
-                result = ComputeRMSE<int8_t>(m_TfLiteInterpreter->tensor(tfLiteDelegateOutputId)->allocation,
-                                             otherOutput[outputIndex],
-                                             m_TfLiteInterpreter->tensor(tfLiteDelegateOutputId)->bytes);
-                break;
-            }
-            default:
-            {
-            }
-        }
-
-        std::cout << "RMSE of "
-                  << m_TfLiteInterpreter->tensor(tfLiteDelegateOutputId)->name
-                  << ": " << result << std::endl;
+        size_t size = m_TfLiteInterpreter->tensor(tfLiteDelegateOutputId)->bytes;
+        double result = ComputeByteLevelRMSE(m_TfLiteInterpreter->tensor(tfLiteDelegateOutputId)->allocation,
+                                             otherOutput[outputIndex], size);
+        std::cout << "Byte level root mean square error: " << result << "\n";
     }
 };
--
cgit v1.2.1