From 2fef6f61132eab444485991fc68b536e886cb18c Mon Sep 17 00:00:00 2001
From: Kevin May
Date: Mon, 14 Nov 2022 17:07:49 +0000
Subject: IVGCVSW-7332 Fix Delegate Profiling in ExNet with multiple iterations

Signed-off-by: Kevin May
Change-Id: If837e4bec7940b53d18d0da32f3e736215dd2a03
---
 tests/ExecuteNetwork/TfliteExecutor.cpp | 125 ++++++++++++++++----------------
 1 file changed, 64 insertions(+), 61 deletions(-)

diff --git a/tests/ExecuteNetwork/TfliteExecutor.cpp b/tests/ExecuteNetwork/TfliteExecutor.cpp
index d501062643..41716ffb93 100644
--- a/tests/ExecuteNetwork/TfliteExecutor.cpp
+++ b/tests/ExecuteNetwork/TfliteExecutor.cpp
@@ -112,87 +112,90 @@ std::vector<const void*> TfLiteExecutor::Execute()
         status = m_TfLiteInterpreter->Invoke();
         const auto duration = armnn::GetTimeDuration(start_time);
 
-        if (m_Params.m_DontPrintOutputs || m_Params.m_ReuseBuffers)
+        if (!m_Params.m_DontPrintOutputs)
         {
-            break;
-        }
-        // Print out the output
-        for (unsigned int outputIndex = 0; outputIndex < m_TfLiteInterpreter->outputs().size(); ++outputIndex)
-        {
-            auto tfLiteDelegateOutputId = m_TfLiteInterpreter->outputs()[outputIndex];
-            TfLiteIntArray* outputDims = m_TfLiteInterpreter->tensor(tfLiteDelegateOutputId)->dims;
-            // If we've been asked to write to a file then set a file output stream. Otherwise use stdout.
-            FILE* outputTensorFile = stdout;
-            if (!m_Params.m_OutputTensorFiles.empty())
+            // Print out the output
+            for (unsigned int outputIndex = 0; outputIndex < m_TfLiteInterpreter->outputs().size(); ++outputIndex)
             {
-                outputTensorFile = fopen(m_Params.m_OutputTensorFiles[outputIndex].c_str(), "w");
-                if (outputTensorFile == NULL)
+                auto tfLiteDelegateOutputId = m_TfLiteInterpreter->outputs()[outputIndex];
+                TfLiteIntArray* outputDims = m_TfLiteInterpreter->tensor(tfLiteDelegateOutputId)->dims;
+                // If we've been asked to write to a file then set a file output stream. Otherwise use stdout.
+                FILE* outputTensorFile = stdout;
+                if (!m_Params.m_OutputTensorFiles.empty())
                 {
-                    LogAndThrow("Specified output tensor file, \"" + m_Params.m_OutputTensorFiles[outputIndex] +
-                                "\", cannot be created. Defaulting to stdout. Error was: " + std::strerror(errno));
+                    outputTensorFile = fopen(m_Params.m_OutputTensorFiles[outputIndex].c_str(), "w");
+                    if (outputTensorFile == NULL)
+                    {
+                        LogAndThrow("Specified output tensor file, \"" + m_Params.m_OutputTensorFiles[outputIndex] +
+                                    "\", cannot be created. Defaulting to stdout. Error was: " + std::strerror(errno));
+                    }
+                    else
+                    {
+                        ARMNN_LOG(info) << "Writing output " << outputIndex << "' of iteration: " << x + 1
+                                        << " to file: '" << m_Params.m_OutputTensorFiles[outputIndex] << "'";
+                    }
                 }
-                else
+                long outputSize = 1;
+                for (unsigned int dim = 0; dim < static_cast<unsigned int>(outputDims->size); ++dim)
                 {
-                    ARMNN_LOG(info) << "Writing output " << outputIndex << "' of iteration: " << x+1 << " to file: '"
-                                    << m_Params.m_OutputTensorFiles[outputIndex] << "'";
+                    outputSize *= outputDims->data[dim];
                 }
-            }
-            long outputSize = 1;
-            for (unsigned int dim = 0; dim < static_cast<unsigned int>(outputDims->size); ++dim)
-            {
-                outputSize *= outputDims->data[dim];
-            }
 
-            std::cout << m_TfLiteInterpreter->tensor(tfLiteDelegateOutputId)->name << ": ";
-            results.push_back(m_TfLiteInterpreter->tensor(tfLiteDelegateOutputId)->allocation);
+                std::cout << m_TfLiteInterpreter->tensor(tfLiteDelegateOutputId)->name << ": ";
+                results.push_back(m_TfLiteInterpreter->tensor(tfLiteDelegateOutputId)->allocation);
 
-            switch (m_TfLiteInterpreter->tensor(tfLiteDelegateOutputId)->type)
-            {
-
-                case kTfLiteFloat32:
+                switch (m_TfLiteInterpreter->tensor(tfLiteDelegateOutputId)->type)
                 {
-                    auto tfLiteDelegateOutputData = m_TfLiteInterpreter->typed_tensor<float>(tfLiteDelegateOutputId);
-                    for (int i = 0; i < outputSize; ++i)
+                    case kTfLiteFloat32:
                     {
-                        fprintf(outputTensorFile, "%f ", tfLiteDelegateOutputData[i]);
+                        auto tfLiteDelegateOutputData = m_TfLiteInterpreter->typed_tensor<float>(
+                                tfLiteDelegateOutputId);
+
+                        for (int i = 0; i < outputSize; ++i)
+                        {
+                            fprintf(outputTensorFile, "%f ", tfLiteDelegateOutputData[i]);
+                        }
+                        break;
                     }
-                    break;
-                }
-                case kTfLiteInt32:
-                {
-                    auto tfLiteDelegateOutputData = m_TfLiteInterpreter->typed_tensor<int32_t>(tfLiteDelegateOutputId);
-                    for (int i = 0; i < outputSize; ++i)
+                    case kTfLiteInt32:
                     {
-                        fprintf(outputTensorFile, "%d ", tfLiteDelegateOutputData[i]);
+                        auto tfLiteDelegateOutputData = m_TfLiteInterpreter->typed_tensor<int32_t>(
+                                tfLiteDelegateOutputId);
+                        for (int i = 0; i < outputSize; ++i)
+                        {
+                            fprintf(outputTensorFile, "%d ", tfLiteDelegateOutputData[i]);
+                        }
+                        break;
                     }
-                    break;
-                }
-                case kTfLiteUInt8:
-                {
-                    auto tfLiteDelegateOutputData = m_TfLiteInterpreter->typed_tensor<uint8_t>(tfLiteDelegateOutputId);
-                    for (int i = 0; i < outputSize; ++i)
+                    case kTfLiteUInt8:
                     {
-                        fprintf(outputTensorFile, "%u ", tfLiteDelegateOutputData[i]);
+                        auto tfLiteDelegateOutputData = m_TfLiteInterpreter->typed_tensor<uint8_t>(
+                                tfLiteDelegateOutputId);
+                        for (int i = 0; i < outputSize; ++i)
+                        {
+                            fprintf(outputTensorFile, "%u ", tfLiteDelegateOutputData[i]);
+                        }
+                        break;
                     }
-                    break;
-                }
-                case kTfLiteInt8:
-                {
-                    auto tfLiteDelegateOutputData = m_TfLiteInterpreter->typed_tensor<int8_t>(tfLiteDelegateOutputId);
-                    for (int i = 0; i < outputSize; ++i)
+                    case kTfLiteInt8:
                     {
-                        fprintf(outputTensorFile, "%d ", tfLiteDelegateOutputData[i]);
+                        auto tfLiteDelegateOutputData = m_TfLiteInterpreter->typed_tensor<int8_t>(
+                                tfLiteDelegateOutputId);
+                        for (int i = 0; i < outputSize; ++i)
+                        {
+                            fprintf(outputTensorFile, "%d ", tfLiteDelegateOutputData[i]);
+                        }
+                        break;
+                    }
+                    default:
+                    {
+                        LogAndThrow("Unsupported output type");
                     }
-                    break;
-                }
-                default:
-                {
-                    LogAndThrow("Unsupported output type");
                 }
-            }
-            std::cout << std::endl;
+                std::cout << std::endl;
+            }
         }
         CheckInferenceTimeThreshold(duration, m_Params.m_ThresholdTime);
     }
-- 
cgit v1.2.1
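The behavioural point of the patch is easy to miss in the reindentation noise: before this fix, setting m_DontPrintOutputs or m_ReuseBuffers hit an early "break" that exited the whole iteration loop, so inferences after the first never ran and per-iteration delegate profiling data was never collected. The fix inverts the condition so that only the output printing is skipped while the loop and the CheckInferenceTimeThreshold call keep running each iteration. The sketch below is not part of the patch; it is a minimal standalone illustration of that control-flow difference, using hypothetical stand-in names (runInference, dontPrintOutputs, iterations) for the ExecuteNetwork members.

    #include <cstdio>

    // Hypothetical stand-in for m_TfLiteInterpreter->Invoke() plus timing.
    static void runInference(int iteration)
    {
        std::printf("inference %d executed\n", iteration);
    }

    int main()
    {
        const bool dontPrintOutputs = true; // mirrors m_Params.m_DontPrintOutputs
        const int  iterations       = 3;    // mirrors m_Params.m_Iterations

        // Old shape: the guard breaks out of the iteration loop itself, so
        // inferences 2..N never run, and the per-iteration threshold check
        // after the print block is skipped as well.
        for (int x = 0; x < iterations; ++x)
        {
            runInference(x + 1);
            if (dontPrintOutputs)
            {
                break; // bug: exits the whole loop, not just the printing step
            }
            // ... print output tensors ...
            // ... per-iteration profiling/threshold check ...
        }

        // Fixed shape: only the printing is guarded; every iteration still
        // runs, and the timing check (CheckInferenceTimeThreshold in the real
        // code) executes once per inference.
        for (int x = 0; x < iterations; ++x)
        {
            runInference(x + 1);
            if (!dontPrintOutputs)
            {
                // ... print output tensors ...
            }
            // ... per-iteration profiling/threshold check ...
        }
        return 0;
    }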