author     Kevin May <kevin.may@arm.com>    2022-11-14 17:07:49 +0000
committer  Kevin May <kevin.may@arm.com>    2022-11-15 11:23:33 +0000
commit     2fef6f61132eab444485991fc68b536e886cb18c (patch)
tree       f43923f520cb618ab80828c10ea5851cbe2891e1
parent     2523b79fd01dfba6c12fa903c80b7a66b853f861 (diff)
download   armnn-2fef6f61132eab444485991fc68b536e886cb18c.tar.gz
IVGCVSW-7332 Fix Delegate Profiling in ExNet with multiple iterations
Signed-off-by: Kevin May <kevin.may@arm.com>
Change-Id: If837e4bec7940b53d18d0da32f3e736215dd2a03
-rw-r--r--  tests/ExecuteNetwork/TfliteExecutor.cpp | 125
1 file changed, 64 insertions(+), 61 deletions(-)
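
The change below is essentially a control-flow fix: previously, when outputs were not being printed (m_DontPrintOutputs) or buffers were reused (m_ReuseBuffers), the per-iteration loop hit an early break straight after the first Invoke(), so the remaining iterations, and with them the delegate profiling data and the threshold check, never ran. The new code guards only the output printing, and m_ReuseBuffers no longer suppresses anything. A minimal sketch of the fixed loop shape, condensed from the diff (the loop header with m_Params.m_Iterations and the armnn::GetTimeNow() call are assumptions based on the surrounding ExecuteNetwork sources; the other names appear in the diff itself):

    for (size_t x = 0; x < m_Params.m_Iterations; x++)
    {
        const auto start_time = armnn::GetTimeNow();
        status = m_TfLiteInterpreter->Invoke();                    // every requested iteration now runs
        const auto duration = armnn::GetTimeDuration(start_time);

        if (!m_Params.m_DontPrintOutputs)
        {
            // printing of the output tensors is optional per run ...
        }

        // ... but each iteration is still timed and checked against the threshold
        CheckInferenceTimeThreshold(duration, m_Params.m_ThresholdTime);
    }
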
diff --git a/tests/ExecuteNetwork/TfliteExecutor.cpp b/tests/ExecuteNetwork/TfliteExecutor.cpp
index d501062643..41716ffb93 100644
--- a/tests/ExecuteNetwork/TfliteExecutor.cpp
+++ b/tests/ExecuteNetwork/TfliteExecutor.cpp
@@ -112,87 +112,90 @@ std::vector<const void *> TfLiteExecutor::Execute()
         status = m_TfLiteInterpreter->Invoke();
         const auto duration = armnn::GetTimeDuration(start_time);
-        if (m_Params.m_DontPrintOutputs || m_Params.m_ReuseBuffers)
+        if (!m_Params.m_DontPrintOutputs)
         {
-            break;
-        }
-        // Print out the output
-        for (unsigned int outputIndex = 0; outputIndex < m_TfLiteInterpreter->outputs().size(); ++outputIndex)
-        {
-            auto tfLiteDelegateOutputId = m_TfLiteInterpreter->outputs()[outputIndex];
-            TfLiteIntArray* outputDims = m_TfLiteInterpreter->tensor(tfLiteDelegateOutputId)->dims;
-            // If we've been asked to write to a file then set a file output stream. Otherwise use stdout.
-            FILE* outputTensorFile = stdout;
-            if (!m_Params.m_OutputTensorFiles.empty())
+            // Print out the output
+            for (unsigned int outputIndex = 0; outputIndex < m_TfLiteInterpreter->outputs().size(); ++outputIndex)
             {
-                outputTensorFile = fopen(m_Params.m_OutputTensorFiles[outputIndex].c_str(), "w");
-                if (outputTensorFile == NULL)
+                auto tfLiteDelegateOutputId = m_TfLiteInterpreter->outputs()[outputIndex];
+                TfLiteIntArray* outputDims = m_TfLiteInterpreter->tensor(tfLiteDelegateOutputId)->dims;
+                // If we've been asked to write to a file then set a file output stream. Otherwise use stdout.
+                FILE* outputTensorFile = stdout;
+                if (!m_Params.m_OutputTensorFiles.empty())
                 {
-                    LogAndThrow("Specified output tensor file, \"" + m_Params.m_OutputTensorFiles[outputIndex] +
-                                "\", cannot be created. Defaulting to stdout. Error was: " + std::strerror(errno));
+                    outputTensorFile = fopen(m_Params.m_OutputTensorFiles[outputIndex].c_str(), "w");
+                    if (outputTensorFile == NULL)
+                    {
+                        LogAndThrow("Specified output tensor file, \"" + m_Params.m_OutputTensorFiles[outputIndex] +
+                                    "\", cannot be created. Defaulting to stdout. Error was: " + std::strerror(errno));
+                    }
+                    else
+                    {
+                        ARMNN_LOG(info) << "Writing output " << outputIndex << "' of iteration: " << x + 1
+                                        << " to file: '" << m_Params.m_OutputTensorFiles[outputIndex] << "'";
+                    }
                 }
-                else
+                long outputSize = 1;
+                for (unsigned int dim = 0; dim < static_cast<unsigned int>(outputDims->size); ++dim)
                 {
-                    ARMNN_LOG(info) << "Writing output " << outputIndex << "' of iteration: " << x+1 << " to file: '"
-                                    << m_Params.m_OutputTensorFiles[outputIndex] << "'";
+                    outputSize *= outputDims->data[dim];
                 }
-            }
-            long outputSize = 1;
-            for (unsigned int dim = 0; dim < static_cast<unsigned int>(outputDims->size); ++dim)
-            {
-                outputSize *= outputDims->data[dim];
-            }
-            std::cout << m_TfLiteInterpreter->tensor(tfLiteDelegateOutputId)->name << ": ";
-            results.push_back(m_TfLiteInterpreter->tensor(tfLiteDelegateOutputId)->allocation);
+                std::cout << m_TfLiteInterpreter->tensor(tfLiteDelegateOutputId)->name << ": ";
+                results.push_back(m_TfLiteInterpreter->tensor(tfLiteDelegateOutputId)->allocation);
-            switch (m_TfLiteInterpreter->tensor(tfLiteDelegateOutputId)->type)
-            {
-
-                case kTfLiteFloat32:
+                switch (m_TfLiteInterpreter->tensor(tfLiteDelegateOutputId)->type)
                 {
-                    auto tfLiteDelegateOutputData = m_TfLiteInterpreter->typed_tensor<float>(tfLiteDelegateOutputId);
-                    for (int i = 0; i < outputSize; ++i)
+                    case kTfLiteFloat32:
                     {
-                        fprintf(outputTensorFile, "%f ", tfLiteDelegateOutputData[i]);
+                        auto tfLiteDelegateOutputData = m_TfLiteInterpreter->typed_tensor<float>(
+                                tfLiteDelegateOutputId);
+
+                        for (int i = 0; i < outputSize; ++i)
+                        {
+                            fprintf(outputTensorFile, "%f ", tfLiteDelegateOutputData[i]);
+                        }
+                        break;
                     }
-                    break;
-                }
-                case kTfLiteInt32:
-                {
-                    auto tfLiteDelegateOutputData = m_TfLiteInterpreter->typed_tensor<int32_t>(tfLiteDelegateOutputId);
-                    for (int i = 0; i < outputSize; ++i)
+                    case kTfLiteInt32:
                     {
-                        fprintf(outputTensorFile, "%d ", tfLiteDelegateOutputData[i]);
+                        auto tfLiteDelegateOutputData = m_TfLiteInterpreter->typed_tensor<int32_t>(
+                                tfLiteDelegateOutputId);
+                        for (int i = 0; i < outputSize; ++i)
+                        {
+                            fprintf(outputTensorFile, "%d ", tfLiteDelegateOutputData[i]);
+                        }
+                        break;
                     }
-                    break;
-                }
-                case kTfLiteUInt8:
-                {
-                    auto tfLiteDelegateOutputData = m_TfLiteInterpreter->typed_tensor<uint8_t>(tfLiteDelegateOutputId);
-                    for (int i = 0; i < outputSize; ++i)
+                    case kTfLiteUInt8:
                     {
-                        fprintf(outputTensorFile, "%u ", tfLiteDelegateOutputData[i]);
+                        auto tfLiteDelegateOutputData = m_TfLiteInterpreter->typed_tensor<uint8_t>(
+                                tfLiteDelegateOutputId);
+                        for (int i = 0; i < outputSize; ++i)
+                        {
+                            fprintf(outputTensorFile, "%u ", tfLiteDelegateOutputData[i]);
+                        }
+                        break;
                     }
-                    break;
-                }
-                case kTfLiteInt8:
-                {
-                    auto tfLiteDelegateOutputData = m_TfLiteInterpreter->typed_tensor<int8_t>(tfLiteDelegateOutputId);
-                    for (int i = 0; i < outputSize; ++i)
+                    case kTfLiteInt8:
                     {
-                        fprintf(outputTensorFile, "%d ", tfLiteDelegateOutputData[i]);
+                        auto tfLiteDelegateOutputData = m_TfLiteInterpreter->typed_tensor<int8_t>(
+                                tfLiteDelegateOutputId);
+                        for (int i = 0; i < outputSize; ++i)
+                        {
+                            fprintf(outputTensorFile, "%d ", tfLiteDelegateOutputData[i]);
+                        }
+                        break;
+                    }
+                    default:
+                    {
+                        LogAndThrow("Unsupported output type");
                     }
-                    break;
-                }
-                default:
-                {
-                    LogAndThrow("Unsupported output type");
                 }
-            }
-            std::cout << std::endl;
+                std::cout << std::endl;
+            }
         }
         CheckInferenceTimeThreshold(duration, m_Params.m_ThresholdTime);
     }
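
For orientation, CheckInferenceTimeThreshold evidently compares each iteration's measured duration against m_Params.m_ThresholdTime, which is why keeping the call inside the loop for every iteration matters for multi-iteration profiling runs. A minimal sketch of that kind of check, purely illustrative and not the ArmNN implementation (the signature, return convention, and "0 means no limit" behaviour are assumptions):

    #include <chrono>

    // Illustration only: report failure for an iteration whose inference time
    // exceeds the configured threshold in milliseconds (0 meaning "no limit").
    bool CheckInferenceTimeThreshold(const std::chrono::duration<double, std::milli>& duration,
                                     const double& thresholdTime)
    {
        if (thresholdTime != 0.0 && duration.count() > thresholdTime)
        {
            return false;   // over budget
        }
        return true;
    }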