about summary refs log tree commit diff
path: root/tests/ExecuteNetwork
diff options
context:
space:
mode:
author	Jan Eilers <jan.eilers@arm.com>	2021-09-07 12:46:15 +0100
committer	Jan Eilers <jan.eilers@arm.com>	2021-09-13 10:05:00 +0100
commit	284b5d19c8cf446b7eec16ea560c544ac39008c1 (patch)
tree	8a4d34db3fff091bacd15b004b9c6e6e4b8248ba /tests/ExecuteNetwork
parent	65d5d2ddf77c69e76643e40440aa986defe6d0d7 (diff)
download	armnn-284b5d19c8cf446b7eec16ea560c544ac39008c1.tar.gz
Add 'do-not-print-output' option to ExNet
Signed-off-by: Jan Eilers <jan.eilers@arm.com> Change-Id: I10fb010ee8d3f813d2264cefb526f352e30d7046
Diffstat (limited to 'tests/ExecuteNetwork')
-rw-r--r--	tests/ExecuteNetwork/ExecuteNetwork.cpp	63
-rw-r--r--	tests/ExecuteNetwork/ExecuteNetworkParams.hpp	1
-rw-r--r--	tests/ExecuteNetwork/ExecuteNetworkProgramOptions.cpp	5
3 files changed, 51 insertions, 18 deletions
diff --git a/tests/ExecuteNetwork/ExecuteNetwork.cpp b/tests/ExecuteNetwork/ExecuteNetwork.cpp
index 16d34c8c9d..8acab71290 100644
--- a/tests/ExecuteNetwork/ExecuteNetwork.cpp
+++ b/tests/ExecuteNetwork/ExecuteNetwork.cpp
@@ -238,9 +238,12 @@ int TfLiteDelegateMainImpl(const ExecuteNetworkParams& params,
return EXIT_FAILURE;
}
- for (int i = 0; i < outputSize; ++i)
+ if (!params.m_DontPrintOutputs)
{
- printf("%f ", tfLiteDelageOutputData[i]);
+ for (int i = 0; i < outputSize; ++i)
+ {
+ printf("%f ", tfLiteDelageOutputData[i]);
+ }
}
}
else if (params.m_OutputTypes[outputIndex].compare("int") == 0)
@@ -253,9 +256,12 @@ int TfLiteDelegateMainImpl(const ExecuteNetworkParams& params,
return EXIT_FAILURE;
}
- for (int i = 0; i < outputSize; ++i)
+ if (!params.m_DontPrintOutputs)
{
- printf("%d ", tfLiteDelageOutputData[i]);
+ for (int i = 0; i < outputSize; ++i)
+ {
+ printf("%d ", tfLiteDelageOutputData[i]);
+ }
}
}
else if (params.m_OutputTypes[outputIndex].compare("qsymms8") == 0)
@@ -268,9 +274,12 @@ int TfLiteDelegateMainImpl(const ExecuteNetworkParams& params,
return EXIT_FAILURE;
}
- for (int i = 0; i < outputSize; ++i)
+ if (!params.m_DontPrintOutputs)
{
- printf("%d ", tfLiteDelageOutputData[i]);
+ for (int i = 0; i < outputSize; ++i)
+ {
+ printf("%d ", tfLiteDelageOutputData[i]);
+ }
}
}
else if (params.m_OutputTypes[outputIndex].compare("qasymm8") == 0 ||
@@ -284,9 +293,12 @@ int TfLiteDelegateMainImpl(const ExecuteNetworkParams& params,
return EXIT_FAILURE;
}
- for (int i = 0; i < outputSize; ++i)
+ if (!params.m_DontPrintOutputs)
{
- printf("%u ", tfLiteDelageOutputData[i]);
+ for (int i = 0; i < outputSize; ++i)
+ {
+ printf("%u ", tfLiteDelageOutputData[i]);
+ }
}
}
else
@@ -472,6 +484,10 @@ int MainImpl(const ExecuteNetworkParams& params,
{
ARMNN_LOG(warning) << "The input data was generated, note that the output will not be useful";
}
+ if (params.m_DontPrintOutputs)
+ {
+ ARMNN_LOG(info) << "Printing outputs to console is disabled.";
+ }
// Print output tensors
const auto& infosOut = model.GetOutputBindingInfos();
@@ -479,9 +495,9 @@ int MainImpl(const ExecuteNetworkParams& params,
{
const armnn::TensorInfo& infoOut = infosOut[i].second;
- // We've made sure before that the number of output files either equals numOutputs, in which case
- // we override those files when processing the results of each iteration (only the result of the
- // last iteration will be stored), or there are enough
+ // We've made sure before that the number of output files either equals numOutputs, in which
+ // case we override those files when processing the results of each iteration (only the result
+ // of the last iteration will be stored), or there are enough
// output files for each output of each iteration.
size_t outputFileIndex = x * numOutputs + i;
if (!params.m_OutputTensorFiles.empty())
@@ -499,7 +515,8 @@ int MainImpl(const ExecuteNetworkParams& params,
TensorPrinter printer(inferenceModelParams.m_OutputBindings[i],
infoOut,
outputTensorFile,
- params.m_DequantizeOutput);
+ params.m_DequantizeOutput,
+ !params.m_DontPrintOutputs);
mapbox::util::apply_visitor(printer, outputs[x][i]);
}
@@ -575,14 +592,18 @@ int MainImpl(const ExecuteNetworkParams& params,
{
ARMNN_LOG(warning) << "The input data was generated, note that the output will not be useful";
}
+ if (params.m_DontPrintOutputs)
+ {
+ ARMNN_LOG(info) << "Printing outputs to console is disabled.";
+ }
// Print output tensors
const auto& infosOut = model.GetOutputBindingInfos();
for (size_t i = 0; i < numOutputs; i++)
{
// We've made sure before that the number of output files either equals numOutputs, in which
- // case we override those files when processing the results of each iteration (only the result
- // of the last iteration will be stored), or there are enough
+ // case we override those files when processing the results of each iteration (only the
+ // result of the last iteration will be stored), or there are enough
// output files for each output of each iteration.
size_t outputFileIndex = iteration * numOutputs + i;
if (!params.m_OutputTensorFiles.empty())
@@ -602,7 +623,8 @@ int MainImpl(const ExecuteNetworkParams& params,
TensorPrinter printer(inferenceModelParams.m_OutputBindings[i],
infoOut,
outputTensorFile,
- params.m_DequantizeOutput);
+ params.m_DequantizeOutput,
+ !params.m_DontPrintOutputs);
mapbox::util::apply_visitor(printer, inferenceOutputMap.at(cb->GetInferenceId())[i]);
}
@@ -683,14 +705,18 @@ int MainImpl(const ExecuteNetworkParams& params,
{
ARMNN_LOG(warning) << "The input data was generated, note that the output will not be useful";
}
+ if (params.m_DontPrintOutputs)
+ {
+ ARMNN_LOG(info) << "Printing outputs to console is disabled.";
+ }
// Print output tensors
const auto& infosOut = model.GetOutputBindingInfos();
for (size_t i = 0; i < numOutputs; i++)
{
// We've made sure before that the number of output files either equals numOutputs, in which
- // case we override those files when processing the results of each iteration (only the result
- // of the last iteration will be stored), or there are enough
+ // case we override those files when processing the results of each iteration (only the
+ // result of the last iteration will be stored), or there are enough
// output files for each output of each iteration.
size_t outputFileIndex = j * numOutputs + i;
if (!params.m_OutputTensorFiles.empty())
@@ -709,7 +735,8 @@ int MainImpl(const ExecuteNetworkParams& params,
TensorPrinter printer(inferenceModelParams.m_OutputBindings[i],
infoOut,
outputTensorFile,
- params.m_DequantizeOutput);
+ params.m_DequantizeOutput,
+ !params.m_DontPrintOutputs);
mapbox::util::apply_visitor(printer, outputs[j][i]);
}
diff --git a/tests/ExecuteNetwork/ExecuteNetworkParams.hpp b/tests/ExecuteNetwork/ExecuteNetworkParams.hpp
index e519b028a0..db8194b3f9 100644
--- a/tests/ExecuteNetwork/ExecuteNetworkParams.hpp
+++ b/tests/ExecuteNetwork/ExecuteNetworkParams.hpp
@@ -50,6 +50,7 @@ struct ExecuteNetworkParams
std::vector<std::string> m_OutputTypes;
bool m_ParseUnsupported = false;
bool m_PrintIntermediate;
+ bool m_DontPrintOutputs;
bool m_QuantizeInput;
bool m_SaveCachedNetwork;
size_t m_SimultaneousIterations;
diff --git a/tests/ExecuteNetwork/ExecuteNetworkProgramOptions.cpp b/tests/ExecuteNetwork/ExecuteNetworkProgramOptions.cpp
index 927d804725..b1c87d088a 100644
--- a/tests/ExecuteNetwork/ExecuteNetworkProgramOptions.cpp
+++ b/tests/ExecuteNetwork/ExecuteNetworkProgramOptions.cpp
@@ -254,6 +254,11 @@ ProgramOptions::ProgramOptions() : m_CxxOptions{"ExecuteNetwork",
"Add unsupported operators as stand-in layers (where supported by parser)",
cxxopts::value<bool>(m_ExNetParams.m_ParseUnsupported)->default_value("false")->implicit_value("true"))
+ ("do-not-print-output",
+ "The default behaviour of ExecuteNetwork is to print the resulting outputs on the console. "
+ "This behaviour can be changed by adding this flag to your command.",
+ cxxopts::value<bool>(m_ExNetParams.m_DontPrintOutputs)->default_value("false")->implicit_value("true"))
+
("q,quantize-input",
"If this option is enabled, all float inputs will be quantized as appropriate for the model's inputs. "
"If unset, default to not quantized. Accepted values (true or false)",