diff options
Diffstat (limited to 'tests/ExecuteNetwork')
-rw-r--r--   tests/ExecuteNetwork/ExecuteNetwork.cpp                | 63
-rw-r--r--   tests/ExecuteNetwork/ExecuteNetworkParams.hpp          |  1
-rw-r--r--   tests/ExecuteNetwork/ExecuteNetworkProgramOptions.cpp  |  5
3 files changed, 51 insertions, 18 deletions
diff --git a/tests/ExecuteNetwork/ExecuteNetwork.cpp b/tests/ExecuteNetwork/ExecuteNetwork.cpp index 16d34c8c9d..8acab71290 100644 --- a/tests/ExecuteNetwork/ExecuteNetwork.cpp +++ b/tests/ExecuteNetwork/ExecuteNetwork.cpp @@ -238,9 +238,12 @@ int TfLiteDelegateMainImpl(const ExecuteNetworkParams& params, return EXIT_FAILURE; } - for (int i = 0; i < outputSize; ++i) + if (!params.m_DontPrintOutputs) { - printf("%f ", tfLiteDelageOutputData[i]); + for (int i = 0; i < outputSize; ++i) + { + printf("%f ", tfLiteDelageOutputData[i]); + } } } else if (params.m_OutputTypes[outputIndex].compare("int") == 0) @@ -253,9 +256,12 @@ int TfLiteDelegateMainImpl(const ExecuteNetworkParams& params, return EXIT_FAILURE; } - for (int i = 0; i < outputSize; ++i) + if (!params.m_DontPrintOutputs) { - printf("%d ", tfLiteDelageOutputData[i]); + for (int i = 0; i < outputSize; ++i) + { + printf("%d ", tfLiteDelageOutputData[i]); + } } } else if (params.m_OutputTypes[outputIndex].compare("qsymms8") == 0) @@ -268,9 +274,12 @@ int TfLiteDelegateMainImpl(const ExecuteNetworkParams& params, return EXIT_FAILURE; } - for (int i = 0; i < outputSize; ++i) + if (!params.m_DontPrintOutputs) { - printf("%d ", tfLiteDelageOutputData[i]); + for (int i = 0; i < outputSize; ++i) + { + printf("%d ", tfLiteDelageOutputData[i]); + } } } else if (params.m_OutputTypes[outputIndex].compare("qasymm8") == 0 || @@ -284,9 +293,12 @@ int TfLiteDelegateMainImpl(const ExecuteNetworkParams& params, return EXIT_FAILURE; } - for (int i = 0; i < outputSize; ++i) + if (!params.m_DontPrintOutputs) { - printf("%u ", tfLiteDelageOutputData[i]); + for (int i = 0; i < outputSize; ++i) + { + printf("%u ", tfLiteDelageOutputData[i]); + } } } else @@ -472,6 +484,10 @@ int MainImpl(const ExecuteNetworkParams& params, { ARMNN_LOG(warning) << "The input data was generated, note that the output will not be useful"; } + if (params.m_DontPrintOutputs) + { + ARMNN_LOG(info) << "Printing outputs to console is disabled."; + } // Print 
output tensors const auto& infosOut = model.GetOutputBindingInfos(); @@ -479,9 +495,9 @@ int MainImpl(const ExecuteNetworkParams& params, { const armnn::TensorInfo& infoOut = infosOut[i].second; - // We've made sure before that the number of output files either equals numOutputs, in which case - // we override those files when processing the results of each iteration (only the result of the - // last iteration will be stored), or there are enough + // We've made sure before that the number of output files either equals numOutputs, in which + // case we override those files when processing the results of each iteration (only the result + // of the last iteration will be stored), or there are enough // output files for each output of each iteration. size_t outputFileIndex = x * numOutputs + i; if (!params.m_OutputTensorFiles.empty()) @@ -499,7 +515,8 @@ int MainImpl(const ExecuteNetworkParams& params, TensorPrinter printer(inferenceModelParams.m_OutputBindings[i], infoOut, outputTensorFile, - params.m_DequantizeOutput); + params.m_DequantizeOutput, + !params.m_DontPrintOutputs); mapbox::util::apply_visitor(printer, outputs[x][i]); } @@ -575,14 +592,18 @@ int MainImpl(const ExecuteNetworkParams& params, { ARMNN_LOG(warning) << "The input data was generated, note that the output will not be useful"; } + if (params.m_DontPrintOutputs) + { + ARMNN_LOG(info) << "Printing outputs to console is disabled."; + } // Print output tensors const auto& infosOut = model.GetOutputBindingInfos(); for (size_t i = 0; i < numOutputs; i++) { // We've made sure before that the number of output files either equals numOutputs, in which - // case we override those files when processing the results of each iteration (only the result - // of the last iteration will be stored), or there are enough + // case we override those files when processing the results of each iteration (only the + // result of the last iteration will be stored), or there are enough // output files for each output of each 
iteration. size_t outputFileIndex = iteration * numOutputs + i; if (!params.m_OutputTensorFiles.empty()) @@ -602,7 +623,8 @@ int MainImpl(const ExecuteNetworkParams& params, TensorPrinter printer(inferenceModelParams.m_OutputBindings[i], infoOut, outputTensorFile, - params.m_DequantizeOutput); + params.m_DequantizeOutput, + !params.m_DontPrintOutputs); mapbox::util::apply_visitor(printer, inferenceOutputMap.at(cb->GetInferenceId())[i]); } @@ -683,14 +705,18 @@ int MainImpl(const ExecuteNetworkParams& params, { ARMNN_LOG(warning) << "The input data was generated, note that the output will not be useful"; } + if (params.m_DontPrintOutputs) + { + ARMNN_LOG(info) << "Printing outputs to console is disabled."; + } // Print output tensors const auto& infosOut = model.GetOutputBindingInfos(); for (size_t i = 0; i < numOutputs; i++) { // We've made sure before that the number of output files either equals numOutputs, in which - // case we override those files when processing the results of each iteration (only the result - // of the last iteration will be stored), or there are enough + // case we override those files when processing the results of each iteration (only the + // result of the last iteration will be stored), or there are enough // output files for each output of each iteration. 
size_t outputFileIndex = j * numOutputs + i; if (!params.m_OutputTensorFiles.empty()) @@ -709,7 +735,8 @@ int MainImpl(const ExecuteNetworkParams& params, TensorPrinter printer(inferenceModelParams.m_OutputBindings[i], infoOut, outputTensorFile, - params.m_DequantizeOutput); + params.m_DequantizeOutput, + !params.m_DontPrintOutputs); mapbox::util::apply_visitor(printer, outputs[j][i]); } diff --git a/tests/ExecuteNetwork/ExecuteNetworkParams.hpp b/tests/ExecuteNetwork/ExecuteNetworkParams.hpp index e519b028a0..db8194b3f9 100644 --- a/tests/ExecuteNetwork/ExecuteNetworkParams.hpp +++ b/tests/ExecuteNetwork/ExecuteNetworkParams.hpp @@ -50,6 +50,7 @@ struct ExecuteNetworkParams std::vector<std::string> m_OutputTypes; bool m_ParseUnsupported = false; bool m_PrintIntermediate; + bool m_DontPrintOutputs; bool m_QuantizeInput; bool m_SaveCachedNetwork; size_t m_SimultaneousIterations; diff --git a/tests/ExecuteNetwork/ExecuteNetworkProgramOptions.cpp b/tests/ExecuteNetwork/ExecuteNetworkProgramOptions.cpp index 927d804725..b1c87d088a 100644 --- a/tests/ExecuteNetwork/ExecuteNetworkProgramOptions.cpp +++ b/tests/ExecuteNetwork/ExecuteNetworkProgramOptions.cpp @@ -254,6 +254,11 @@ ProgramOptions::ProgramOptions() : m_CxxOptions{"ExecuteNetwork", "Add unsupported operators as stand-in layers (where supported by parser)", cxxopts::value<bool>(m_ExNetParams.m_ParseUnsupported)->default_value("false")->implicit_value("true")) + ("do-not-print-output", + "The default behaviour of ExecuteNetwork is to print the resulting outputs on the console. " + "This behaviour can be changed by adding this flag to your command.", + cxxopts::value<bool>(m_ExNetParams.m_DontPrintOutputs)->default_value("false")->implicit_value("true")) + ("q,quantize-input", "If this option is enabled, all float inputs will be quantized as appropriate for the model's inputs. " "If unset, default to not quantized. Accepted values (true or false)", |