diff options
 src/armnnUtils/TensorIOUtils.hpp                      |  9 +++++----
 tests/NetworkExecutionUtils/NetworkExecutionUtils.hpp | 26 +++++++++-----------
 2 files changed, 15 insertions(+), 20 deletions(-)
diff --git a/src/armnnUtils/TensorIOUtils.hpp b/src/armnnUtils/TensorIOUtils.hpp
index 47e0a320b8..07f3723279 100644
--- a/src/armnnUtils/TensorIOUtils.hpp
+++ b/src/armnnUtils/TensorIOUtils.hpp
@@ -37,11 +37,10 @@ inline armnn::InputTensors MakeInputTensors(
     {
         if (value.size() != inputBinding.second.GetNumElements())
         {
-            std::ostringstream msg;
-            msg << "Input tensor has incorrect size (expected "
-                << inputBinding.second.GetNumElements() << " got "
-                << value.size();
-            throw armnn::Exception(msg.str());
+            throw armnn::Exception(boost::str(boost::format("Input tensor has incorrect size "
+                                                            "(expected %1% got %2%)")
+                                              % inputBinding.second.GetNumElements()
+                                              % value.size()));
         }

         armnn::ConstTensor inputTensor(inputBinding.second, value.data());
diff --git a/tests/NetworkExecutionUtils/NetworkExecutionUtils.hpp b/tests/NetworkExecutionUtils/NetworkExecutionUtils.hpp
index 004e9fbdb2..2556a104b5 100644
--- a/tests/NetworkExecutionUtils/NetworkExecutionUtils.hpp
+++ b/tests/NetworkExecutionUtils/NetworkExecutionUtils.hpp
@@ -471,24 +471,20 @@ int MainImpl(const ExecuteNetworkParams& params,
         // model.Run returns the inference time elapsed in EnqueueWorkload (in milliseconds)
         auto inference_duration = model.Run(inputDataContainers, outputDataContainers);

-        // Print output tensors (if requested)
-        if (!params.m_OutputTensorFiles.empty())
+        if (params.m_GenerateTensorData)
         {
-            if (params.m_GenerateTensorData)
-            {
-                BOOST_LOG_TRIVIAL(warning) << "Requested to write output to file, although the input was generated. "
-                                           << "Note that the output will not be useful.";
-            }
+            BOOST_LOG_TRIVIAL(warning) << "The input data was generated, note that the output will not be useful";
+        }

-            const auto& infosOut = model.GetOutputBindingInfos();
-            for (size_t i = 0; i < numOutputs; i++)
-            {
-                const armnn::TensorInfo& infoOut = infosOut[i].second;
-                auto outputTensorFile = params.m_OutputTensorFiles[i];
+        // Print output tensors
+        const auto& infosOut = model.GetOutputBindingInfos();
+        for (size_t i = 0; i < numOutputs; i++)
+        {
+            const armnn::TensorInfo& infoOut = infosOut[i].second;
+            auto outputTensorFile = params.m_OutputTensorFiles.empty() ? "" : params.m_OutputTensorFiles[i];

-                TensorPrinter printer(inferenceModelParams.m_OutputBindings[i], infoOut, outputTensorFile);
-                boost::apply_visitor(printer, outputDataContainers[i]);
-            }
+            TensorPrinter printer(inferenceModelParams.m_OutputBindings[i], infoOut, outputTensorFile);
+            boost::apply_visitor(printer, outputDataContainers[i]);
         }

         BOOST_LOG_TRIVIAL(info) << "\nInference time: " << std::setprecision(2)