author    Sadik Armagan <sadik.armagan@arm.com>    2019-09-02 11:46:28 +0100
committer Sadik Armagan <sadik.armagan@arm.com>    2019-09-02 11:46:28 +0100
commit    770862800da4776ba4dcebd441e1e609ffd26d14 (patch)
tree      92b158a0761893808d95c12b10d87bad148eeb45 /tests
parent    56e26ba60a7a42c657b1d36a38d7b766cf51e452 (diff)
download  armnn-770862800da4776ba4dcebd441e1e609ffd26d14.tar.gz
IVGCVSW-2945 ExecuteNetwork should have an option to save the output to a file
* Added "output-tensor-files,w" option to save output tensors to a file Signed-off-by: Sadik Armagan <sadik.armagan@arm.com> Change-Id: Ida457177c5cbd7dca228772405fd505d03b61bf9
Diffstat (limited to 'tests')
-rw-r--r--  tests/ExecuteNetwork/ExecuteNetwork.cpp               |  7
-rw-r--r--  tests/NetworkExecutionUtils/NetworkExecutionUtils.hpp | 72
2 files changed, 66 insertions(+), 13 deletions(-)
diff --git a/tests/ExecuteNetwork/ExecuteNetwork.cpp b/tests/ExecuteNetwork/ExecuteNetwork.cpp
index bccd50d929..0761551762 100644
--- a/tests/ExecuteNetwork/ExecuteNetwork.cpp
+++ b/tests/ExecuteNetwork/ExecuteNetwork.cpp
@@ -28,6 +28,7 @@ int main(int argc, const char* argv[])
std::string inputTypes;
std::string outputTypes;
std::string dynamicBackendsPath;
+ std::string outputTensorFiles;
double thresholdTime = 0.0;
@@ -80,6 +81,9 @@ int main(int argc, const char* argv[])
"Accepted values (float, int or qasymm8).")
("output-name,o", po::value(&outputNames),
"Identifier of the output tensors in the network separated by comma.")
+ ("write-outputs-to-file,w", po::value(&outputTensorFiles),
+ "Comma-separated list of output file paths keyed with the binding-id of the output slot. "
+ "If left empty (the default), the output tensors will not be written to a file.")
("event-based-profiling,e", po::bool_switch()->default_value(false),
"Enables built in profiler. If unset, defaults to off.")
("fp16-turbo-mode,h", po::bool_switch()->default_value(false), "If this option is enabled, FP32 layers, "
@@ -226,6 +230,7 @@ int main(int argc, const char* argv[])
return RunTest(modelFormat, inputTensorShapes, computeDevices, dynamicBackendsPath, modelPath, inputNames,
inputTensorDataFilePaths, inputTypes, quantizeInput, outputTypes, outputNames,
- enableProfiling, enableFp16TurboMode, thresholdTime, printIntermediate, subgraphId);
+ outputTensorFiles, enableProfiling, enableFp16TurboMode, thresholdTime, printIntermediate,
+ subgraphId);
}
}
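Note on the option's value: it arrives as one comma-separated string and is split downstream by ParseStringList (see NetworkExecutionUtils.hpp below), then checked against the number of output names. A minimal sketch of that split and the one-file-per-output pairing, assuming a plain single-character delimiter split (SplitList is a hypothetical stand-in for ParseStringList, which may handle whitespace and edge cases differently):

    #include <iostream>
    #include <sstream>
    #include <string>
    #include <vector>

    // Hypothetical stand-in for ParseStringList: split on one delimiter.
    std::vector<std::string> SplitList(const std::string& input, char delimiter)
    {
        std::vector<std::string> result;
        std::stringstream stream(input);
        std::string token;
        while (std::getline(stream, token, delimiter))
        {
            result.push_back(token);
        }
        return result;
    }

    int main()
    {
        // Hypothetical values for -o and -w.
        auto names = SplitList("out1,out2", ',');
        auto files = SplitList("/tmp/out1.txt,/tmp/out2.txt", ',');

        // Mirrors the check added to RunTest below: either no files at all,
        // or exactly one file path per output name.
        if (!files.empty() && files.size() != names.size())
        {
            std::cerr << "output-name and write-outputs-to-file must match\n";
            return 1;
        }
        for (size_t i = 0; i < names.size(); ++i)
        {
            std::cout << names[i] << " -> " << files[i] << "\n";
        }
        return 0;
    }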
diff --git a/tests/NetworkExecutionUtils/NetworkExecutionUtils.hpp b/tests/NetworkExecutionUtils/NetworkExecutionUtils.hpp
index 92aa5066c0..1813600fda 100644
--- a/tests/NetworkExecutionUtils/NetworkExecutionUtils.hpp
+++ b/tests/NetworkExecutionUtils/NetworkExecutionUtils.hpp
@@ -199,27 +199,34 @@ void RemoveDuplicateDevices(std::vector<armnn::BackendId>& computeDevices)
struct TensorPrinter : public boost::static_visitor<>
{
- TensorPrinter(const std::string& binding, const armnn::TensorInfo& info)
+ TensorPrinter(const std::string& binding, const armnn::TensorInfo& info, const std::string& outputTensorFile)
: m_OutputBinding(binding)
, m_Scale(info.GetQuantizationScale())
, m_Offset(info.GetQuantizationOffset())
+ , m_OutputTensorFile(outputTensorFile)
{}
void operator()(const std::vector<float>& values)
{
- ForEachValue(values, [](float value){
+ ForEachValue(values, [](float value)
+ {
printf("%f ", value);
});
+ WriteToFile(values);
}
void operator()(const std::vector<uint8_t>& values)
{
auto& scale = m_Scale;
auto& offset = m_Offset;
- ForEachValue(values, [&scale, &offset](uint8_t value)
+ std::vector<float> dequantizedValues;
+ ForEachValue(values, [&scale, &offset, &dequantizedValues](uint8_t value)
{
- printf("%f ", armnn::Dequantize(value, scale, offset));
+ auto dequantizedValue = armnn::Dequantize(value, scale, offset);
+ printf("%f ", dequantizedValue);
+ dequantizedValues.push_back(dequantizedValue);
});
+ WriteToFile(dequantizedValues);
}
void operator()(const std::vector<int>& values)
@@ -228,6 +235,7 @@ struct TensorPrinter : public boost::static_visitor<>
{
printf("%d ", value);
});
+ WriteToFile(values);
}
private:
@@ -242,9 +250,30 @@ private:
printf("\n");
}
+ template<typename T>
+ void WriteToFile(const std::vector<T>& values)
+ {
+ if (!m_OutputTensorFile.empty())
+ {
+ std::ofstream outputTensorFile;
+ outputTensorFile.open(m_OutputTensorFile, std::ofstream::out | std::ofstream::trunc);
+ if (outputTensorFile.is_open())
+ {
+ outputTensorFile << m_OutputBinding << ": ";
+ std::copy(values.begin(), values.end(), std::ostream_iterator<T>(outputTensorFile, " "));
+ }
+ else
+ {
+ BOOST_LOG_TRIVIAL(info) << "Output Tensor File: " << m_OutputTensorFile << " could not be opened!";
+ }
+ outputTensorFile.close();
+ }
+ }
+
std::string m_OutputBinding;
float m_Scale=0.0f;
int m_Offset=0;
+ std::string m_OutputTensorFile;
};
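For reference, the file format produced by the new WriteToFile member above is simply the output binding id followed by the space-separated values. A self-contained sketch of that pattern, using a hypothetical path and hypothetical tensor data:

    #include <algorithm>
    #include <fstream>
    #include <iterator>
    #include <string>
    #include <vector>

    int main()
    {
        const std::string outputBinding = "output";            // hypothetical binding id
        const std::vector<float> values = {0.1f, 0.2f, 0.3f};  // hypothetical tensor data

        // Same open mode and layout as WriteToFile above: "<binding>: v v v "
        std::ofstream file("/tmp/output.txt", std::ofstream::out | std::ofstream::trunc);
        if (file.is_open())
        {
            file << outputBinding << ": ";
            std::copy(values.begin(), values.end(),
                      std::ostream_iterator<float>(file, " "));
        }
        return 0;
    }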
@@ -262,6 +291,7 @@ int MainImpl(const char* modelPath,
bool quantizeInput,
const std::vector<string>& outputTypes,
const std::vector<string>& outputNames,
+ const std::vector<string>& outputTensorFiles,
bool enableProfiling,
bool enableFp16TurboMode,
const double& thresholdTime,
@@ -373,7 +403,8 @@ int MainImpl(const char* modelPath,
for (size_t i = 0; i < numOutputs; i++)
{
const armnn::TensorInfo& infoOut = infosOut[i].second;
- TensorPrinter printer(params.m_OutputBindings[i], infoOut);
+ auto outputTensorFile = outputTensorFiles.empty() ? "" : outputTensorFiles[i];
+ TensorPrinter printer(params.m_OutputBindings[i], infoOut, outputTensorFile);
boost::apply_visitor(printer, outputDataContainers[i]);
}
@@ -419,6 +450,7 @@ int RunTest(const std::string& format,
bool quantizeInput,
const std::string& outputTypes,
const std::string& outputNames,
+ const std::string& outputTensorFiles,
bool enableProfiling,
bool enableFp16TurboMode,
const double& thresholdTime,
@@ -435,6 +467,7 @@ int RunTest(const std::string& format,
std::vector<std::string> outputNamesVector = ParseStringList(outputNames, ",");
std::vector<std::string> inputTypesVector = ParseStringList(inputTypes, ",");
std::vector<std::string> outputTypesVector = ParseStringList(outputTypes, ",");
+ std::vector<std::string> outputTensorFilesVector = ParseStringList(outputTensorFiles, ",");
// Parse model binary flag from the model-format string we got from the command-line
bool isModelBinary;
@@ -465,6 +498,13 @@ int RunTest(const std::string& format,
return EXIT_FAILURE;
}
+ if ((outputTensorFilesVector.size() != 0) &&
+ (outputTensorFilesVector.size() != outputNamesVector.size()))
+ {
+ BOOST_LOG_TRIVIAL(fatal) << "output-name and write-outputs-to-file must have the same amount of elements.";
+ return EXIT_FAILURE;
+ }
+
if (inputTypesVector.size() == 0)
{
//Defaults the value of all inputs to "float"
@@ -527,7 +567,7 @@ int RunTest(const std::string& format,
modelPath.c_str(), isModelBinary, computeDevice,
dynamicBackendsPath, inputNamesVector, inputTensorShapes,
inputTensorDataFilePathsVector, inputTypesVector, quantizeInput,
- outputTypesVector, outputNamesVector, enableProfiling,
+ outputTypesVector, outputNamesVector, outputTensorFilesVector, enableProfiling,
enableFp16TurboMode, thresholdTime, printIntermediate, subgraphId, runtime);
#else
BOOST_LOG_TRIVIAL(fatal) << "Not built with serialization support.";
@@ -542,7 +582,8 @@ int RunTest(const std::string& format,
inputNamesVector, inputTensorShapes,
inputTensorDataFilePathsVector, inputTypesVector,
quantizeInput, outputTypesVector, outputNamesVector,
- enableProfiling, enableFp16TurboMode, thresholdTime,
+ outputTensorFilesVector, enableProfiling,
+ enableFp16TurboMode, thresholdTime,
printIntermediate, subgraphId, runtime);
#else
BOOST_LOG_TRIVIAL(fatal) << "Not built with Caffe parser support.";
@@ -557,7 +598,8 @@ int RunTest(const std::string& format,
inputNamesVector, inputTensorShapes,
inputTensorDataFilePathsVector, inputTypesVector,
quantizeInput, outputTypesVector, outputNamesVector,
- enableProfiling, enableFp16TurboMode, thresholdTime,
+ outputTensorFilesVector, enableProfiling,
+ enableFp16TurboMode, thresholdTime,
printIntermediate, subgraphId, runtime);
#else
BOOST_LOG_TRIVIAL(fatal) << "Not built with Onnx parser support.";
@@ -572,7 +614,8 @@ int RunTest(const std::string& format,
inputNamesVector, inputTensorShapes,
inputTensorDataFilePathsVector, inputTypesVector,
quantizeInput, outputTypesVector, outputNamesVector,
- enableProfiling, enableFp16TurboMode, thresholdTime,
+ outputTensorFilesVector, enableProfiling,
+ enableFp16TurboMode, thresholdTime,
printIntermediate, subgraphId, runtime);
#else
BOOST_LOG_TRIVIAL(fatal) << "Not built with Tensorflow parser support.";
@@ -593,7 +636,8 @@ int RunTest(const std::string& format,
inputNamesVector, inputTensorShapes,
inputTensorDataFilePathsVector, inputTypesVector,
quantizeInput, outputTypesVector, outputNamesVector,
- enableProfiling, enableFp16TurboMode, thresholdTime,
+ outputTensorFilesVector, enableProfiling,
+ enableFp16TurboMode, thresholdTime,
printIntermediate, subgraphId, runtime);
#else
BOOST_LOG_TRIVIAL(fatal) << "Unknown model format: '" << modelFormat <<
@@ -622,6 +666,7 @@ int RunCsvTest(const armnnUtils::CsvRow &csvRow, const std::shared_ptr<armnn::IR
std::string inputTypes;
std::string outputTypes;
std::string dynamicBackendsPath;
+ std::string outputTensorFiles;
size_t subgraphId = 0;
@@ -664,7 +709,10 @@ int RunCsvTest(const armnnUtils::CsvRow &csvRow, const std::shared_ptr<armnn::IR
"If unset, defaults to \"float\" for all defined outputs. "
"Accepted values (float, int or qasymm8).")
("output-name,o", po::value(&outputNames),
- "Identifier of the output tensors in the network separated by comma.");
+ "Identifier of the output tensors in the network separated by comma.")
+ ("write-outputs-to-file,w", po::value(&outputTensorFiles),
+ "Comma-separated list of output file paths keyed with the binding-id of the output slot. "
+ "If left empty (the default), the output tensors will not be written to a file.");
}
catch (const std::exception& e)
{
@@ -718,6 +766,6 @@ int RunCsvTest(const armnnUtils::CsvRow &csvRow, const std::shared_ptr<armnn::IR
}
return RunTest(modelFormat, inputTensorShapes, computeDevices, dynamicBackendsPath, modelPath, inputNames,
- inputTensorDataFilePaths, inputTypes, quantizeInput, outputTypes, outputNames,
+ inputTensorDataFilePaths, inputTypes, quantizeInput, outputTypes, outputNames, outputTensorFiles,
enableProfiling, enableFp16TurboMode, thresholdTime, printIntermediate, subgraphId);
}
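One behavioral note on the qasymm8 path: the values written to file are the dequantized floats, not the raw bytes. A sketch of the conversion the uint8_t visitor applies, assuming the affine formula scale * (value - offset) that armnn::Dequantize computes, with hypothetical scale, offset, and data:

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    // Affine dequantization, as assumed for armnn::Dequantize.
    float Dequantize(uint8_t value, float scale, int32_t offset)
    {
        return scale * (static_cast<int32_t>(value) - offset);
    }

    int main()
    {
        const float scale = 0.5f;                         // hypothetical quantization scale
        const int32_t offset = 128;                       // hypothetical zero point
        const std::vector<uint8_t> raw = {120, 128, 136}; // hypothetical tensor data
        for (uint8_t v : raw)
        {
            printf("%f ", Dequantize(v, scale, offset));  // prints -4.0 0.0 4.0
        }
        printf("\n");
        return 0;
    }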