author    Matthew Jackson <matthew.jackson@arm.com>    2019-08-27 15:35:59 +0100
committer Matteo Martincigh <matteo.martincigh@arm.com>    2019-08-28 14:07:28 +0000
commit    54658b9c8cc27d513f35e078b8586767262d07f2 (patch)
tree      78af07b574caf9b9476551c894fc8f03e56df95a /tests
parent    e89ebad9cd78096d9c18a28fa01337dd622f5081 (diff)
IVGCVSW-3675 Add ExecuteNetwork option to print intermediate layers
Signed-off-by: Matthew Jackson <matthew.jackson@arm.com>
Change-Id: Id7ab186ec607ff6e5ee6869c4ad562af4c40b97a
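Usage note: the new switch added by this commit is --print-intermediate-layers, with -p as its short form. A hypothetical invocation might look as follows; every option other than the new switch is illustrative and not taken from this commit:

    ExecuteNetwork --model-format tflite-binary --model-path model.tflite \
                   --compute CpuRef --print-intermediate-layers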
Diffstat (limited to 'tests')
-rw-r--r--  tests/ExecuteNetwork/ExecuteNetwork.cpp                 12
-rw-r--r--  tests/InferenceModel.hpp                                  3
-rw-r--r--  tests/NetworkExecutionUtils/NetworkExecutionUtils.hpp    18
3 files changed, 22 insertions(+), 11 deletions(-)
diff --git a/tests/ExecuteNetwork/ExecuteNetwork.cpp b/tests/ExecuteNetwork/ExecuteNetwork.cpp
index 1a0306244b..bccd50d929 100644
--- a/tests/ExecuteNetwork/ExecuteNetwork.cpp
+++ b/tests/ExecuteNetwork/ExecuteNetwork.cpp
@@ -87,7 +87,9 @@ int main(int argc, const char* argv[])
("threshold-time,r", po::value<double>(&thresholdTime)->default_value(0.0),
"Threshold time is the maximum allowed time for inference measured in milliseconds. If the actual "
"inference time is greater than the threshold time, the test will fail. By default, no threshold "
- "time is used.");
+ "time is used.")
+ ("print-intermediate-layers,p", po::bool_switch()->default_value(false),
+ "If this option is enabled, the output of every graph layer will be printed.");
}
catch (const std::exception& e)
{
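The option is declared with Boost.Program_options' bool_switch, which takes no value on the command line: the flag defaults to false and flips to true when present. Below is a minimal, self-contained sketch of that pattern using the same flag name; it is a stand-alone illustration, not the actual ExecuteNetwork code:

    #include <boost/program_options.hpp>
    #include <iostream>

    namespace po = boost::program_options;

    int main(int argc, const char* argv[])
    {
        po::options_description desc("Options");
        desc.add_options()
            // bool_switch() expects no argument after the flag;
            // the stored value is false unless the flag appears.
            ("print-intermediate-layers,p", po::bool_switch()->default_value(false),
             "If this option is enabled, the output of every graph layer will be printed.");

        po::variables_map vm;
        po::store(po::parse_command_line(argc, argv, desc), vm);
        po::notify(vm);

        // Read the switch back exactly as ExecuteNetwork.cpp does.
        bool printIntermediate = vm["print-intermediate-layers"].as<bool>();
        std::cout << std::boolalpha << printIntermediate << std::endl;
        return 0;
    }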
@@ -128,6 +130,7 @@ int main(int argc, const char* argv[])
bool enableProfiling = vm["event-based-profiling"].as<bool>();
bool enableFp16TurboMode = vm["fp16-turbo-mode"].as<bool>();
bool quantizeInput = vm["quantize-input"].as<bool>();
+ bool printIntermediate = vm["print-intermediate-layers"].as<bool>();
// Check whether we have to load test cases from a file.
if (CheckOption(vm, "test-cases"))
@@ -169,7 +172,7 @@ int main(int argc, const char* argv[])
{
testCase.values.insert(testCase.values.begin(), executableName);
results.push_back(std::async(std::launch::async, RunCsvTest, std::cref(testCase), std::cref(runtime),
- enableProfiling, enableFp16TurboMode, thresholdTime));
+ enableProfiling, enableFp16TurboMode, thresholdTime, printIntermediate));
}
// Check results
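In concurrent mode each CSV test case is dispatched to its own thread with std::async, and std::cref passes the arguments by reference rather than by copy, so the new printIntermediate flag simply becomes one more forwarded argument. A reduced, self-contained sketch of that pattern, with the hypothetical RunOneCase standing in for RunCsvTest:

    #include <functional>
    #include <future>
    #include <iostream>
    #include <vector>

    // Stand-in for RunCsvTest: returns an EXIT_SUCCESS-style code.
    int RunOneCase(const int& testCase, bool printIntermediate)
    {
        return (testCase >= 0 && printIntermediate) ? 0 : 1;
    }

    int main()
    {
        std::vector<int> testCases{1, 2, 3};
        std::vector<std::future<int>> results;
        for (const int& testCase : testCases)
        {
            // std::cref hands the task a reference instead of a copy;
            // the referenced object must outlive the future.
            results.push_back(std::async(std::launch::async, RunOneCase,
                                         std::cref(testCase), true));
        }
        for (auto& result : results)
        {
            std::cout << result.get() << std::endl; // blocks until the case finishes
        }
        return 0;
    }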
@@ -187,7 +190,8 @@ int main(int argc, const char* argv[])
for (auto& testCase : testCases)
{
testCase.values.insert(testCase.values.begin(), executableName);
- if (RunCsvTest(testCase, runtime, enableProfiling, enableFp16TurboMode, thresholdTime) != EXIT_SUCCESS)
+ if (RunCsvTest(testCase, runtime, enableProfiling,
+ enableFp16TurboMode, thresholdTime, printIntermediate) != EXIT_SUCCESS)
{
return EXIT_FAILURE;
}
@@ -222,6 +226,6 @@ int main(int argc, const char* argv[])
return RunTest(modelFormat, inputTensorShapes, computeDevices, dynamicBackendsPath, modelPath, inputNames,
inputTensorDataFilePaths, inputTypes, quantizeInput, outputTypes, outputNames,
- enableProfiling, enableFp16TurboMode, thresholdTime, subgraphId);
+ enableProfiling, enableFp16TurboMode, thresholdTime, printIntermediate, subgraphId);
}
}
diff --git a/tests/InferenceModel.hpp b/tests/InferenceModel.hpp
index 13e80319f4..0ede8983cf 100644
--- a/tests/InferenceModel.hpp
+++ b/tests/InferenceModel.hpp
@@ -91,6 +91,7 @@ struct Params
bool m_IsModelBinary;
bool m_VisualizePostOptimizationModel;
bool m_EnableFp16TurboMode;
+ bool m_PrintIntermediateLayers;
Params()
: m_ComputeDevices{}
@@ -98,6 +99,7 @@ struct Params
, m_IsModelBinary(true)
, m_VisualizePostOptimizationModel(false)
, m_EnableFp16TurboMode(false)
+ , m_PrintIntermediateLayers(false)
{}
};
@@ -395,6 +397,7 @@ public:
armnn::OptimizerOptions options;
options.m_ReduceFp32ToFp16 = params.m_EnableFp16TurboMode;
+ options.m_Debug = params.m_PrintIntermediateLayers;
optNet = armnn::Optimize(*network, params.m_ComputeDevices, m_Runtime->GetDeviceSpec(), options);
if (!optNet)
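The flag ultimately lands in armnn::OptimizerOptions::m_Debug, which is what makes the output of every graph layer get printed, as the option's help text describes. A minimal sketch of this wiring, assuming an already-built network and choosing CpuRef as an illustrative backend; this is not the actual InferenceModel code:

    #include <armnn/ArmNN.hpp>

    // Assumes `network` was already built, e.g. by one of the parsers
    // that InferenceModel.hpp supports.
    armnn::IOptimizedNetworkPtr OptimizeWithDebug(const armnn::INetwork& network,
                                                  armnn::IRuntime& runtime,
                                                  bool enableFp16TurboMode,
                                                  bool printIntermediate)
    {
        armnn::OptimizerOptions options;
        options.m_ReduceFp32ToFp16 = enableFp16TurboMode; // pre-existing option
        options.m_Debug = printIntermediate;              // new: print each layer's output
        return armnn::Optimize(network, {armnn::Compute::CpuRef},
                               runtime.GetDeviceSpec(), options);
    }

The caller is still expected to check the returned pointer, as the surrounding hunk's if (!optNet) does.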
diff --git a/tests/NetworkExecutionUtils/NetworkExecutionUtils.hpp b/tests/NetworkExecutionUtils/NetworkExecutionUtils.hpp
index 810f499a9c..ddf797b3fe 100644
--- a/tests/NetworkExecutionUtils/NetworkExecutionUtils.hpp
+++ b/tests/NetworkExecutionUtils/NetworkExecutionUtils.hpp
@@ -265,6 +265,7 @@ int MainImpl(const char* modelPath,
bool enableProfiling,
bool enableFp16TurboMode,
const double& thresholdTime,
+ bool printIntermediate,
const size_t subgraphId,
const std::shared_ptr<armnn::IRuntime>& runtime = nullptr)
{
@@ -280,6 +281,7 @@ int MainImpl(const char* modelPath,
params.m_IsModelBinary = isModelBinary;
params.m_ComputeDevices = computeDevices;
params.m_DynamicBackendsPath = dynamicBackendsPath;
+ params.m_PrintIntermediateLayers = printIntermediate;
for(const std::string& inputName: inputNames)
{
@@ -420,6 +422,7 @@ int RunTest(const std::string& format,
bool enableProfiling,
bool enableFp16TurboMode,
const double& thresholdTime,
+ bool printIntermediate,
const size_t subgraphId,
const std::shared_ptr<armnn::IRuntime>& runtime = nullptr)
{
@@ -519,7 +522,7 @@ int RunTest(const std::string& format,
dynamicBackendsPath, inputNamesVector, inputTensorShapes,
inputTensorDataFilePathsVector, inputTypesVector, quantizeInput,
outputTypesVector, outputNamesVector, enableProfiling,
- enableFp16TurboMode, thresholdTime, subgraphId, runtime);
+ enableFp16TurboMode, thresholdTime, printIntermediate, subgraphId, runtime);
#else
BOOST_LOG_TRIVIAL(fatal) << "Not built with serialization support.";
return EXIT_FAILURE;
@@ -534,7 +537,7 @@ int RunTest(const std::string& format,
inputTensorDataFilePathsVector, inputTypesVector,
quantizeInput, outputTypesVector, outputNamesVector,
enableProfiling, enableFp16TurboMode, thresholdTime,
- subgraphId, runtime);
+ printIntermediate, subgraphId, runtime);
#else
BOOST_LOG_TRIVIAL(fatal) << "Not built with Caffe parser support.";
return EXIT_FAILURE;
@@ -549,7 +552,7 @@ int RunTest(const std::string& format,
inputTensorDataFilePathsVector, inputTypesVector,
quantizeInput, outputTypesVector, outputNamesVector,
enableProfiling, enableFp16TurboMode, thresholdTime,
- subgraphId, runtime);
+ printIntermediate, subgraphId, runtime);
#else
BOOST_LOG_TRIVIAL(fatal) << "Not built with Onnx parser support.";
return EXIT_FAILURE;
@@ -564,7 +567,7 @@ int RunTest(const std::string& format,
inputTensorDataFilePathsVector, inputTypesVector,
quantizeInput, outputTypesVector, outputNamesVector,
enableProfiling, enableFp16TurboMode, thresholdTime,
- subgraphId, runtime);
+ printIntermediate, subgraphId, runtime);
#else
BOOST_LOG_TRIVIAL(fatal) << "Not built with Tensorflow parser support.";
return EXIT_FAILURE;
@@ -585,7 +588,7 @@ int RunTest(const std::string& format,
inputTensorDataFilePathsVector, inputTypesVector,
quantizeInput, outputTypesVector, outputNamesVector,
enableProfiling, enableFp16TurboMode, thresholdTime,
- subgraphId, runtime);
+ printIntermediate, subgraphId, runtime);
#else
BOOST_LOG_TRIVIAL(fatal) << "Unknown model format: '" << modelFormat <<
"'. Please include 'caffe', 'tensorflow', 'tflite' or 'onnx'";
@@ -601,7 +604,8 @@ int RunTest(const std::string& format,
}
int RunCsvTest(const armnnUtils::CsvRow &csvRow, const std::shared_ptr<armnn::IRuntime>& runtime,
- const bool enableProfiling, const bool enableFp16TurboMode, const double& thresholdTime)
+ const bool enableProfiling, const bool enableFp16TurboMode, const double& thresholdTime,
+ const bool printIntermediate)
{
std::string modelFormat;
std::string modelPath;
@@ -709,5 +713,5 @@ int RunCsvTest(const armnnUtils::CsvRow &csvRow, const std::shared_ptr<armnn::IR
return RunTest(modelFormat, inputTensorShapes, computeDevices, dynamicBackendsPath, modelPath, inputNames,
inputTensorDataFilePaths, inputTypes, quantizeInput, outputTypes, outputNames,
- enableProfiling, enableFp16TurboMode, thresholdTime, subgraphId);
+ enableProfiling, enableFp16TurboMode, thresholdTime, printIntermediate, subgraphId);
}