diff options
author | Andre Ghattas <andre.ghattas@arm.com> | 2019-08-07 12:18:38 +0100 |
---|---|---|
committer | Matteo Martincigh <matteo.martincigh@arm.com> | 2019-09-04 09:41:41 +0000 |
commit | 23ae2eae1caefba4948e6afda154a66238b26c2a (patch) | |
tree | 084b6e7b45add57a363826d1088c7821fe93e9e9 /tests | |
parent | 9bb51d7c3668f6b2715735f286ffd89b727d6805 (diff) | |
download | armnn-23ae2eae1caefba4948e6afda154a66238b26c2a.tar.gz |
IVGCVBENCH-1337 Added additional layer parameters to dot file and -v option
* Generic layer parameters now show up in dot file
* Convolution layer parameters have also been added to dot file
* ExecuteNetwork has an additional -v flag which generates a dot file of the optimized model if set
Change-Id: I210bb19b45384eb3639b7e488c7a89049fa6f18d
Signed-off-by: Andre Ghattas <andre.ghattas@arm.com>
Signed-off-by: Szilard Papp <szilard.papp@arm.com>
Diffstat (limited to 'tests')
-rw-r--r-- | tests/ExecuteNetwork/ExecuteNetwork.cpp | 11 | ||||
-rw-r--r-- | tests/NetworkExecutionUtils/NetworkExecutionUtils.hpp | 29 |
2 files changed, 25 insertions, 15 deletions
diff --git a/tests/ExecuteNetwork/ExecuteNetwork.cpp b/tests/ExecuteNetwork/ExecuteNetwork.cpp index 0761551762..828d24985b 100644 --- a/tests/ExecuteNetwork/ExecuteNetwork.cpp +++ b/tests/ExecuteNetwork/ExecuteNetwork.cpp @@ -86,6 +86,8 @@ int main(int argc, const char* argv[]) "If left empty (the default), the output tensors will not be written to a file.") ("event-based-profiling,e", po::bool_switch()->default_value(false), "Enables built in profiler. If unset, defaults to off.") + ("visualize-optimized-model,v", po::bool_switch()->default_value(false), + "Enables built optimized model visualizer. If unset, defaults to off.") ("fp16-turbo-mode,h", po::bool_switch()->default_value(false), "If this option is enabled, FP32 layers, " "weights and biases will be converted to FP16 where the backend supports it") ("threshold-time,r", po::value<double>(&thresholdTime)->default_value(0.0), @@ -132,6 +134,7 @@ int main(int argc, const char* argv[]) // Get the value of the switch arguments. bool concurrent = vm["concurrent"].as<bool>(); bool enableProfiling = vm["event-based-profiling"].as<bool>(); + bool enableLayerDetails = vm["visualize-optimized-model"].as<bool>(); bool enableFp16TurboMode = vm["fp16-turbo-mode"].as<bool>(); bool quantizeInput = vm["quantize-input"].as<bool>(); bool printIntermediate = vm["print-intermediate-layers"].as<bool>(); @@ -176,7 +179,8 @@ int main(int argc, const char* argv[]) { testCase.values.insert(testCase.values.begin(), executableName); results.push_back(std::async(std::launch::async, RunCsvTest, std::cref(testCase), std::cref(runtime), - enableProfiling, enableFp16TurboMode, thresholdTime, printIntermediate)); + enableProfiling, enableFp16TurboMode, thresholdTime, printIntermediate, + enableLayerDetails)); } // Check results @@ -195,7 +199,8 @@ int main(int argc, const char* argv[]) { testCase.values.insert(testCase.values.begin(), executableName); if (RunCsvTest(testCase, runtime, enableProfiling, - enableFp16TurboMode, thresholdTime, 
printIntermediate) != EXIT_SUCCESS) + enableFp16TurboMode, thresholdTime, printIntermediate, + enableLayerDetails) != EXIT_SUCCESS) { return EXIT_FAILURE; } @@ -231,6 +236,6 @@ int main(int argc, const char* argv[]) return RunTest(modelFormat, inputTensorShapes, computeDevices, dynamicBackendsPath, modelPath, inputNames, inputTensorDataFilePaths, inputTypes, quantizeInput, outputTypes, outputNames, outputTensorFiles, enableProfiling, enableFp16TurboMode, thresholdTime, printIntermediate, - subgraphId); + subgraphId, enableLayerDetails); } } diff --git a/tests/NetworkExecutionUtils/NetworkExecutionUtils.hpp b/tests/NetworkExecutionUtils/NetworkExecutionUtils.hpp index 1813600fda..635eaf3af5 100644 --- a/tests/NetworkExecutionUtils/NetworkExecutionUtils.hpp +++ b/tests/NetworkExecutionUtils/NetworkExecutionUtils.hpp @@ -297,6 +297,7 @@ int MainImpl(const char* modelPath, const double& thresholdTime, bool printIntermediate, const size_t subgraphId, + bool enableLayerDetails = false, const std::shared_ptr<armnn::IRuntime>& runtime = nullptr) { using TContainer = boost::variant<std::vector<float>, std::vector<int>, std::vector<unsigned char>>; @@ -312,6 +313,7 @@ int MainImpl(const char* modelPath, params.m_ComputeDevices = computeDevices; params.m_DynamicBackendsPath = dynamicBackendsPath; params.m_PrintIntermediateLayers = printIntermediate; + params.m_VisualizePostOptimizationModel = enableLayerDetails; for(const std::string& inputName: inputNames) { @@ -456,6 +458,7 @@ int RunTest(const std::string& format, const double& thresholdTime, bool printIntermediate, const size_t subgraphId, + bool enableLayerDetails = false, const std::shared_ptr<armnn::IRuntime>& runtime = nullptr) { std::string modelFormat = boost::trim_copy(format); @@ -568,7 +571,7 @@ int RunTest(const std::string& format, dynamicBackendsPath, inputNamesVector, inputTensorShapes, inputTensorDataFilePathsVector, inputTypesVector, quantizeInput, outputTypesVector, outputNamesVector, 
outputTensorFilesVector, enableProfiling, - enableFp16TurboMode, thresholdTime, printIntermediate, subgraphId, runtime); + enableFp16TurboMode, thresholdTime, printIntermediate, subgraphId, enableLayerDetails, runtime); #else BOOST_LOG_TRIVIAL(fatal) << "Not built with serialization support."; return EXIT_FAILURE; @@ -584,7 +587,8 @@ int RunTest(const std::string& format, quantizeInput, outputTypesVector, outputNamesVector, outputTensorFilesVector, enableProfiling, enableFp16TurboMode, thresholdTime, - printIntermediate, subgraphId, runtime); + printIntermediate, subgraphId, enableLayerDetails, + runtime); #else BOOST_LOG_TRIVIAL(fatal) << "Not built with Caffe parser support."; return EXIT_FAILURE; @@ -598,9 +602,9 @@ int RunTest(const std::string& format, inputNamesVector, inputTensorShapes, inputTensorDataFilePathsVector, inputTypesVector, quantizeInput, outputTypesVector, outputNamesVector, - outputTensorFilesVector, enableProfiling, - enableFp16TurboMode, thresholdTime, - printIntermediate, subgraphId, runtime); + outputTensorFilesVector, enableProfiling, enableFp16TurboMode, + thresholdTime,printIntermediate, subgraphId, + enableLayerDetails, runtime); #else BOOST_LOG_TRIVIAL(fatal) << "Not built with Onnx parser support."; return EXIT_FAILURE; @@ -614,9 +618,9 @@ int RunTest(const std::string& format, inputNamesVector, inputTensorShapes, inputTensorDataFilePathsVector, inputTypesVector, quantizeInput, outputTypesVector, outputNamesVector, - outputTensorFilesVector, enableProfiling, - enableFp16TurboMode, thresholdTime, - printIntermediate, subgraphId, runtime); + outputTensorFilesVector, enableProfiling, enableFp16TurboMode, + thresholdTime,printIntermediate, subgraphId, + enableLayerDetails, runtime); #else BOOST_LOG_TRIVIAL(fatal) << "Not built with Tensorflow parser support."; return EXIT_FAILURE; @@ -637,8 +641,8 @@ int RunTest(const std::string& format, inputTensorDataFilePathsVector, inputTypesVector, quantizeInput, outputTypesVector, 
outputNamesVector, outputTensorFilesVector, enableProfiling, - enableFp16TurboMode, thresholdTime, - printIntermediate, subgraphId, runtime); + enableFp16TurboMode, thresholdTime, printIntermediate, + subgraphId, enableLayerDetails, runtime); #else BOOST_LOG_TRIVIAL(fatal) << "Unknown model format: '" << modelFormat << "'. Please include 'caffe', 'tensorflow', 'tflite' or 'onnx'"; @@ -655,7 +659,7 @@ int RunTest(const std::string& format, int RunCsvTest(const armnnUtils::CsvRow &csvRow, const std::shared_ptr<armnn::IRuntime>& runtime, const bool enableProfiling, const bool enableFp16TurboMode, const double& thresholdTime, - const bool printIntermediate) + const bool printIntermediate, bool enableLayerDetails = false) { std::string modelFormat; std::string modelPath; @@ -767,5 +771,6 @@ int RunCsvTest(const armnnUtils::CsvRow &csvRow, const std::shared_ptr<armnn::IR return RunTest(modelFormat, inputTensorShapes, computeDevices, dynamicBackendsPath, modelPath, inputNames, inputTensorDataFilePaths, inputTypes, quantizeInput, outputTypes, outputNames, outputTensorFiles, - enableProfiling, enableFp16TurboMode, thresholdTime, printIntermediate, subgraphId); + enableProfiling, enableFp16TurboMode, thresholdTime, printIntermediate, subgraphId, + enableLayerDetails); } |