ArmNN
 20.02
ExecuteNetwork.cpp File Reference

Go to the source code of this file.

Functions

int main (int argc, const char *argv[])
 

Function Documentation

◆ main()

int main(int argc, const char* argv[])

Definition at line 9 of file ExecuteNetwork.cpp.

References ARMNN_LOG, armnn::BackendRegistryInstance(), armnn::ConfigureLogging(), IRuntime::Create(), armnn::Debug, BackendRegistry::GetBackendIdsAsString(), armnn::Info, IRuntime::CreationOptions::ExternalProfilingOptions::m_CapturePeriod, IRuntime::CreationOptions::m_DynamicBackendsPath, IRuntime::CreationOptions::m_EnableGpuProfiling, IRuntime::CreationOptions::ExternalProfilingOptions::m_EnableProfiling, IRuntime::CreationOptions::ExternalProfilingOptions::m_FileOnly, IRuntime::CreationOptions::ExternalProfilingOptions::m_IncomingCaptureFile, IRuntime::CreationOptions::ExternalProfilingOptions::m_OutgoingCaptureFile, IRuntime::CreationOptions::m_ProfilingOptions, options, CsvReader::ParseFile(), RunCsvTest(), and RunTest().

10 {
11  // Configures logging for both the ARMNN library and this test program.
12 #ifdef NDEBUG
14 #else
16 #endif
17  armnn::ConfigureLogging(true, true, level);
18 
19  std::string testCasesFile;
20 
21  std::string modelFormat;
22  std::string modelPath;
23  std::string inputNames;
24  std::string inputTensorShapes;
25  std::string inputTensorDataFilePaths;
26  std::string outputNames;
27  std::string inputTypes;
28  std::string outputTypes;
29  std::string dynamicBackendsPath;
30  std::string outputTensorFiles;
31 
32  // external profiling parameters
33  std::string outgoingCaptureFile;
34  std::string incomingCaptureFile;
35  uint32_t counterCapturePeriod;
36 
37  double thresholdTime = 0.0;
38 
39  size_t subgraphId = 0;
40 
41  const std::string backendsMessage = "REQUIRED: Which device to run layers on by default. Possible choices: "
43  po::options_description desc("Options");
44  try
45  {
46  desc.add_options()
47  ("help", "Display usage information")
48  ("compute,c", po::value<std::vector<std::string>>()->multitoken()->required(),
49  backendsMessage.c_str())
50  ("test-cases,t", po::value(&testCasesFile), "Path to a CSV file containing test cases to run. "
51  "If set, further parameters -- with the exception of compute device and concurrency -- will be ignored, "
52  "as they are expected to be defined in the file for each test in particular.")
53  ("concurrent,n", po::bool_switch()->default_value(false),
54  "Whether or not the test cases should be executed in parallel")
55  ("model-format,f", po::value(&modelFormat)->required(),
56  "armnn-binary, caffe-binary, caffe-text, onnx-binary, onnx-text, tflite-binary, tensorflow-binary or "
57  "tensorflow-text.")
58  ("model-path,m", po::value(&modelPath)->required(), "Path to model file, e.g. .armnn, .caffemodel, "
59  ".prototxt, .tflite, .onnx")
60  ("dynamic-backends-path,b", po::value(&dynamicBackendsPath),
61  "Path where to load any available dynamic backend from. "
62  "If left empty (the default), dynamic backends will not be used.")
63  ("input-name,i", po::value(&inputNames),
64  "Identifier of the input tensors in the network separated by comma.")
65  ("subgraph-number,x", po::value<size_t>(&subgraphId)->default_value(0), "Id of the subgraph to be executed."
66  "Defaults to 0")
67  ("input-tensor-shape,s", po::value(&inputTensorShapes),
68  "The shape of the input tensors in the network as a flat array of integers separated by comma."
69  "Several shapes can be passed by separating them with a colon (:)."
70  "This parameter is optional, depending on the network.")
71  ("input-tensor-data,d", po::value(&inputTensorDataFilePaths)->default_value(""),
72  "Path to files containing the input data as a flat array separated by whitespace. "
73  "Several paths can be passed by separating them with a comma. If not specified, the network will be run "
74  "with dummy data (useful for profiling).")
75  ("input-type,y",po::value(&inputTypes), "The type of the input tensors in the network separated by comma. "
76  "If unset, defaults to \"float\" for all defined inputs. "
77  "Accepted values (float, int or qasymm8)")
78  ("quantize-input,q",po::bool_switch()->default_value(false),
79  "If this option is enabled, all float inputs will be quantized to qasymm8. "
80  "If unset, default to not quantized. "
81  "Accepted values (true or false)")
82  ("output-type,z",po::value(&outputTypes),
83  "The type of the output tensors in the network separated by comma. "
84  "If unset, defaults to \"float\" for all defined outputs. "
85  "Accepted values (float, int or qasymm8).")
86  ("dequantize-output,l",po::bool_switch()->default_value(false),
87  "If this option is enabled, all quantized outputs will be dequantized to float. "
88  "If unset, default to not get dequantized. "
89  "Accepted values (true or false)")
90  ("output-name,o", po::value(&outputNames),
91  "Identifier of the output tensors in the network separated by comma.")
92  ("write-outputs-to-file,w", po::value(&outputTensorFiles),
93  "Comma-separated list of output file paths keyed with the binding-id of the output slot. "
94  "If left empty (the default), the output tensors will not be written to a file.")
95  ("event-based-profiling,e", po::bool_switch()->default_value(false),
96  "Enables built in profiler. If unset, defaults to off.")
97  ("visualize-optimized-model,v", po::bool_switch()->default_value(false),
98  "Enables built optimized model visualizer. If unset, defaults to off.")
99  ("fp16-turbo-mode,h", po::bool_switch()->default_value(false), "If this option is enabled, FP32 layers, "
100  "weights and biases will be converted to FP16 where the backend supports it")
101  ("threshold-time,r", po::value<double>(&thresholdTime)->default_value(0.0),
102  "Threshold time is the maximum allowed time for inference measured in milliseconds. If the actual "
103  "inference time is greater than the threshold time, the test will fail. By default, no threshold "
104  "time is used.")
105  ("print-intermediate-layers,p", po::bool_switch()->default_value(false),
106  "If this option is enabled, the output of every graph layer will be printed.")
107  ("enable-external-profiling,a", po::bool_switch()->default_value(false),
108  "If enabled external profiling will be switched on")
109  ("outgoing-capture-file,j", po::value(&outgoingCaptureFile),
110  "If specified the outgoing external profiling packets will be captured in this binary file")
111  ("incoming-capture-file,k", po::value(&incomingCaptureFile),
112  "If specified the incoming external profiling packets will be captured in this binary file")
113  ("file-only-external-profiling,g", po::bool_switch()->default_value(false),
114  "If enabled then the 'file-only' test mode of external profiling will be enabled")
115  ("counter-capture-period,u", po::value<uint32_t>(&counterCapturePeriod)->default_value(150u),
116  "If profiling is enabled in 'file-only' mode this is the capture period that will be used in the test")
117  ("parse-unsupported", po::bool_switch()->default_value(false),
118  "Add unsupported operators as stand-in layers (where supported by parser)");
119  }
120  catch (const std::exception& e)
121  {
122  // Coverity points out that default_value(...) can throw a bad_lexical_cast,
123  // and that desc.add_options() can throw boost::io::too_few_args.
124  // They really won't in any of these cases.
125  BOOST_ASSERT_MSG(false, "Caught unexpected exception");
126  ARMNN_LOG(fatal) << "Fatal internal error: " << e.what();
127  return EXIT_FAILURE;
128  }
129 
130  // Parses the command-line.
131  po::variables_map vm;
132  try
133  {
134  po::store(po::parse_command_line(argc, argv, desc), vm);
135 
136  if (CheckOption(vm, "help") || argc <= 1)
137  {
138  std::cout << "Executes a neural network model using the provided input tensor. " << std::endl;
139  std::cout << "Prints the resulting output tensor." << std::endl;
140  std::cout << std::endl;
141  std::cout << desc << std::endl;
142  return EXIT_SUCCESS;
143  }
144 
145  po::notify(vm);
146  }
147  catch (const po::error& e)
148  {
149  std::cerr << e.what() << std::endl << std::endl;
150  std::cerr << desc << std::endl;
151  return EXIT_FAILURE;
152  }
153 
154  // Get the value of the switch arguments.
155  bool concurrent = vm["concurrent"].as<bool>();
156  bool enableProfiling = vm["event-based-profiling"].as<bool>();
157  bool enableLayerDetails = vm["visualize-optimized-model"].as<bool>();
158  bool enableFp16TurboMode = vm["fp16-turbo-mode"].as<bool>();
159  bool quantizeInput = vm["quantize-input"].as<bool>();
160  bool dequantizeOutput = vm["dequantize-output"].as<bool>();
161  bool printIntermediate = vm["print-intermediate-layers"].as<bool>();
162  bool enableExternalProfiling = vm["enable-external-profiling"].as<bool>();
163  bool fileOnlyExternalProfiling = vm["file-only-external-profiling"].as<bool>();
164  bool parseUnsupported = vm["parse-unsupported"].as<bool>();
165 
166 
167  // Check whether we have to load test cases from a file.
168  if (CheckOption(vm, "test-cases"))
169  {
170  // Check that the file exists.
171  if (!boost::filesystem::exists(testCasesFile))
172  {
173  ARMNN_LOG(fatal) << "Given file \"" << testCasesFile << "\" does not exist";
174  return EXIT_FAILURE;
175  }
176 
177  // Parse CSV file and extract test cases
178  armnnUtils::CsvReader reader;
179  std::vector<armnnUtils::CsvRow> testCases = reader.ParseFile(testCasesFile);
180 
181  // Check that there is at least one test case to run
182  if (testCases.empty())
183  {
184  ARMNN_LOG(fatal) << "Given file \"" << testCasesFile << "\" has no test cases";
185  return EXIT_FAILURE;
186  }
187 
188  // Create runtime
190  options.m_EnableGpuProfiling = enableProfiling;
191  options.m_DynamicBackendsPath = dynamicBackendsPath;
192  options.m_ProfilingOptions.m_EnableProfiling = enableExternalProfiling;
193  options.m_ProfilingOptions.m_IncomingCaptureFile = incomingCaptureFile;
194  options.m_ProfilingOptions.m_OutgoingCaptureFile = outgoingCaptureFile;
195  options.m_ProfilingOptions.m_FileOnly = fileOnlyExternalProfiling;
196  options.m_ProfilingOptions.m_CapturePeriod = counterCapturePeriod;
197  std::shared_ptr<armnn::IRuntime> runtime(armnn::IRuntime::Create(options));
198 
199  const std::string executableName("ExecuteNetwork");
200 
201  // Check whether we need to run the test cases concurrently
202  if (concurrent)
203  {
204  std::vector<std::future<int>> results;
205  results.reserve(testCases.size());
206 
207  // Run each test case in its own thread
208  for (auto& testCase : testCases)
209  {
210  testCase.values.insert(testCase.values.begin(), executableName);
211  results.push_back(std::async(std::launch::async, RunCsvTest, std::cref(testCase), std::cref(runtime),
212  enableProfiling, enableFp16TurboMode, thresholdTime, printIntermediate,
213  enableLayerDetails, parseUnsupported));
214  }
215 
216  // Check results
217  for (auto& result : results)
218  {
219  if (result.get() != EXIT_SUCCESS)
220  {
221  return EXIT_FAILURE;
222  }
223  }
224  }
225  else
226  {
227  // Run tests sequentially
228  for (auto& testCase : testCases)
229  {
230  testCase.values.insert(testCase.values.begin(), executableName);
231  if (RunCsvTest(testCase, runtime, enableProfiling,
232  enableFp16TurboMode, thresholdTime, printIntermediate,
233  enableLayerDetails, parseUnsupported) != EXIT_SUCCESS)
234  {
235  return EXIT_FAILURE;
236  }
237  }
238  }
239 
240  return EXIT_SUCCESS;
241  }
242  else // Run single test
243  {
244  // Get the preferred order of compute devices. If none are specified, default to using CpuRef
245  const std::string computeOption("compute");
246  std::vector<std::string> computeDevicesAsStrings =
247  CheckOption(vm, computeOption.c_str()) ?
248  vm[computeOption].as<std::vector<std::string>>() :
249  std::vector<std::string>();
250  std::vector<armnn::BackendId> computeDevices(computeDevicesAsStrings.begin(), computeDevicesAsStrings.end());
251 
252  // Remove duplicates from the list of compute devices.
253  RemoveDuplicateDevices(computeDevices);
254 
255  try
256  {
257  CheckOptionDependencies(vm);
258  }
259  catch (const po::error& e)
260  {
261  std::cerr << e.what() << std::endl << std::endl;
262  std::cerr << desc << std::endl;
263  return EXIT_FAILURE;
264  }
265  // Create runtime
267  options.m_EnableGpuProfiling = enableProfiling;
268  options.m_DynamicBackendsPath = dynamicBackendsPath;
269  options.m_ProfilingOptions.m_EnableProfiling = enableExternalProfiling;
270  options.m_ProfilingOptions.m_IncomingCaptureFile = incomingCaptureFile;
271  options.m_ProfilingOptions.m_OutgoingCaptureFile = outgoingCaptureFile;
272  options.m_ProfilingOptions.m_FileOnly = fileOnlyExternalProfiling;
273  options.m_ProfilingOptions.m_CapturePeriod = counterCapturePeriod;
274  std::shared_ptr<armnn::IRuntime> runtime(armnn::IRuntime::Create(options));
275 
276  return RunTest(modelFormat, inputTensorShapes, computeDevices, dynamicBackendsPath, modelPath, inputNames,
277  inputTensorDataFilePaths, inputTypes, quantizeInput, outputTypes, outputNames,
278  outputTensorFiles, dequantizeOutput, enableProfiling, enableFp16TurboMode, thresholdTime,
279  printIntermediate, subgraphId, enableLayerDetails, parseUnsupported, runtime);
280  }
281 }
static IRuntimePtr Create(const CreationOptions &options)
Definition: Runtime.cpp:32
int RunTest(const std::string &format, const std::string &inputTensorShapesStr, const vector< armnn::BackendId > &computeDevices, const std::string &dynamicBackendsPath, const std::string &path, const std::string &inputNames, const std::string &inputTensorDataFilePaths, const std::string &inputTypes, bool quantizeInput, const std::string &outputTypes, const std::string &outputNames, const std::string &outputTensorFiles, bool dequantizeOuput, bool enableProfiling, bool enableFp16TurboMode, const double &thresholdTime, bool printIntermediate, const size_t subgraphId, bool enableLayerDetails=false, bool parseUnsupported=false, const std::shared_ptr< armnn::IRuntime > &runtime=nullptr)
void ConfigureLogging(bool printToStandardOutput, bool printToDebugOutput, LogSeverity severity)
Configures the logging behaviour of the ARMNN library.
Definition: Utils.cpp:10
int RunCsvTest(const armnnUtils::CsvRow &csvRow, const std::shared_ptr< armnn::IRuntime > &runtime, const bool enableProfiling, const bool enableFp16TurboMode, const double &thresholdTime, const bool printIntermediate, bool enableLayerDetails=false, bool parseUnuspported=false)
#define ARMNN_LOG(severity)
Definition: Logging.hpp:163
BackendRegistry & BackendRegistryInstance()
std::string GetBackendIdsAsString() const
std::string m_DynamicBackendsPath
Setting this value will override the paths set by the DYNAMIC_BACKEND_PATHS compiler directive Only a...
Definition: IRuntime.hpp:58
bool m_EnableGpuProfiling
Setting this flag will allow the user to obtain GPU profiling information from the runtime...
Definition: IRuntime.hpp:54
static std::vector< CsvRow > ParseFile(const std::string &csvFile)
Definition: CsvReader.cpp:32
armnn::Runtime::CreationOptions::ExternalProfilingOptions options
ExternalProfilingOptions m_ProfilingOptions
Definition: IRuntime.hpp:76
LogSeverity
Definition: Utils.hpp:12