ArmNN
 22.08
ExecuteNetworkProgramOptions.cpp
Go to the documentation of this file.
1 //
2 // Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
8 
10 #include <armnn/Exceptions.hpp>
11 #include <armnn/utility/Assert.hpp>
13 #include <armnn/Logging.hpp>
14 
15 #include <fmt/format.h>
16 
17 bool CheckOption(const cxxopts::ParseResult& result,
18  const char* option)
19 {
20  // Check that the given option is valid.
21  if (option == nullptr)
22  {
23  return false;
24  }
25 
26  // Check whether 'option' is provided.
27  return ((result.count(option)) ? true : false);
28 }
29 
30 void CheckOptionDependency(const cxxopts::ParseResult& result,
31  const char* option,
32  const char* required)
33 {
34  // Check that the given options are valid.
35  if (option == nullptr || required == nullptr)
36  {
37  throw cxxopts::OptionParseException("Invalid option to check dependency for");
38  }
39 
40  // Check that if 'option' is provided, 'required' is also provided.
41  if (CheckOption(result, option) && !result[option].has_default())
42  {
43  if (CheckOption(result, required) == 0 || result[required].has_default())
44  {
45  throw cxxopts::OptionParseException(
46  std::string("Option '") + option + "' requires option '" + required + "'.");
47  }
48  }
49 }
50 
/// Verifies all known inter-option dependencies on the parsed command line.
/// @throws cxxopts::OptionParseException (via CheckOptionDependency) on violation.
void CheckOptionDependencies(const cxxopts::ParseResult& result)
{
    // 'tuning-level' performs a tuning run, which needs a tuning file to update/create.
    CheckOptionDependency(result, "tuning-level", "tuning-path");
}
55 
56 void RemoveDuplicateDevices(std::vector<armnn::BackendId>& computeDevices)
57 {
58  // Mark the duplicate devices as 'Undefined'.
59  for (auto i = computeDevices.begin(); i != computeDevices.end(); ++i)
60  {
61  for (auto j = std::next(i); j != computeDevices.end(); ++j)
62  {
63  if (*j == *i)
64  {
66  }
67  }
68  }
69 
70  // Remove 'Undefined' devices.
71  computeDevices.erase(std::remove(computeDevices.begin(), computeDevices.end(), armnn::Compute::Undefined),
72  computeDevices.end());
73 }
74 
75 /// Takes a vector of backend strings and returns a vector of backendIDs.
76 /// Removes duplicate entries.
77 /// Can handle backend strings that contain multiple backends separated by comma e.g "CpuRef,CpuAcc"
78 std::vector<armnn::BackendId> GetBackendIDs(const std::vector<std::string>& backendStringsVec)
79 {
80  std::vector<armnn::BackendId> backendIDs;
81  for (const auto& backendStrings : backendStringsVec)
82  {
83  // Each backendStrings might contain multiple backends separated by comma e.g "CpuRef,CpuAcc"
84  std::vector<std::string> backendStringVec = ParseStringList(backendStrings, ",");
85  for (const auto& b : backendStringVec)
86  {
87  backendIDs.push_back(armnn::BackendId(b));
88  }
89  }
90 
91  RemoveDuplicateDevices(backendIDs);
92 
93  return backendIDs;
94 }
95 
96 /// Provides a segfault safe way to get cxxopts option values by checking if the option was defined.
97 /// If the option wasn't defined it returns an empty object.
98 template<typename optionType>
99 optionType GetOptionValue(std::string&& optionName, const cxxopts::ParseResult& result)
100 {
101  optionType out;
102  if(result.count(optionName))
103  {
104  out = result[optionName].as<optionType>();
105  }
106  return out;
107 }
108 
109 void LogAndThrowFatal(std::string errorMessage)
110 {
111  throw armnn::InvalidArgumentException (errorMessage);
112 }
113 
114 void CheckRequiredOptions(const cxxopts::ParseResult& result)
115 {
116 
117  // For each option in option-group "a) Required
118  std::vector<std::string> requiredOptions{"compute",
119  "model-path"
120  };
121 
122  bool requiredMissing = false;
123  for(auto const& str : requiredOptions)
124  {
125  if(!(result.count(str) > 0))
126  {
127  ARMNN_LOG(error) << fmt::format("The program option '{}' is mandatory but wasn't provided.", str);
128  requiredMissing = true;
129  }
130  }
131  if(requiredMissing)
132  {
133  throw armnn::InvalidArgumentException ("Some required arguments are missing");
134  }
135 }
136 
137 void CheckForDeprecatedOptions(const cxxopts::ParseResult& result)
138 {
139  if(result.count("armnn-tflite-delegate") > 0)
140  {
141  ARMNN_LOG(warning) << "DEPRECATED: The program option 'armnn-tflite-delegate' is deprecated and will be "
142  "removed soon. Please use the option 'tflite-executor' instead.";
143  }
144  if(result.count("concurrent") > 0)
145  {
146  ARMNN_LOG(warning) << "DEPRECATED: The program option 'concurrent' is deprecated and will be "
147  "removed soon. Please use the option '\"P, thread-pool-size\"' instead.";
148  }
149  if(result.count("input-type") > 0)
150  {
151  ARMNN_LOG(warning) << "DEPRECATED: The program option 'input-type' is deprecated and will be "
152  "removed soon. The input-types are now automatically set.";
153  }
154  if(result.count("input-name") > 0)
155  {
156  ARMNN_LOG(warning) << "DEPRECATED: The program option 'input-name' is deprecated and will be "
157  "removed soon. The input-names are now automatically set.";
158  }
159  if(result.count("output-type") > 0)
160  {
161  ARMNN_LOG(warning) << "DEPRECATED: The program option 'output-type' is deprecated and will be "
162  "removed soon. The output-types are now automatically set.";
163  }
164  if(result.count("output-name") > 0)
165  {
166  ARMNN_LOG(warning) << "DEPRECATED: The program option 'output-name' is deprecated and will be "
167  "removed soon. The output-names are now automatically set.";
168  }
169  if(result.count("model-format") > 0)
170  {
171  ARMNN_LOG(warning) << "DEPRECATED: The program option 'model-format' is deprecated and will be "
172  "removed soon. The model-format is now automatically set.";
173  }
174 
175 }
176 
178 {
180 }
181 
183 {
186  {
187  LogAndThrowFatal("Timeline profiling requires external profiling to be turned on");
188  }
189 }
190 
191 
193  "Executes a neural network model using the provided input "
194  "tensor. Prints the resulting output tensor."}
195 {
196  try
197  {
198  // cxxopts doesn't provide a mechanism to ensure required options are given. There is a
199  // separate function CheckRequiredOptions() for that.
200  m_CxxOptions.add_options("a) Required")
201  ("c,compute",
202  "Which device to run layers on by default. If a single device doesn't support all layers in the model "
203  "you can specify a second or third to fall back on. Possible choices: "
205  + " NOTE: Multiple compute devices need to be passed as a comma separated list without whitespaces "
206  "e.g. GpuAcc,CpuAcc,CpuRef or by repeating the program option e.g. '-c CpuAcc -c CpuRef'. "
207  "Duplicates are ignored.",
208  cxxopts::value<std::vector<std::string>>())
209 
210  ("f,model-format",
211  "armnn-binary, onnx-binary, onnx-text, tflite-binary"
212  "DEPRECATED: The program option 'model-format' is deprecated and will be "
213  "removed soon. The model-format is now automatically set.",
214  cxxopts::value<std::string>())
215 
216  ("m,model-path",
217  "Path to model file, e.g. .armnn, , .prototxt, .tflite, .onnx",
218  cxxopts::value<std::string>(m_ExNetParams.m_ModelPath))
219 
220  ("i,input-name",
221  "Identifier of the input tensors in the network separated by comma."
222  "This option is not required, but can be used to set the order of inputs",
223  cxxopts::value<std::string>())
224 
225  ("o,output-name",
226  "Identifier of the output tensors in the network separated by comma."
227  "This option is not required, but can be used to set the order of outputs",
228  cxxopts::value<std::string>());
229 
230  m_CxxOptions.add_options("b) General")
231  ("b,dynamic-backends-path",
232  "Path where to load any available dynamic backend from. "
233  "If left empty (the default), dynamic backends will not be used.",
234  cxxopts::value<std::string>(m_RuntimeOptions.m_DynamicBackendsPath))
235 
236  ("P, thread-pool-size",
237  "Run the network using the Arm NN thread pool with the number of threads provided. ",
238  cxxopts::value<size_t>(m_ExNetParams.m_ThreadPoolSize)->default_value("0"))
239 
240  ("n,concurrent",
241  "This option is for Arm NN internal asynchronous testing purposes. "
242  "False by default. If set to true will use std::launch::async or the Arm NN thread pool, "
243  "if 'thread-pool-size' is greater than 0, for asynchronous execution."
244  "DEPRECATED: The program option 'concurrent' is deprecated and will be "
245  "removed soon. Please use the option '\"P, thread-pool-size\"' instead.",
246  cxxopts::value<bool>(m_ExNetParams.m_Concurrent)->default_value("false")->implicit_value("true"))
247 
248  ("d,input-tensor-data",
249  "Path to files containing the input data as a flat array separated by whitespace. "
250  "Several paths can be passed by separating them with a comma if the network has multiple inputs "
251  "or you wish to run the model multiple times with different input data using the 'iterations' option. "
252  "If not specified, the network will be run with dummy data (useful for profiling).",
253  cxxopts::value<std::string>()->default_value(""))
254 
255  ("h,help", "Display usage information")
256 
257  ("infer-output-shape",
258  "Infers output tensor shape from input tensor shape and validate where applicable (where supported by "
259  "parser)",
260  cxxopts::value<bool>(m_ExNetParams.m_InferOutputShape)->default_value("false")->implicit_value("true"))
261 
262  ("allow-expanded-dims",
263  "If true will disregard dimensions with a size of 1 when validating tensor shapes. Tensor sizes must "
264  "still match. This is an Experimental parameter that is incompatible with infer-output-shape. "
265  "This parameter may be removed in a later update. ",
266  cxxopts::value<bool>(m_ExNetParams.m_AllowExpandedDims)->default_value("false")
267  ->implicit_value("true"))
268 
269  ("I,iterations",
270  "Number of iterations to run the network for, default is set to 1. "
271  "If you wish to run the model with different input data for every execution you can do so by "
272  "supplying more input file paths to the 'input-tensor-data' option. "
273  "Note: The number of input files provided must be divisible by the number of inputs of the model. "
274  "e.g. Your model has 2 inputs and you supply 4 input files. If you set 'iterations' to 6 the first "
275  "run will consume the first two inputs, the second the next two and the last will begin from the "
276  "start and use the first two inputs again. "
277  "Note: If the 'concurrent' option is enabled all iterations will be run asynchronously.",
278  cxxopts::value<size_t>(m_ExNetParams.m_Iterations)->default_value("1"))
279 
280  ("l,dequantize-output",
281  "If this option is enabled, all quantized outputs will be dequantized to float. "
282  "If unset, default to not get dequantized. "
283  "Accepted values (true or false)"
284  " (Not available when executing ArmNNTfLiteDelegate or TfliteInterpreter)",
285  cxxopts::value<bool>(m_ExNetParams.m_DequantizeOutput)->default_value("false")->implicit_value("true"))
286 
287  ("p,print-intermediate-layers",
288  "If this option is enabled, the output of every graph layer will be printed.",
289  cxxopts::value<bool>(m_ExNetParams.m_PrintIntermediate)->default_value("false")
290  ->implicit_value("true"))
291 
292  ("parse-unsupported",
293  "Add unsupported operators as stand-in layers (where supported by parser)",
294  cxxopts::value<bool>(m_ExNetParams.m_ParseUnsupported)->default_value("false")->implicit_value("true"))
295 
296  ("N,do-not-print-output",
297  "The default behaviour of ExecuteNetwork is to print the resulting outputs on the console. "
298  "This behaviour can be changed by adding this flag to your command.",
299  cxxopts::value<bool>(m_ExNetParams.m_DontPrintOutputs)->default_value("false")->implicit_value("true"))
300 
301  ("q,quantize-input",
302  "If this option is enabled, all float inputs will be quantized as appropriate for the model's inputs. "
303  "If unset, default to not quantized. Accepted values (true or false)"
304  " (Not available when executing ArmNNTfLiteDelegate or TfliteInterpreter)",
305  cxxopts::value<bool>(m_ExNetParams.m_QuantizeInput)->default_value("false")->implicit_value("true"))
306 
307  ("r,threshold-time",
308  "Threshold time is the maximum allowed time for inference measured in milliseconds. If the actual "
309  "inference time is greater than the threshold time, the test will fail. By default, no threshold "
310  "time is used.",
311  cxxopts::value<double>(m_ExNetParams.m_ThresholdTime)->default_value("0.0"))
312 
313  ("s,input-tensor-shape",
314  "The shape of the input tensors in the network as a flat array of integers separated by comma."
315  "Several shapes can be passed by separating them with a colon (:).",
316  cxxopts::value<std::string>())
317 
318  ("v,visualize-optimized-model",
319  "Enables built optimized model visualizer. If unset, defaults to off.",
320  cxxopts::value<bool>(m_ExNetParams.m_EnableLayerDetails)->default_value("false")
321  ->implicit_value("true"))
322 
323  ("w,write-outputs-to-file",
324  "Comma-separated list of output file paths keyed with the binding-id of the output slot. "
325  "If left empty (the default), the output tensors will not be written to a file.",
326  cxxopts::value<std::string>())
327 
328  ("x,subgraph-number",
329  "Id of the subgraph to be executed. Defaults to 0."
330  " (Not available when executing ArmNNTfLiteDelegate or TfliteInterpreter)",
331  cxxopts::value<size_t>(m_ExNetParams.m_SubgraphId)->default_value("0"))
332 
333  ("y,input-type",
334  "The type of the input tensors in the network separated by comma. "
335  "If unset, defaults to \"float\" for all defined inputs. "
336  "Accepted values (float, int, qasymms8 or qasymmu8)."
337  "DEPRECATED: The program option 'input-type' is deprecated and will be "
338  "removed soon. The input-types are now automatically set.",
339  cxxopts::value<std::string>())
340 
341  ("z,output-type",
342  "The type of the output tensors in the network separated by comma. "
343  "If unset, defaults to \"float\" for all defined outputs. "
344  "Accepted values (float, int, qasymms8 or qasymmu8)."
345  "DEPRECATED: The program option 'output-type' is deprecated and will be "
346  "removed soon. The output-types are now automatically set.",
347  cxxopts::value<std::string>())
348 
349  ("T,tflite-executor",
350  "Set the executor for the tflite model: parser, delegate, tflite"
351  "parser is the ArmNNTfLiteParser, "
352  "delegate is the ArmNNTfLiteDelegate, "
353  "tflite is the TfliteInterpreter",
354  cxxopts::value<std::string>()->default_value("parser"))
355 
356  ("C, compare-output",
357  "Number of Arm NN threads to use when running the network asynchronously via the Arm NN thread pool. "
358  "The default is set to 0 which equals disabled. If 'thread-pool-size' is greater than 0 the "
359  "'concurrent' option is automatically set to true.",
360  cxxopts::value<std::string>(m_ExNetParams.m_ComparisonFile))
361 
362  ("B, compare-output-with-backend",
363  "Compare the output of the network with a different backend.",
364  cxxopts::value<std::vector<std::string>>())
365 
366  ("A, compare-with-tflite",
367  "Compare the output of the network with the tflite ref model.",
368  cxxopts::value<bool>(m_ExNetParams.m_CompareWithTflite)->default_value("false")
369  ->implicit_value("true"));
370 
371  m_CxxOptions.add_options("c) Optimization")
372  ("bf16-turbo-mode",
373  "If this option is enabled, FP32 layers, "
374  "weights and biases will be converted to BFloat16 where the backend supports it",
375  cxxopts::value<bool>(m_ExNetParams.m_EnableBf16TurboMode)
376  ->default_value("false")->implicit_value("true"))
377 
378  ("enable-fast-math",
379  "Enables fast_math options in backends that support it. Using the fast_math flag can lead to "
380  "performance improvements but may result in reduced or different precision.",
381  cxxopts::value<bool>(m_ExNetParams.m_EnableFastMath)->default_value("false")->implicit_value("true"))
382 
383  ("number-of-threads",
384  "Assign the number of threads used by the CpuAcc backend. "
385  "Input value must be between 1 and 64. "
386  "Default is set to 0 (Backend will decide number of threads to use).",
387  cxxopts::value<unsigned int>(m_ExNetParams.m_NumberOfThreads)->default_value("0"))
388 
389  ("save-cached-network",
390  "Enables saving of the cached network to a file given with the cached-network-filepath option. "
391  "See also --cached-network-filepath",
392  cxxopts::value<bool>(m_ExNetParams.m_SaveCachedNetwork)
393  ->default_value("false")->implicit_value("true"))
394 
395  ("cached-network-filepath",
396  "If non-empty, the given file will be used to load/save the cached network. "
397  "If save-cached-network is given then the cached network will be saved to the given file. "
398  "To save the cached network a file must already exist. "
399  "If save-cached-network is not given then the cached network will be loaded from the given file. "
400  "This will remove initial compilation time of kernels and speed up the first execution.",
401  cxxopts::value<std::string>(m_ExNetParams.m_CachedNetworkFilePath)->default_value(""))
402 
403  ("fp16-turbo-mode",
404  "If this option is enabled, FP32 layers, "
405  "weights and biases will be converted to FP16 where the backend supports it",
406  cxxopts::value<bool>(m_ExNetParams.m_EnableFp16TurboMode)
407  ->default_value("false")->implicit_value("true"))
408 
409  ("tuning-level",
410  "Sets the tuning level which enables a tuning run which will update/create a tuning file. "
411  "Available options are: 1 (Rapid), 2 (Normal), 3 (Exhaustive). "
412  "Requires tuning-path to be set, default is set to 0 (No tuning run)",
413  cxxopts::value<int>(m_ExNetParams.m_TuningLevel)->default_value("0"))
414 
415  ("tuning-path",
416  "Path to tuning file. Enables use of CL tuning",
417  cxxopts::value<std::string>(m_ExNetParams.m_TuningPath))
418 
419  ("MLGOTuningFilePath",
420  "Path to tuning file. Enables use of CL MLGO tuning",
421  cxxopts::value<std::string>(m_ExNetParams.m_MLGOTuningFilePath))
422 
423  ("R, reuse-buffers",
424  "If enabled then the IO buffers will be reused for each inference",
425  cxxopts::value<bool>(m_ExNetParams.m_ReuseBuffers)->default_value("false")->implicit_value("true"));
426 
427  m_CxxOptions.add_options("d) Profiling")
428  ("a,enable-external-profiling",
429  "If enabled external profiling will be switched on",
431  ->default_value("false")->implicit_value("true"))
432 
433  ("e,event-based-profiling",
434  "Enables built in profiler. If unset, defaults to off.",
435  cxxopts::value<bool>(m_ExNetParams.m_EnableProfiling)->default_value("false")->implicit_value("true"))
436 
437  ("g,file-only-external-profiling",
438  "If enabled then the 'file-only' test mode of external profiling will be enabled",
439  cxxopts::value<bool>(m_RuntimeOptions.m_ProfilingOptions.m_FileOnly)
440  ->default_value("false")->implicit_value("true"))
441 
442  ("file-format",
443  "If profiling is enabled specifies the output file format",
444  cxxopts::value<std::string>(m_RuntimeOptions.m_ProfilingOptions.m_FileFormat)->default_value("binary"))
445 
446  ("j,outgoing-capture-file",
447  "If specified the outgoing external profiling packets will be captured in this binary file",
448  cxxopts::value<std::string>(m_RuntimeOptions.m_ProfilingOptions.m_OutgoingCaptureFile))
449 
450  ("k,incoming-capture-file",
451  "If specified the incoming external profiling packets will be captured in this binary file",
452  cxxopts::value<std::string>(m_RuntimeOptions.m_ProfilingOptions.m_IncomingCaptureFile))
453 
454  ("timeline-profiling",
455  "If enabled timeline profiling will be switched on, requires external profiling",
457  ->default_value("false")->implicit_value("true"))
458 
459  ("u,counter-capture-period",
460  "If profiling is enabled in 'file-only' mode this is the capture period that will be used in the test",
461  cxxopts::value<uint32_t>(m_RuntimeOptions.m_ProfilingOptions.m_CapturePeriod)->default_value("150"))
462 
463  ("output-network-details",
464  "Outputs layer tensor infos and descriptors to std out along with profiling events. Defaults to off.",
465  cxxopts::value<bool>(m_ExNetParams.m_OutputDetailsToStdOut)->default_value("false")
466  ->implicit_value("true"))
467 
468  ("output-network-details-only",
469  "Outputs layer tensor infos and descriptors to std out without profiling events. Defaults to off.",
470  cxxopts::value<bool>(m_ExNetParams.m_OutputDetailsOnlyToStdOut)->default_value("false")
471  ->implicit_value("true"))
472 
473  ("import-inputs-if-aligned",
474  "In & Out tensors will be imported per inference if the memory alignment allows. Defaults to false.",
475  cxxopts::value<bool>(m_ExNetParams.m_ImportInputsIfAligned)->default_value("false")
476  ->implicit_value("true"));
477  }
478  catch (const std::exception& e)
479  {
480  ARMNN_ASSERT_MSG(false, "Caught unexpected exception");
481  ARMNN_LOG(fatal) << "Fatal internal error: " << e.what();
482  exit(EXIT_FAILURE);
483  }
484 }
485 
/// Constructs ProgramOptions (registering all options via the delegated default
/// constructor) and immediately parses the given command line.
/// @param ac Argument count, as passed to main().
/// @param av Argument vector, as passed to main().
ProgramOptions::ProgramOptions(int ac, const char* av[]): ProgramOptions()
{
    ParseOptions(ac, av);
}
490 
491 void ProgramOptions::ParseOptions(int ac, const char* av[])
492 {
493  // Parses the command-line.
494  m_CxxResult = m_CxxOptions.parse(ac, av);
495 
496  if (m_CxxResult.count("help") || ac <= 1)
497  {
498  std::cout << m_CxxOptions.help() << std::endl;
499  exit(EXIT_SUCCESS);
500  }
501 
505 
509  {
510  throw cxxopts::OptionParseException("You must enable profiling if you would like to output layer details");
511  }
512 
513  // Some options can't be assigned directly because they need some post-processing:
514  auto computeDevices = GetOptionValue<std::vector<std::string>>("compute", m_CxxResult);
515  m_ExNetParams.m_ComputeDevices = GetBackendIDs(computeDevices);
517  ParseStringList(GetOptionValue<std::string>("input-name", m_CxxResult), ",");
519  ParseStringList(GetOptionValue<std::string>("input-tensor-data", m_CxxResult), ",");
521  ParseStringList(GetOptionValue<std::string>("output-name", m_CxxResult), ",");
523  ParseStringList(GetOptionValue<std::string>("write-outputs-to-file", m_CxxResult), ",");
526 
528 
529  std::string tfliteExecutor = GetOptionValue<std::string>("tflite-executor", m_CxxResult);
530 
531  if (tfliteExecutor.size() == 0 || tfliteExecutor == "parser")
532  {
534  }
535  else if (tfliteExecutor == "delegate")
536  {
538  }
539  else if (tfliteExecutor == "tflite")
540  {
542  }
543  else
544  {
545  ARMNN_LOG(info) << fmt::format("Invalid tflite-executor option '{}'.", tfliteExecutor);
546  throw armnn::InvalidArgumentException ("Invalid tflite-executor option");
547  }
548 
549  // For backwards compatibility when deprecated options are used
551  {
553  }
554 
555  // Set concurrent to true if the user expects to run inferences asynchronously
557  {
559  }
560 
562  {
564  }
565 
566  // Parse input tensor shape from the string we got from the command-line.
567  std::vector<std::string> inputTensorShapesVector =
568  ParseStringList(GetOptionValue<std::string>("input-tensor-shape", m_CxxResult), ":");
569 
570  if (!inputTensorShapesVector.empty())
571  {
572  m_ExNetParams.m_InputTensorShapes.reserve(inputTensorShapesVector.size());
573 
574  for(const std::string& shape : inputTensorShapesVector)
575  {
576  std::stringstream ss(shape);
577  std::vector<unsigned int> dims = ParseArray(ss);
578 
580  armnn::TensorShape{static_cast<unsigned int>(dims.size()), dims.data()});
581  }
582  }
583 
584  // We have to validate ExecuteNetworkParams first so that the tuning path and level is validated
586 
587  // Parse CL tuning parameters to runtime options
588  if (!m_ExNetParams.m_TuningPath.empty())
589  {
590  m_RuntimeOptions.m_BackendOptions.emplace_back(
592  {
593  "GpuAcc",
594  {
595  {"TuningLevel", m_ExNetParams.m_TuningLevel},
596  {"TuningFile", m_ExNetParams.m_TuningPath.c_str()},
597  {"KernelProfilingEnabled", m_ExNetParams.m_EnableProfiling},
598  {"MLGOTuningFilePath", m_ExNetParams.m_MLGOTuningFilePath}
599  }
600  }
601  );
602  }
603 
605 
606  auto comparisonComputDevices = GetOptionValue<std::vector<std::string>>("compare-output-with-backend", m_CxxResult);
607 
608  if (!comparisonComputDevices.empty())
609  {
610  m_ExNetParams.m_ComparisonComputeDevices = GetBackendIDs(comparisonComputDevices);
611  }
612 }
613 
ExecuteNetworkParams m_ExNetParams
void ValidateExecuteNetworkParams()
Ensures that the parameters for ExecuteNetwork fit together.
optionType GetOptionValue(std::string &&optionName, const cxxopts::ParseResult &result)
Provides a segfault safe way to get cxxopts option values by checking if the option was defined...
std::string m_OutgoingCaptureFile
Path to a file in which outgoing timeline profiling messages will be stored.
Definition: IRuntime.hpp:142
std::vector< unsigned int > ParseArray(std::istream &stream)
void RemoveDuplicateDevices(std::vector< armnn::BackendId > &computeDevices)
std::vector< std::string > ParseStringList(const std::string &inputString, const char *delimiter)
Splits a given string at every occurrence of the delimiter into a vector of strings.
std::vector< armnn::TensorShape > m_InputTensorShapes
armnn::IRuntime::CreationOptions m_RuntimeOptions
#define ARMNN_LOG(severity)
Definition: Logging.hpp:205
BackendRegistry & BackendRegistryInstance()
std::vector< armnn::BackendId > GetBackendIDs(const std::vector< std::string > &backendStringsVec)
Takes a vector of backend strings and returns a vector of backendIDs.
std::vector< std::string > m_OutputNames
std::string GetBackendIdsAsString() const
std::string m_IncomingCaptureFile
Path to a file in which incoming timeline profiling messages will be stored.
Definition: IRuntime.hpp:144
std::vector< std::string > m_OutputTensorFiles
bool m_EnableProfiling
Indicates whether external profiling is enabled or not.
Definition: IRuntime.hpp:138
bool m_FileOnly
Enable profiling output to file only.
Definition: IRuntime.hpp:146
std::vector< armnn::BackendId > m_ComparisonComputeDevices
bool CheckOption(const cxxopts::ParseResult &result, const char *option)
std::vector< armnn::BackendId > m_ComputeDevices
std::vector< BackendOptions > m_BackendOptions
Pass backend specific options.
Definition: IRuntime.hpp:189
ProgramOptions()
Initializes ProgramOptions by adding options to the underlying cxxopts::options object.
#define ARMNN_ASSERT_MSG(COND, MSG)
Definition: Assert.hpp:15
std::vector< std::string > m_InputNames
std::vector< std::string > m_InputTensorDataFilePaths
void ParseOptions(int ac, const char *av[])
Parses program options from the command line or another source and stores the values in member variab...
void CheckForDeprecatedOptions(const cxxopts::ParseResult &result)
Struct for the users to pass backend specific options.
void LogAndThrowFatal(std::string errorMessage)
std::string m_DynamicBackendsPath
Setting this value will override the paths set by the DYNAMIC_BACKEND_PATHS compiler directive Only a...
Definition: IRuntime.hpp:98
Holds and parses program options for the ExecuteNetwork application.
bool m_EnableGpuProfiling
Setting this flag will allow the user to obtain GPU profiling information from the runtime...
Definition: IRuntime.hpp:93
void CheckOptionDependency(const cxxopts::ParseResult &result, const char *option, const char *required)
void ValidateRuntimeOptions()
Ensures that the runtime options are valid.
uint32_t m_CapturePeriod
The duration at which captured profiling messages will be flushed.
Definition: IRuntime.hpp:148
void CheckOptionDependencies(const cxxopts::ParseResult &result)
bool m_TimelineEnabled
Indicates whether external timeline profiling is enabled or not.
Definition: IRuntime.hpp:140
cxxopts::ParseResult m_CxxResult
void CheckRequiredOptions(const cxxopts::ParseResult &result)
ExternalProfilingOptions m_ProfilingOptions
Definition: IRuntime.hpp:153
std::string m_FileFormat
The format of the file used for outputting profiling data.
Definition: IRuntime.hpp:150