ArmNN
 22.05.01
ExecuteNetworkProgramOptions.cpp
Go to the documentation of this file.
1 //
2 // Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
8 #include "InferenceTest.hpp"
9 
11 #include <armnn/Exceptions.hpp>
12 #include <armnn/utility/Assert.hpp>
14 #include <armnn/Logging.hpp>
15 
16 #include <fmt/format.h>
17 
18 bool CheckOption(const cxxopts::ParseResult& result,
19  const char* option)
20 {
21  // Check that the given option is valid.
22  if (option == nullptr)
23  {
24  return false;
25  }
26 
27  // Check whether 'option' is provided.
28  return ((result.count(option)) ? true : false);
29 }
30 
31 void CheckOptionDependency(const cxxopts::ParseResult& result,
32  const char* option,
33  const char* required)
34 {
35  // Check that the given options are valid.
36  if (option == nullptr || required == nullptr)
37  {
38  throw cxxopts::OptionParseException("Invalid option to check dependency for");
39  }
40 
41  // Check that if 'option' is provided, 'required' is also provided.
42  if (CheckOption(result, option) && !result[option].has_default())
43  {
44  if (CheckOption(result, required) == 0 || result[required].has_default())
45  {
46  throw cxxopts::OptionParseException(
47  std::string("Option '") + option + "' requires option '" + required + "'.");
48  }
49  }
50 }
51 
52 void CheckOptionDependencies(const cxxopts::ParseResult& result)
53 {
54  CheckOptionDependency(result, "model-path", "model-format");
55  CheckOptionDependency(result, "input-tensor-shape", "model-path");
56  CheckOptionDependency(result, "tuning-level", "tuning-path");
57 }
58 
59 void RemoveDuplicateDevices(std::vector<armnn::BackendId>& computeDevices)
60 {
61  // Mark the duplicate devices as 'Undefined'.
62  for (auto i = computeDevices.begin(); i != computeDevices.end(); ++i)
63  {
64  for (auto j = std::next(i); j != computeDevices.end(); ++j)
65  {
66  if (*j == *i)
67  {
69  }
70  }
71  }
72 
73  // Remove 'Undefined' devices.
74  computeDevices.erase(std::remove(computeDevices.begin(), computeDevices.end(), armnn::Compute::Undefined),
75  computeDevices.end());
76 }
77 
78 /// Takes a vector of backend strings and returns a vector of backendIDs.
79 /// Removes duplicate entries.
80 /// Can handle backend strings that contain multiple backends separated by comma e.g "CpuRef,CpuAcc"
81 std::vector<armnn::BackendId> GetBackendIDs(const std::vector<std::string>& backendStringsVec)
82 {
83  std::vector<armnn::BackendId> backendIDs;
84  for (const auto& backendStrings : backendStringsVec)
85  {
86  // Each backendStrings might contain multiple backends separated by comma e.g "CpuRef,CpuAcc"
87  std::vector<std::string> backendStringVec = ParseStringList(backendStrings, ",");
88  for (const auto& b : backendStringVec)
89  {
90  backendIDs.push_back(armnn::BackendId(b));
91  }
92  }
93 
94  RemoveDuplicateDevices(backendIDs);
95 
96  return backendIDs;
97 }
98 
99 /// Provides a segfault safe way to get cxxopts option values by checking if the option was defined.
100 /// If the option wasn't defined it returns an empty object.
101 template<typename optionType>
102 optionType GetOptionValue(std::string&& optionName, const cxxopts::ParseResult& result)
103 {
104  optionType out;
105  if(result.count(optionName))
106  {
107  out = result[optionName].as<optionType>();
108  }
109  return out;
110 }
111 
112 void LogAndThrowFatal(std::string errorMessage)
113 {
114  throw armnn::InvalidArgumentException (errorMessage);
115 }
116 
117 void CheckRequiredOptions(const cxxopts::ParseResult& result)
118 {
119 
120  // For each option in option-group "a) Required
121  std::vector<std::string> requiredOptions{"compute",
122  "model-format",
123  "model-path",
124  "input-name",
125  "output-name"};
126 
127  bool requiredMissing = false;
128  for(auto const& str : requiredOptions)
129  {
130  if(!(result.count(str) > 0))
131  {
132  ARMNN_LOG(error) << fmt::format("The program option '{}' is mandatory but wasn't provided.", str);
133  requiredMissing = true;
134  }
135  }
136  if(requiredMissing)
137  {
138  throw armnn::InvalidArgumentException ("Some required arguments are missing");
139  }
140 }
141 
142 void CheckForDeprecatedOptions(const cxxopts::ParseResult& result)
143 {
144  if(result.count("simultaneous-iterations") > 0)
145  {
146  ARMNN_LOG(warning) << "DEPRECATED: The program option 'simultaneous-iterations' is deprecated and will be "
147  "removed soon. Please use the option 'iterations' combined with 'concurrent' instead.";
148  }
149  if(result.count("armnn-tflite-delegate") > 0)
150  {
151  ARMNN_LOG(warning) << "DEPRECATED: The program option 'armnn-tflite-delegate' is deprecated and will be "
152  "removed soon. Please use the option 'tflite-executor' instead.";
153  }
154 }
155 
157 {
159 }
160 
162 {
165  {
166  LogAndThrowFatal("Timeline profiling requires external profiling to be turned on");
167  }
168 }
169 
170 
172  "Executes a neural network model using the provided input "
173  "tensor. Prints the resulting output tensor."}
174 {
175  try
176  {
177  // cxxopts doesn't provide a mechanism to ensure required options are given. There is a
178  // separate function CheckRequiredOptions() for that.
179  m_CxxOptions.add_options("a) Required")
180  ("c,compute",
181  "Which device to run layers on by default. If a single device doesn't support all layers in the model "
182  "you can specify a second or third to fall back on. Possible choices: "
184  + " NOTE: Multiple compute devices need to be passed as a comma separated list without whitespaces "
185  "e.g. GpuAcc,CpuAcc,CpuRef or by repeating the program option e.g. '-c Cpuacc -c CpuRef'. "
186  "Duplicates are ignored.",
187  cxxopts::value<std::vector<std::string>>())
188 
189  ("f,model-format",
190  "armnn-binary, onnx-binary, onnx-text, tflite-binary",
191  cxxopts::value<std::string>())
192 
193  ("m,model-path",
194  "Path to model file, e.g. .armnn, , .prototxt, .tflite, .onnx",
195  cxxopts::value<std::string>(m_ExNetParams.m_ModelPath))
196 
197  ("i,input-name",
198  "Identifier of the input tensors in the network separated by comma.",
199  cxxopts::value<std::string>())
200 
201  ("o,output-name",
202  "Identifier of the output tensors in the network separated by comma.",
203  cxxopts::value<std::string>());
204 
205  m_CxxOptions.add_options("b) General")
206  ("b,dynamic-backends-path",
207  "Path where to load any available dynamic backend from. "
208  "If left empty (the default), dynamic backends will not be used.",
209  cxxopts::value<std::string>(m_RuntimeOptions.m_DynamicBackendsPath))
210 
211  ("n,concurrent",
212  "This option is for Arm NN internal asynchronous testing purposes. "
213  "False by default. If set to true will use std::launch::async or the Arm NN thread pool, "
214  "if 'thread-pool-size' is greater than 0, for asynchronous execution.",
215  cxxopts::value<bool>(m_ExNetParams.m_Concurrent)->default_value("false")->implicit_value("true"))
216 
217  ("d,input-tensor-data",
218  "Path to files containing the input data as a flat array separated by whitespace. "
219  "Several paths can be passed by separating them with a comma if the network has multiple inputs "
220  "or you wish to run the model multiple times with different input data using the 'iterations' option. "
221  "If not specified, the network will be run with dummy data (useful for profiling).",
222  cxxopts::value<std::string>()->default_value(""))
223 
224  ("h,help", "Display usage information")
225 
226  ("infer-output-shape",
227  "Infers output tensor shape from input tensor shape and validate where applicable (where supported by "
228  "parser)",
229  cxxopts::value<bool>(m_ExNetParams.m_InferOutputShape)->default_value("false")->implicit_value("true"))
230 
231  ("allow-expanded-dims",
232  "If true will disregard dimensions with a size of 1 when validating tensor shapes. Tensor sizes must "
233  "still match. This is an Experimental parameter that is incompatible with infer-output-shape. "
234  "This parameter may be removed in a later update. ",
235  cxxopts::value<bool>(m_ExNetParams.m_AllowExpandedDims)->default_value("false")
236  ->implicit_value("true"))
237 
238  ("iterations",
239  "Number of iterations to run the network for, default is set to 1. "
240  "If you wish to run the model with different input data for every execution you can do so by "
241  "supplying more input file paths to the 'input-tensor-data' option. "
242  "Note: The number of input files provided must be divisible by the number of inputs of the model. "
243  "e.g. Your model has 2 inputs and you supply 4 input files. If you set 'iterations' to 6 the first "
244  "run will consume the first two inputs, the second the next two and the last will begin from the "
245  "start and use the first two inputs again. "
246  "Note: If the 'concurrent' option is enabled all iterations will be run asynchronously.",
247  cxxopts::value<size_t>(m_ExNetParams.m_Iterations)->default_value("1"))
248 
249  ("l,dequantize-output",
250  "If this option is enabled, all quantized outputs will be dequantized to float. "
251  "If unset, default to not get dequantized. "
252  "Accepted values (true or false)"
253  " (Not available when executing ArmNNTfLiteDelegate or TfliteInterpreter)",
254  cxxopts::value<bool>(m_ExNetParams.m_DequantizeOutput)->default_value("false")->implicit_value("true"))
255 
256  ("p,print-intermediate-layers",
257  "If this option is enabled, the output of every graph layer will be printed.",
258  cxxopts::value<bool>(m_ExNetParams.m_PrintIntermediate)->default_value("false")
259  ->implicit_value("true"))
260 
261  ("parse-unsupported",
262  "Add unsupported operators as stand-in layers (where supported by parser)",
263  cxxopts::value<bool>(m_ExNetParams.m_ParseUnsupported)->default_value("false")->implicit_value("true"))
264 
265  ("N,do-not-print-output",
266  "The default behaviour of ExecuteNetwork is to print the resulting outputs on the console. "
267  "This behaviour can be changed by adding this flag to your command.",
268  cxxopts::value<bool>(m_ExNetParams.m_DontPrintOutputs)->default_value("false")->implicit_value("true"))
269 
270  ("q,quantize-input",
271  "If this option is enabled, all float inputs will be quantized as appropriate for the model's inputs. "
272  "If unset, default to not quantized. Accepted values (true or false)"
273  " (Not available when executing ArmNNTfLiteDelegate or TfliteInterpreter)",
274  cxxopts::value<bool>(m_ExNetParams.m_QuantizeInput)->default_value("false")->implicit_value("true"))
275  ("r,threshold-time",
276  "Threshold time is the maximum allowed time for inference measured in milliseconds. If the actual "
277  "inference time is greater than the threshold time, the test will fail. By default, no threshold "
278  "time is used.",
279  cxxopts::value<double>(m_ExNetParams.m_ThresholdTime)->default_value("0.0"))
280 
281  ("s,input-tensor-shape",
282  "The shape of the input tensors in the network as a flat array of integers separated by comma."
283  "Several shapes can be passed by separating them with a colon (:).",
284  cxxopts::value<std::string>())
285 
286  ("v,visualize-optimized-model",
287  "Enables built optimized model visualizer. If unset, defaults to off.",
288  cxxopts::value<bool>(m_ExNetParams.m_EnableLayerDetails)->default_value("false")
289  ->implicit_value("true"))
290 
291  ("w,write-outputs-to-file",
292  "Comma-separated list of output file paths keyed with the binding-id of the output slot. "
293  "If left empty (the default), the output tensors will not be written to a file.",
294  cxxopts::value<std::string>())
295 
296  ("x,subgraph-number",
297  "Id of the subgraph to be executed. Defaults to 0."
298  " (Not available when executing ArmNNTfLiteDelegate or TfliteInterpreter)",
299  cxxopts::value<size_t>(m_ExNetParams.m_SubgraphId)->default_value("0"))
300 
301  ("y,input-type",
302  "The type of the input tensors in the network separated by comma. "
303  "If unset, defaults to \"float\" for all defined inputs. "
304  "Accepted values (float, int, qasymms8 or qasymmu8).",
305  cxxopts::value<std::string>())
306 
307  ("z,output-type",
308  "The type of the output tensors in the network separated by comma. "
309  "If unset, defaults to \"float\" for all defined outputs. "
310  "Accepted values (float, int, qasymms8 or qasymmu8).",
311  cxxopts::value<std::string>())
312 
313  ("T,tflite-executor",
314  "Set the executor for the tflite model: parser, delegate, tflite"
315  "parser is the ArmNNTfLiteParser, "
316  "delegate is the ArmNNTfLiteDelegate, "
317  "tflite is the TfliteInterpreter",
318  cxxopts::value<std::string>()->default_value("parser"))
319 
320  ("D,armnn-tflite-delegate",
321  "Enable Arm NN TfLite delegate. "
322  "DEPRECATED: This option is deprecated please use tflite-executor instead",
323  cxxopts::value<bool>(m_ExNetParams.m_EnableDelegate)->default_value("false")->implicit_value("true"))
324 
325  ("simultaneous-iterations",
326  "Number of simultaneous iterations to async-run the network for, default is set to 1 (disabled). "
327  "When thread-pool-size is set the Arm NN thread pool is used. Otherwise std::launch::async is used."
328  "DEPRECATED: This option is deprecated and will be removed soon. "
329  "Please use the option 'iterations' combined with 'concurrent' instead.",
330  cxxopts::value<size_t>(m_ExNetParams.m_SimultaneousIterations)->default_value("1"))
331 
332  ("thread-pool-size",
333  "Number of Arm NN threads to use when running the network asynchronously via the Arm NN thread pool. "
334  "The default is set to 0 which equals disabled. If 'thread-pool-size' is greater than 0 the "
335  "'concurrent' option is automatically set to true.",
336  cxxopts::value<size_t>(m_ExNetParams.m_ThreadPoolSize)->default_value("0"));
337 
338  m_CxxOptions.add_options("c) Optimization")
339  ("bf16-turbo-mode",
340  "If this option is enabled, FP32 layers, "
341  "weights and biases will be converted to BFloat16 where the backend supports it",
342  cxxopts::value<bool>(m_ExNetParams.m_EnableBf16TurboMode)
343  ->default_value("false")->implicit_value("true"))
344 
345  ("enable-fast-math",
346  "Enables fast_math options in backends that support it. Using the fast_math flag can lead to "
347  "performance improvements but may result in reduced or different precision.",
348  cxxopts::value<bool>(m_ExNetParams.m_EnableFastMath)->default_value("false")->implicit_value("true"))
349 
350  ("number-of-threads",
351  "Assign the number of threads used by the CpuAcc backend. "
352  "Input value must be between 1 and 64. "
353  "Default is set to 0 (Backend will decide number of threads to use).",
354  cxxopts::value<unsigned int>(m_ExNetParams.m_NumberOfThreads)->default_value("0"))
355 
356  ("save-cached-network",
357  "Enables saving of the cached network to a file given with the cached-network-filepath option. "
358  "See also --cached-network-filepath",
359  cxxopts::value<bool>(m_ExNetParams.m_SaveCachedNetwork)
360  ->default_value("false")->implicit_value("true"))
361 
362  ("cached-network-filepath",
363  "If non-empty, the given file will be used to load/save the cached network. "
364  "If save-cached-network is given then the cached network will be saved to the given file. "
365  "To save the cached network a file must already exist. "
366  "If save-cached-network is not given then the cached network will be loaded from the given file. "
367  "This will remove initial compilation time of kernels and speed up the first execution.",
368  cxxopts::value<std::string>(m_ExNetParams.m_CachedNetworkFilePath)->default_value(""))
369 
370  ("fp16-turbo-mode",
371  "If this option is enabled, FP32 layers, "
372  "weights and biases will be converted to FP16 where the backend supports it",
373  cxxopts::value<bool>(m_ExNetParams.m_EnableFp16TurboMode)
374  ->default_value("false")->implicit_value("true"))
375 
376  ("tuning-level",
377  "Sets the tuning level which enables a tuning run which will update/create a tuning file. "
378  "Available options are: 1 (Rapid), 2 (Normal), 3 (Exhaustive). "
379  "Requires tuning-path to be set, default is set to 0 (No tuning run)",
380  cxxopts::value<int>(m_ExNetParams.m_TuningLevel)->default_value("0"))
381 
382  ("tuning-path",
383  "Path to tuning file. Enables use of CL tuning",
384  cxxopts::value<std::string>(m_ExNetParams.m_TuningPath))
385 
386  ("MLGOTuningFilePath",
387  "Path to tuning file. Enables use of CL MLGO tuning",
388  cxxopts::value<std::string>(m_ExNetParams.m_MLGOTuningFilePath))
389 
390  ("R, reuse-buffers",
391  "If enabled then the IO buffers will be reused for each inference",
392  cxxopts::value<bool>(m_ExNetParams.m_ReuseBuffers)->default_value("false")->implicit_value("true"));
393 
394  m_CxxOptions.add_options("d) Profiling")
395  ("a,enable-external-profiling",
396  "If enabled external profiling will be switched on",
398  ->default_value("false")->implicit_value("true"))
399 
400  ("e,event-based-profiling",
401  "Enables built in profiler. If unset, defaults to off.",
402  cxxopts::value<bool>(m_ExNetParams.m_EnableProfiling)->default_value("false")->implicit_value("true"))
403 
404  ("g,file-only-external-profiling",
405  "If enabled then the 'file-only' test mode of external profiling will be enabled",
406  cxxopts::value<bool>(m_RuntimeOptions.m_ProfilingOptions.m_FileOnly)
407  ->default_value("false")->implicit_value("true"))
408 
409  ("file-format",
410  "If profiling is enabled specifies the output file format",
411  cxxopts::value<std::string>(m_RuntimeOptions.m_ProfilingOptions.m_FileFormat)->default_value("binary"))
412 
413  ("j,outgoing-capture-file",
414  "If specified the outgoing external profiling packets will be captured in this binary file",
415  cxxopts::value<std::string>(m_RuntimeOptions.m_ProfilingOptions.m_OutgoingCaptureFile))
416 
417  ("k,incoming-capture-file",
418  "If specified the incoming external profiling packets will be captured in this binary file",
419  cxxopts::value<std::string>(m_RuntimeOptions.m_ProfilingOptions.m_IncomingCaptureFile))
420 
421  ("timeline-profiling",
422  "If enabled timeline profiling will be switched on, requires external profiling",
424  ->default_value("false")->implicit_value("true"))
425 
426  ("u,counter-capture-period",
427  "If profiling is enabled in 'file-only' mode this is the capture period that will be used in the test",
428  cxxopts::value<uint32_t>(m_RuntimeOptions.m_ProfilingOptions.m_CapturePeriod)->default_value("150"))
429 
430  ("output-network-details",
431  "Outputs layer tensor infos and descriptors to std out along with profiling events. Defaults to off.",
432  cxxopts::value<bool>(m_ExNetParams.m_OutputDetailsToStdOut)->default_value("false")
433  ->implicit_value("true"))
434  ("output-network-details-only",
435  "Outputs layer tensor infos and descriptors to std out without profiling events. Defaults to off.",
436  cxxopts::value<bool>(m_ExNetParams.m_OutputDetailsOnlyToStdOut)->default_value("false")
437  ->implicit_value("true"))
438 
439  ("import-inputs-if-aligned",
440  "In & Out tensors will be imported per inference if the memory alignment allows. Defaults to false.",
441  cxxopts::value<bool>(m_ExNetParams.m_ImportInputsIfAligned)->default_value("false")
442  ->implicit_value("true"));
443  }
444  catch (const std::exception& e)
445  {
446  ARMNN_ASSERT_MSG(false, "Caught unexpected exception");
447  ARMNN_LOG(fatal) << "Fatal internal error: " << e.what();
448  exit(EXIT_FAILURE);
449  }
450 }
451 
/// Convenience constructor: delegates to the default constructor to register
/// all cxxopts options, then immediately parses the supplied command line.
/// @param ac Argument count, as passed to main().
/// @param av Argument vector, as passed to main().
ProgramOptions::ProgramOptions(int ac, const char* av[]): ProgramOptions()
{
    ParseOptions(ac, av);
}
456 
457 void ProgramOptions::ParseOptions(int ac, const char* av[])
458 {
459  // Parses the command-line.
460  m_CxxResult = m_CxxOptions.parse(ac, av);
461 
462  if (m_CxxResult.count("help") || ac <= 1)
463  {
464  std::cout << m_CxxOptions.help() << std::endl;
465  exit(EXIT_SUCCESS);
466  }
467 
471 
472  // Some options can't be assigned directly because they need some post-processing:
473  auto computeDevices = GetOptionValue<std::vector<std::string>>("compute", m_CxxResult);
474  m_ExNetParams.m_ComputeDevices = GetBackendIDs(computeDevices);
476  armnn::stringUtils::StringTrimCopy(GetOptionValue<std::string>("model-format", m_CxxResult));
478  ParseStringList(GetOptionValue<std::string>("input-name", m_CxxResult), ",");
480  ParseStringList(GetOptionValue<std::string>("input-tensor-data", m_CxxResult), ",");
482  ParseStringList(GetOptionValue<std::string>("output-name", m_CxxResult), ",");
484  ParseStringList(GetOptionValue<std::string>("input-type", m_CxxResult), ",");
486  ParseStringList(GetOptionValue<std::string>("output-type", m_CxxResult), ",");
488  ParseStringList(GetOptionValue<std::string>("write-outputs-to-file", m_CxxResult), ",");
492 
494 
495  std::string tfliteExecutor = GetOptionValue<std::string>("tflite-executor", m_CxxResult);
496 
497  if (tfliteExecutor.size() == 0 || tfliteExecutor == "parser")
498  {
500  }
501  else if (tfliteExecutor == "delegate")
502  {
504  }
505  else if (tfliteExecutor == "tflite")
506  {
508  }
509  else
510  {
511  ARMNN_LOG(info) << fmt::format("Invalid tflite-executor option '{}'.", tfliteExecutor);
512  throw armnn::InvalidArgumentException ("Invalid tflite-executor option");
513  }
514 
515  // For backwards compatibility when deprecated options are used
517  {
519  }
521  {
524  }
525 
526  // Set concurrent to true if the user expects to run inferences asynchronously
528  {
530  }
531 
532  // Parse input tensor shape from the string we got from the command-line.
533  std::vector<std::string> inputTensorShapesVector =
534  ParseStringList(GetOptionValue<std::string>("input-tensor-shape", m_CxxResult), ":");
535 
536  if (!inputTensorShapesVector.empty())
537  {
538  m_ExNetParams.m_InputTensorShapes.reserve(inputTensorShapesVector.size());
539 
540  for(const std::string& shape : inputTensorShapesVector)
541  {
542  std::stringstream ss(shape);
543  std::vector<unsigned int> dims = ParseArray(ss);
544 
546  std::make_unique<armnn::TensorShape>(static_cast<unsigned int>(dims.size()), dims.data()));
547  }
548  }
549 
550  // We have to validate ExecuteNetworkParams first so that the tuning path and level is validated
552 
553  // Parse CL tuning parameters to runtime options
554  if (!m_ExNetParams.m_TuningPath.empty())
555  {
556  m_RuntimeOptions.m_BackendOptions.emplace_back(
558  {
559  "GpuAcc",
560  {
561  {"TuningLevel", m_ExNetParams.m_TuningLevel},
562  {"TuningFile", m_ExNetParams.m_TuningPath.c_str()},
563  {"KernelProfilingEnabled", m_ExNetParams.m_EnableProfiling},
564  {"MLGOTuningFilePath", m_ExNetParams.m_MLGOTuningFilePath}
565  }
566  }
567  );
568  }
569 
571 }
572 
ExecuteNetworkParams m_ExNetParams
std::vector< std::string > m_InputTypes
void ValidateExecuteNetworkParams()
Ensures that the parameters for ExecuteNetwork fit together.
optionType GetOptionValue(std::string &&optionName, const cxxopts::ParseResult &result)
Provides a segfault safe way to get cxxopts option values by checking if the option was defined...
std::string m_OutgoingCaptureFile
Path to a file in which outgoing timeline profiling messages will be stored.
Definition: IRuntime.hpp:142
std::vector< TensorShapePtr > m_InputTensorShapes
std::vector< unsigned int > ParseArray(std::istream &stream)
void RemoveDuplicateDevices(std::vector< armnn::BackendId > &computeDevices)
std::vector< std::string > ParseStringList(const std::string &inputString, const char *delimiter)
Splits a given string at every occurrence of delimiter into a vector of strings.
armnn::IRuntime::CreationOptions m_RuntimeOptions
#define ARMNN_LOG(severity)
Definition: Logging.hpp:205
BackendRegistry & BackendRegistryInstance()
std::vector< armnn::BackendId > GetBackendIDs(const std::vector< std::string > &backendStringsVec)
Takes a vector of backend strings and returns a vector of backendIDs.
std::vector< std::string > m_OutputNames
std::string GetBackendIdsAsString() const
std::string m_IncomingCaptureFile
Path to a file in which incoming timeline profiling messages will be stored.
Definition: IRuntime.hpp:144
std::vector< std::string > m_OutputTensorFiles
bool m_EnableProfiling
Indicates whether external profiling is enabled or not.
Definition: IRuntime.hpp:138
bool m_FileOnly
Enable profiling output to file only.
Definition: IRuntime.hpp:146
bool CheckOption(const cxxopts::ParseResult &result, const char *option)
std::vector< armnn::BackendId > m_ComputeDevices
std::vector< std::string > m_OutputTypes
std::string StringTrimCopy(const std::string &str, const std::string &chars="\\\")
Trim from both the start and the end of a string, returns a trimmed copy of the string.
Definition: StringUtils.hpp:88
std::vector< BackendOptions > m_BackendOptions
Pass backend specific options.
Definition: IRuntime.hpp:189
ProgramOptions()
Initializes ProgramOptions by adding options to the underlying cxxopts::options object.
#define ARMNN_ASSERT_MSG(COND, MSG)
Definition: Assert.hpp:15
std::vector< std::string > m_InputNames
std::vector< std::string > m_InputTensorDataFilePaths
void ParseOptions(int ac, const char *av[])
Parses program options from the command line or another source and stores the values in member variab...
void CheckForDeprecatedOptions(const cxxopts::ParseResult &result)
Struct for the users to pass backend specific options.
void LogAndThrowFatal(std::string errorMessage)
std::string m_DynamicBackendsPath
Setting this value will override the paths set by the DYNAMIC_BACKEND_PATHS compiler directive Only a...
Definition: IRuntime.hpp:98
Holds and parses program options for the ExecuteNetwork application.
bool m_EnableGpuProfiling
Setting this flag will allow the user to obtain GPU profiling information from the runtime...
Definition: IRuntime.hpp:93
void CheckOptionDependency(const cxxopts::ParseResult &result, const char *option, const char *required)
void ValidateRuntimeOptions()
Ensures that the runtime options are valid.
uint32_t m_CapturePeriod
The duration at which captured profiling messages will be flushed.
Definition: IRuntime.hpp:148
void CheckOptionDependencies(const cxxopts::ParseResult &result)
bool m_TimelineEnabled
Indicates whether external timeline profiling is enabled or not.
Definition: IRuntime.hpp:140
cxxopts::ParseResult m_CxxResult
void CheckRequiredOptions(const cxxopts::ParseResult &result)
ExternalProfilingOptions m_ProfilingOptions
Definition: IRuntime.hpp:153
std::string m_FileFormat
The format of the file used for outputting profiling data.
Definition: IRuntime.hpp:150