ArmNN 21.02
ExecuteNetworkProgramOptions.cpp
1 //
2 // Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
6 #include "ExecuteNetworkProgramOptions.hpp"
7 #include "NetworkExecutionUtils/NetworkExecutionUtils.hpp"
8 #include "InferenceTest.hpp"
9 
10 #include <armnn/BackendRegistry.hpp>
11 #include <armnn/Exceptions.hpp>
12 #include <armnn/utility/Assert.hpp>
13 #include <armnn/utility/StringUtils.hpp>
14 #include <armnn/Logging.hpp>
15 
16 #include <fmt/format.h>
17 
18 bool CheckOption(const cxxopts::ParseResult& result,
19  const char* option)
20 {
21  // Check that the given option is valid.
22  if (option == nullptr)
23  {
24  return false;
25  }
26 
27  // Check whether 'option' is provided.
28  return ((result.count(option)) ? true : false);
29 }
30 
31 void CheckOptionDependency(const cxxopts::ParseResult& result,
32  const char* option,
33  const char* required)
34 {
35  // Check that the given options are valid.
36  if (option == nullptr || required == nullptr)
37  {
38  throw cxxopts::OptionParseException("Invalid option to check dependency for");
39  }
40 
41  // Check that if 'option' is provided, 'required' is also provided.
42  if (CheckOption(result, option) && !result[option].has_default())
43  {
44  if (CheckOption(result, required) == 0 || result[required].has_default())
45  {
46  throw cxxopts::OptionParseException(
47  std::string("Option '") + option + "' requires option '" + required + "'.");
48  }
49  }
50 }
51 
52 void CheckOptionDependencies(const cxxopts::ParseResult& result)
53 {
54  CheckOptionDependency(result, "model-path", "model-format");
55  CheckOptionDependency(result, "input-tensor-shape", "model-path");
56  CheckOptionDependency(result, "tuning-level", "tuning-path");
57 }
58 
59 void RemoveDuplicateDevices(std::vector<armnn::BackendId>& computeDevices)
60 {
61  // Mark the duplicate devices as 'Undefined'.
62  for (auto i = computeDevices.begin(); i != computeDevices.end(); ++i)
63  {
64  for (auto j = std::next(i); j != computeDevices.end(); ++j)
65  {
66  if (*j == *i)
67  {
68  *j = armnn::Compute::Undefined;
69  }
70  }
71  }
72 
73  // Remove 'Undefined' devices.
74  computeDevices.erase(std::remove(computeDevices.begin(), computeDevices.end(), armnn::Compute::Undefined),
75  computeDevices.end());
76 }
77 
78 /// Takes a vector of backend strings and returns a vector of backendIDs. Removes duplicate entries.
79 std::vector<armnn::BackendId> GetBackendIDs(const std::vector<std::string>& backendStrings)
80 {
81  std::vector<armnn::BackendId> backendIDs;
82  for (const auto& b : backendStrings)
83  {
84  backendIDs.push_back(armnn::BackendId(b));
85  }
86 
87  RemoveDuplicateDevices(backendIDs);
88 
89  return backendIDs;
90 }
91 
92 /// Provides a segfault safe way to get cxxopts option values by checking if the option was defined.
93 /// If the option wasn't defined it returns an empty object.
94 template<typename optionType>
95 optionType GetOptionValue(std::string&& optionName, const cxxopts::ParseResult& result)
96 {
97  optionType out;
98  if(result.count(optionName))
99  {
100  out = result[optionName].as<optionType>();
101  }
102  return out;
103 }
104 
105 void LogAndThrowFatal(std::string errorMessage)
106 {
107  throw armnn::InvalidArgumentException (errorMessage);
108 }
109 
110 void CheckRequiredOptions(const cxxopts::ParseResult& result)
111 {
112 
113  // For each option in option-group "a) Required".
114  std::vector<std::string> requiredOptions{"compute",
115  "model-format",
116  "model-path",
117  "input-name",
118  "output-name"};
119 
120  bool requiredMissing = false;
121  for(auto const& str : requiredOptions)
122  {
123  if(!(result.count(str) > 0))
124  {
125  ARMNN_LOG(error) << fmt::format("The program option '{}' is mandatory but wasn't provided.", str);
126  requiredMissing = true;
127  }
128  }
129  if(requiredMissing)
130  {
131  throw armnn::InvalidArgumentException ("Some required arguments are missing");
132  }
133 }
134 
135 void ProgramOptions::ValidateExecuteNetworkParams()
136 {
137  m_ExNetParams.ValidateParams();
138 }
139 
140 void ProgramOptions::ValidateRuntimeOptions()
141 {
142  if (m_RuntimeOptions.m_ProfilingOptions.m_TimelineEnabled &&
143  !m_RuntimeOptions.m_ProfilingOptions.m_EnableProfiling)
144  {
145  LogAndThrowFatal("Timeline profiling requires external profiling to be turned on");
146  }
147 }
148 
149 
150 ProgramOptions::ProgramOptions(): m_CxxOptions{"ExecuteNetwork",
151  "Executes a neural network model using the provided input "
152  "tensor. Prints the resulting output tensor."}
153 {
154  try
155  {
156  // cxxopts doesn't provide a mechanism to ensure required options are given. There is a
157  // separate function CheckRequiredOptions() for that.
158  m_CxxOptions.add_options("a) Required")
159  ("c,compute",
160  "Which device to run layers on by default. Possible choices: "
161  + armnn::BackendRegistryInstance().GetBackendIdsAsString()
162  + " NOTE: Compute devices need to be passed as a comma separated list without whitespaces "
163  "e.g. CpuRef,CpuAcc",
164  cxxopts::value<std::vector<std::string>>())
165 
166  ("f,model-format",
167  "armnn-binary, caffe-binary, caffe-text, onnx-binary, onnx-text, tflite-binary, tensorflow-binary or "
168  "tensorflow-text.",
169  cxxopts::value<std::string>())
170 
171  ("D,armnn-tflite-delegate",
172  "enable Arm NN TfLite delegate",
173  cxxopts::value<bool>(m_ExNetParams.m_EnableDelegate)->default_value("false")->implicit_value("true"))
174 
175  ("m,model-path",
176  "Path to model file, e.g. .armnn, .caffemodel, .prototxt, .tflite, .onnx",
177  cxxopts::value<std::string>(m_ExNetParams.m_ModelPath))
178 
179  ("i,input-name",
180  "Identifier of the input tensors in the network separated by comma.",
181  cxxopts::value<std::string>())
182 
183  ("o,output-name",
184  "Identifier of the output tensors in the network separated by comma.",
185  cxxopts::value<std::string>());
186 
187  m_CxxOptions.add_options("b) General")
188  ("b,dynamic-backends-path",
189  "Path where to load any available dynamic backend from. "
190  "If left empty (the default), dynamic backends will not be used.",
191  cxxopts::value<std::string>(m_RuntimeOptions.m_DynamicBackendsPath))
192 
193  ("d,input-tensor-data",
194  "Path to files containing the input data as a flat array separated by whitespace. "
195  "Several paths can be passed by separating them with a comma. If not specified, the network will be "
196  "run with dummy data (useful for profiling).",
197  cxxopts::value<std::string>()->default_value(""))
198 
199  ("h,help", "Display usage information")
200 
201  ("infer-output-shape",
202  "Infers output tensor shape from input tensor shape and validate where applicable (where supported by "
203  "parser)",
204  cxxopts::value<bool>(m_ExNetParams.m_InferOutputShape)->default_value("false")->implicit_value("true"))
205 
206  ("iterations",
207  "Number of iterations to run the network for, default is set to 1",
208  cxxopts::value<size_t>(m_ExNetParams.m_Iterations)->default_value("1"))
209 
210  ("l,dequantize-output",
211  "If this option is enabled, all quantized outputs will be dequantized to float. "
212  "If unset, default to not get dequantized. "
213  "Accepted values (true or false)",
214  cxxopts::value<bool>(m_ExNetParams.m_DequantizeOutput)->default_value("false")->implicit_value("true"))
215 
216  ("p,print-intermediate-layers",
217  "If this option is enabled, the output of every graph layer will be printed.",
218  cxxopts::value<bool>(m_ExNetParams.m_PrintIntermediate)->default_value("false")
219  ->implicit_value("true"))
220 
221  ("parse-unsupported",
222  "Add unsupported operators as stand-in layers (where supported by parser)",
223  cxxopts::value<bool>(m_ExNetParams.m_ParseUnsupported)->default_value("false")->implicit_value("true"))
224 
225  ("q,quantize-input",
226  "If this option is enabled, all float inputs will be quantized to qasymm8. "
227  "If unset, default to not quantized. Accepted values (true or false)",
228  cxxopts::value<bool>(m_ExNetParams.m_QuantizeInput)->default_value("false")->implicit_value("true"))
229 
230  ("r,threshold-time",
231  "Threshold time is the maximum allowed time for inference measured in milliseconds. If the actual "
232  "inference time is greater than the threshold time, the test will fail. By default, no threshold "
233  "time is used.",
234  cxxopts::value<double>(m_ExNetParams.m_ThresholdTime)->default_value("0.0"))
235 
236  ("s,input-tensor-shape",
237  "The shape of the input tensors in the network as a flat array of integers separated by comma."
238  "Several shapes can be passed by separating them with a colon (:).",
239  cxxopts::value<std::string>())
240 
241  ("v,visualize-optimized-model",
242  "Enables built optimized model visualizer. If unset, defaults to off.",
243  cxxopts::value<bool>(m_ExNetParams.m_EnableLayerDetails)->default_value("false")
244  ->implicit_value("true"))
245 
246  ("w,write-outputs-to-file",
247  "Comma-separated list of output file paths keyed with the binding-id of the output slot. "
248  "If left empty (the default), the output tensors will not be written to a file.",
249  cxxopts::value<std::string>())
250 
251  ("x,subgraph-number",
252  "Id of the subgraph to be executed. Defaults to 0.",
253  cxxopts::value<size_t>(m_ExNetParams.m_SubgraphId)->default_value("0"))
254 
255  ("y,input-type",
256  "The type of the input tensors in the network separated by comma. "
257  "If unset, defaults to \"float\" for all defined inputs. "
258  "Accepted values (float, int or qasymm8).",
259  cxxopts::value<std::string>())
260 
261  ("z,output-type",
262  "The type of the output tensors in the network separated by comma. "
263  "If unset, defaults to \"float\" for all defined outputs. "
264  "Accepted values (float, int or qasymm8).",
265  cxxopts::value<std::string>());
266 
267  m_CxxOptions.add_options("c) Optimization")
268  ("bf16-turbo-mode",
269  "If this option is enabled, FP32 layers, "
270  "weights and biases will be converted to BFloat16 where the backend supports it",
271  cxxopts::value<bool>(m_ExNetParams.m_EnableBf16TurboMode)
272  ->default_value("false")->implicit_value("true"))
273 
274  ("enable-fast-math",
275  "Enables fast_math options in backends that support it. Using the fast_math flag can lead to "
276  "performance improvements but may result in reduced or different precision.",
277  cxxopts::value<bool>(m_ExNetParams.m_EnableFastMath)->default_value("false")->implicit_value("true"))
278 
279  ("number-of-threads",
280  "Assign the number of threads used by the CpuAcc backend. "
281  "Input value must be between 1 and 64. "
282  "Default is set to 0 (Backend will decide number of threads to use).",
283  cxxopts::value<unsigned int>(m_ExNetParams.m_NumberOfThreads)->default_value("0"))
284 
285  ("save-cached-network",
286  "Enables saving of the cached network to a file given with the cached-network-filepath option. "
287  "See also --cached-network-filepath",
288  cxxopts::value<bool>(m_ExNetParams.m_SaveCachedNetwork)
289  ->default_value("false")->implicit_value("true"))
290 
291  ("cached-network-filepath",
292  "If non-empty, the given file will be used to load/save the cached network. "
293  "If save-cached-network is given then the cached network will be saved to the given file. "
294  "To save the cached network a file must already exist. "
295  "If save-cached-network is not given then the cached network will be loaded from the given file. "
296  "This will remove initial compilation time of kernels and speed up the first execution.",
297  cxxopts::value<std::string>(m_ExNetParams.m_CachedNetworkFilePath)->default_value(""))
298 
299  ("fp16-turbo-mode",
300  "If this option is enabled, FP32 layers, "
301  "weights and biases will be converted to FP16 where the backend supports it",
302  cxxopts::value<bool>(m_ExNetParams.m_EnableFp16TurboMode)
303  ->default_value("false")->implicit_value("true"))
304 
305  ("tuning-level",
306  "Sets the tuning level which enables a tuning run which will update/create a tuning file. "
307  "Available options are: 1 (Rapid), 2 (Normal), 3 (Exhaustive). "
308  "Requires tuning-path to be set, default is set to 0 (No tuning run)",
309  cxxopts::value<int>(m_ExNetParams.m_TuningLevel)->default_value("0"))
310 
311  ("tuning-path",
312  "Path to tuning file. Enables use of CL tuning",
313  cxxopts::value<std::string>(m_ExNetParams.m_TuningPath))
314 
315  ("MLGOTuningFilePath",
316  "Path to tuning file. Enables use of CL MLGO tuning",
317  cxxopts::value<std::string>(m_ExNetParams.m_MLGOTuningFilePath));
318 
319  m_CxxOptions.add_options("d) Profiling")
320  ("a,enable-external-profiling",
321  "If enabled external profiling will be switched on",
322  cxxopts::value<bool>(m_RuntimeOptions.m_ProfilingOptions.m_EnableProfiling)
323  ->default_value("false")->implicit_value("true"))
324 
325  ("e,event-based-profiling",
326  "Enables built in profiler. If unset, defaults to off.",
327  cxxopts::value<bool>(m_ExNetParams.m_EnableProfiling)->default_value("false")->implicit_value("true"))
328 
329  ("g,file-only-external-profiling",
330  "If enabled then the 'file-only' test mode of external profiling will be enabled",
331  cxxopts::value<bool>(m_RuntimeOptions.m_ProfilingOptions.m_FileOnly)
332  ->default_value("false")->implicit_value("true"))
333 
334  ("file-format",
335  "If profiling is enabled specifies the output file format",
336  cxxopts::value<std::string>(m_RuntimeOptions.m_ProfilingOptions.m_FileFormat)->default_value("binary"))
337 
338  ("j,outgoing-capture-file",
339  "If specified the outgoing external profiling packets will be captured in this binary file",
340  cxxopts::value<std::string>(m_RuntimeOptions.m_ProfilingOptions.m_OutgoingCaptureFile))
341 
342  ("k,incoming-capture-file",
343  "If specified the incoming external profiling packets will be captured in this binary file",
344  cxxopts::value<std::string>(m_RuntimeOptions.m_ProfilingOptions.m_IncomingCaptureFile))
345 
346  ("timeline-profiling",
347  "If enabled timeline profiling will be switched on, requires external profiling",
348  cxxopts::value<bool>(m_RuntimeOptions.m_ProfilingOptions.m_TimelineEnabled)
349  ->default_value("false")->implicit_value("true"))
350 
351  ("u,counter-capture-period",
352  "If profiling is enabled in 'file-only' mode this is the capture period that will be used in the test",
353  cxxopts::value<uint32_t>(m_RuntimeOptions.m_ProfilingOptions.m_CapturePeriod)->default_value("150"));
354  }
355  catch (const std::exception& e)
356  {
357  ARMNN_ASSERT_MSG(false, "Caught unexpected exception");
358  ARMNN_LOG(fatal) << "Fatal internal error: " << e.what();
359  exit(EXIT_FAILURE);
360  }
361 }
362 
363 ProgramOptions::ProgramOptions(int ac, const char* av[]): ProgramOptions()
364 {
365  ParseOptions(ac, av);
366 }
367 
368 void ProgramOptions::ParseOptions(int ac, const char* av[])
369 {
370  // Parses the command-line.
371  m_CxxResult = m_CxxOptions.parse(ac, av);
372 
373  if (m_CxxResult.count("help") || ac <= 1)
374  {
375  std::cout << m_CxxOptions.help() << std::endl;
376  exit(EXIT_SUCCESS);
377  }
378 
379  CheckRequiredOptions(m_CxxResult);
380  CheckOptionDependencies(m_CxxResult);
381 
382  // Some options can't be assigned directly because they need some post-processing:
383  auto computeDevices = GetOptionValue<std::vector<std::string>>("compute", m_CxxResult);
384  m_ExNetParams.m_ComputeDevices = GetBackendIDs(computeDevices);
385  m_ExNetParams.m_ModelFormat =
386  armnn::stringUtils::StringTrimCopy(GetOptionValue<std::string>("model-format", m_CxxResult));
387  m_ExNetParams.m_InputNames =
388  ParseStringList(GetOptionValue<std::string>("input-name", m_CxxResult), ",");
389  m_ExNetParams.m_InputTensorDataFilePaths =
390  ParseStringList(GetOptionValue<std::string>("input-tensor-data", m_CxxResult), ",");
391  m_ExNetParams.m_OutputNames =
392  ParseStringList(GetOptionValue<std::string>("output-name", m_CxxResult), ",");
393  m_ExNetParams.m_InputTypes =
394  ParseStringList(GetOptionValue<std::string>("input-type", m_CxxResult), ",");
395  m_ExNetParams.m_OutputTypes =
396  ParseStringList(GetOptionValue<std::string>("output-type", m_CxxResult), ",");
397  m_ExNetParams.m_OutputTensorFiles =
398  ParseStringList(GetOptionValue<std::string>("write-outputs-to-file", m_CxxResult), ",");
402 
403  // Parse input tensor shape from the string we got from the command-line.
404  std::vector<std::string> inputTensorShapesVector =
405  ParseStringList(GetOptionValue<std::string>("input-tensor-shape", m_CxxResult), ":");
406 
407  if (!inputTensorShapesVector.empty())
408  {
409  m_ExNetParams.m_InputTensorShapes.reserve(inputTensorShapesVector.size());
410 
411  for(const std::string& shape : inputTensorShapesVector)
412  {
413  std::stringstream ss(shape);
414  std::vector<unsigned int> dims = ParseArray(ss);
415 
416  m_ExNetParams.m_InputTensorShapes.push_back(
417  std::make_unique<armnn::TensorShape>(static_cast<unsigned int>(dims.size()), dims.data()));
418  }
419  }
420 
421  // We have to validate ExecuteNetworkParams first so that the tuning path and level are validated
422  ValidateExecuteNetworkParams();
423 
424  // Parse CL tuning parameters to runtime options
425  if (!m_ExNetParams.m_TuningPath.empty())
426  {
427  m_RuntimeOptions.m_BackendOptions.emplace_back(
428  armnn::BackendOptions
429  {
430  "GpuAcc",
431  {
432  {"TuningLevel", m_ExNetParams.m_TuningLevel},
433  {"TuningFile", m_ExNetParams.m_TuningPath.c_str()},
434  {"KernelProfilingEnabled", m_ExNetParams.m_EnableProfiling},
435  {"MLGOTuningFilePath", m_ExNetParams.m_MLGOTuningFilePath}
436  }
437  }
438  );
439  }
440 
441  ValidateRuntimeOptions();
442 }
443 
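For orientation, below is a minimal, hypothetical sketch (not part of this file) of how ProgramOptions is typically consumed by an ExecuteNetwork-style main(): the constructor parses and validates the command line, m_RuntimeOptions is passed to armnn::IRuntime::Create(), and m_ExNetParams carries the per-network settings. The included header name and the printed field are illustrative assumptions.

// Hypothetical usage sketch, assuming ProgramOptions is declared in ExecuteNetworkProgramOptions.hpp.
#include "ExecuteNetworkProgramOptions.hpp"
#include <armnn/IRuntime.hpp>
#include <iostream>

int main(int argc, const char* argv[])
{
    // Parses the command line, checks required options and dependencies, and validates
    // the ExecuteNetwork and runtime parameters; prints usage and exits on --help.
    ProgramOptions options(argc, argv);

    // The parsed runtime options (dynamic backend path, profiling, CL tuning backend
    // options) feed straight into runtime creation.
    armnn::IRuntimePtr runtime = armnn::IRuntime::Create(options.m_RuntimeOptions);

    // Network-specific parameters (model path, input/output names, shapes, ...) live in m_ExNetParams.
    std::cout << "Loading model: " << options.m_ExNetParams.m_ModelPath << std::endl;
    return 0;
}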
ExecuteNetworkParams m_ExNetParams
std::vector< std::string > m_InputTypes
void ValidateExecuteNetworkParams()
Ensures that the parameters for ExecuteNetwork fit together.
optionType GetOptionValue(std::string &&optionName, const cxxopts::ParseResult &result)
Provides a segfault safe way to get cxxopts option values by checking if the option was defined...
std::vector< TensorShapePtr > m_InputTensorShapes
std::vector< unsigned int > ParseArray(std::istream &stream)
void RemoveDuplicateDevices(std::vector< armnn::BackendId > &computeDevices)
std::vector< std::string > ParseStringList(const std::string &inputString, const char *delimiter)
Splits a given string at every occurrence of the delimiter into a vector of strings.
armnn::IRuntime::CreationOptions m_RuntimeOptions
#define ARMNN_LOG(severity)
Definition: Logging.hpp:202
BackendRegistry & BackendRegistryInstance()
std::vector< std::string > m_OutputNames
std::string GetBackendIdsAsString() const
std::vector< std::string > m_OutputTensorFiles
bool CheckOption(const cxxopts::ParseResult &result, const char *option)
std::vector< armnn::BackendId > m_ComputeDevices
std::vector< std::string > m_OutputTypes
std::string StringTrimCopy(const std::string &str, const std::string &chars="\t\n\v\f\r ")
Trim from both the start and the end of a string, returns a trimmed copy of the string.
Definition: StringUtils.hpp:85
std::vector< BackendOptions > m_BackendOptions
Pass backend specific options.
Definition: IRuntime.hpp:116
ProgramOptions()
Initializes ProgramOptions by adding options to the underlying cxxopts::options object.
#define ARMNN_ASSERT_MSG(COND, MSG)
Definition: Assert.hpp:15
std::vector< armnn::BackendId > GetBackendIDs(const std::vector< std::string > &backendStrings)
Takes a vector of backend strings and returns a vector of backendIDs. Removes duplicate entries...
std::vector< std::string > m_InputNames
std::vector< std::string > m_InputTensorDataFilePaths
void ParseOptions(int ac, const char *av[])
Parses program options from the command line or another source and stores the values in member variables.
Struct for the users to pass backend specific options.
void LogAndThrowFatal(std::string errorMessage)
std::string m_DynamicBackendsPath
Setting this value will override the paths set by the DYNAMIC_BACKEND_PATHS compiler directive Only a...
Definition: IRuntime.hpp:60
Holds and parses program options for the ExecuteNetwork application.
void CheckOptionDependency(const cxxopts::ParseResult &result, const char *option, const char *required)
void ValidateRuntimeOptions()
Ensures that the runtime options are valid.
void CheckOptionDependencies(const cxxopts::ParseResult &result)
cxxopts::ParseResult m_CxxResult
void CheckRequiredOptions(const cxxopts::ParseResult &result)
ExternalProfilingOptions m_ProfilingOptions
Definition: IRuntime.hpp:84