16 #include <fmt/format.h> 22 if (option ==
nullptr)
28 return ((result.count(option)) ?
true :
false);
36 if (option ==
nullptr || required ==
nullptr)
38 throw cxxopts::OptionParseException(
"Invalid option to check dependency for");
42 if (
CheckOption(result, option) && !result[option].has_default())
44 if (
CheckOption(result, required) == 0 || result[required].has_default())
46 throw cxxopts::OptionParseException(
47 std::string(
"Option '") + option +
"' requires option '" + required +
"'.");
62 for (
auto i = computeDevices.begin(); i != computeDevices.end(); ++i)
64 for (
auto j = std::next(i); j != computeDevices.end(); ++j)
75 computeDevices.end());
79 std::vector<armnn::BackendId>
GetBackendIDs(
const std::vector<std::string>& backendStrings)
81 std::vector<armnn::BackendId> backendIDs;
82 for (
const auto& b : backendStrings)
94 template<
typename optionType>
95 optionType
GetOptionValue(std::string&& optionName,
const cxxopts::ParseResult& result)
98 if(result.count(optionName))
100 out = result[optionName].as<optionType>();
114 std::vector<std::string> requiredOptions{
"compute",
120 bool requiredMissing =
false;
121 for(
auto const& str : requiredOptions)
123 if(!(result.count(str) > 0))
125 ARMNN_LOG(error) << fmt::format(
"The program option '{}' is mandatory but wasn't provided.", str);
126 requiredMissing =
true;
145 LogAndThrowFatal(
"Timeline profiling requires external profiling to be turned on");
151 "Executes a neural network model using the provided input " 152 "tensor. Prints the resulting output tensor."}
160 "Which device to run layers on by default. Possible choices: " 162 +
" NOTE: Compute devices need to be passed as a comma separated list without whitespaces " 163 "e.g. CpuRef,CpuAcc",
164 cxxopts::value<std::vector<std::string>>())
167 "armnn-binary, caffe-binary, caffe-text, onnx-binary, onnx-text, tflite-binary, tensorflow-binary or " 169 cxxopts::value<std::string>())
171 (
"D,armnn-tflite-delegate",
172 "enable Arm NN TfLite delegate",
176 "Path to model file, e.g. .armnn, .caffemodel, .prototxt, .tflite, .onnx",
180 "Identifier of the input tensors in the network separated by comma.",
181 cxxopts::value<std::string>())
184 "Identifier of the output tensors in the network separated by comma.",
185 cxxopts::value<std::string>());
188 (
"b,dynamic-backends-path",
189 "Path where to load any available dynamic backend from. " 190 "If left empty (the default), dynamic backends will not be used.",
193 (
"d,input-tensor-data",
194 "Path to files containing the input data as a flat array separated by whitespace. " 195 "Several paths can be passed by separating them with a comma. If not specified, the network will be " 196 "run with dummy data (useful for profiling).",
197 cxxopts::value<std::string>()->default_value(
""))
199 (
"h,help",
"Display usage information")
201 (
"infer-output-shape",
202 "Infers output tensor shape from input tensor shape and validate where applicable (where supported by " 207 "Number of iterations to run the network for, default is set to 1",
210 (
"l,dequantize-output",
211 "If this option is enabled, all quantized outputs will be dequantized to float. " 212 "If unset, default to not get dequantized. " 213 "Accepted values (true or false)",
216 (
"p,print-intermediate-layers",
217 "If this option is enabled, the output of every graph layer will be printed.",
219 ->implicit_value(
"true"))
221 (
"parse-unsupported",
222 "Add unsupported operators as stand-in layers (where supported by parser)",
226 "If this option is enabled, all float inputs will be quantized to qasymm8. " 227 "If unset, default to not quantized. Accepted values (true or false)",
231 "Threshold time is the maximum allowed time for inference measured in milliseconds. If the actual " 232 "inference time is greater than the threshold time, the test will fail. By default, no threshold " 236 (
"s,input-tensor-shape",
237 "The shape of the input tensors in the network as a flat array of integers separated by comma." 238 "Several shapes can be passed by separating them with a colon (:).",
239 cxxopts::value<std::string>())
241 (
"v,visualize-optimized-model",
242 "Enables built optimized model visualizer. If unset, defaults to off.",
244 ->implicit_value(
"true"))
246 (
"w,write-outputs-to-file",
247 "Comma-separated list of output file paths keyed with the binding-id of the output slot. " 248 "If left empty (the default), the output tensors will not be written to a file.",
249 cxxopts::value<std::string>())
251 (
"x,subgraph-number",
252 "Id of the subgraph to be executed. Defaults to 0.",
256 "The type of the input tensors in the network separated by comma. " 257 "If unset, defaults to \"float\" for all defined inputs. " 258 "Accepted values (float, int or qasymm8).",
259 cxxopts::value<std::string>())
262 "The type of the output tensors in the network separated by comma. " 263 "If unset, defaults to \"float\" for all defined outputs. " 264 "Accepted values (float, int or qasymm8).",
265 cxxopts::value<std::string>());
269 "If this option is enabled, FP32 layers, " 270 "weights and biases will be converted to BFloat16 where the backend supports it",
272 ->default_value(
"false")->implicit_value(
"true"))
275 "Enables fast_math options in backends that support it. Using the fast_math flag can lead to " 276 "performance improvements but may result in reduced or different precision.",
279 (
"number-of-threads",
280 "Assign the number of threads used by the CpuAcc backend. " 281 "Input value must be between 1 and 64. " 282 "Default is set to 0 (Backend will decide number of threads to use).",
285 (
"save-cached-network",
286 "Enables saving of the cached network to a file given with the cached-network-filepath option. " 287 "See also --cached-network-filepath",
289 ->default_value(
"false")->implicit_value(
"true"))
291 (
"cached-network-filepath",
292 "If non-empty, the given file will be used to load/save the cached network. " 293 "If save-cached-network is given then the cached network will be saved to the given file. " 294 "To save the cached network a file must already exist. " 295 "If save-cached-network is not given then the cached network will be loaded from the given file. " 296 "This will remove initial compilation time of kernels and speed up the first execution.",
300 "If this option is enabled, FP32 layers, " 301 "weights and biases will be converted to FP16 where the backend supports it",
303 ->default_value(
"false")->implicit_value(
"true"))
306 "Sets the tuning level which enables a tuning run which will update/create a tuning file. " 307 "Available options are: 1 (Rapid), 2 (Normal), 3 (Exhaustive). " 308 "Requires tuning-path to be set, default is set to 0 (No tuning run)",
312 "Path to tuning file. Enables use of CL tuning",
315 (
"MLGOTuningFilePath",
316 "Path to tuning file. Enables use of CL MLGO tuning",
320 (
"a,enable-external-profiling",
321 "If enabled external profiling will be switched on",
323 ->default_value(
"false")->implicit_value(
"true"))
325 (
"e,event-based-profiling",
326 "Enables built in profiler. If unset, defaults to off.",
329 (
"g,file-only-external-profiling",
330 "If enabled then the 'file-only' test mode of external profiling will be enabled",
332 ->default_value(
"false")->implicit_value(
"true"))
335 "If profiling is enabled specifies the output file format",
338 (
"j,outgoing-capture-file",
339 "If specified the outgoing external profiling packets will be captured in this binary file",
342 (
"k,incoming-capture-file",
343 "If specified the incoming external profiling packets will be captured in this binary file",
346 (
"timeline-profiling",
347 "If enabled timeline profiling will be switched on, requires external profiling",
349 ->default_value(
"false")->implicit_value(
"true"))
351 (
"u,counter-capture-period",
352 "If profiling is enabled in 'file-only' mode this is the capture period that will be used in the test",
355 catch (
const std::exception& e)
358 ARMNN_LOG(fatal) <<
"Fatal internal error: " << e.what();
383 auto computeDevices = GetOptionValue<std::vector<std::string>>(
"compute",
m_CxxResult);
404 std::vector<std::string> inputTensorShapesVector =
407 if (!inputTensorShapesVector.empty())
411 for(
const std::string& shape : inputTensorShapesVector)
413 std::stringstream ss(shape);
414 std::vector<unsigned int> dims =
ParseArray(ss);
417 std::make_unique<armnn::TensorShape>(static_cast<unsigned int>(dims.size()), dims.data()));
ExecuteNetworkParams m_ExNetParams
std::vector< std::string > m_InputTypes
cxxopts::Options m_CxxOptions
void ValidateExecuteNetworkParams()
Ensures that the parameters for ExecuteNetwork fit together.
std::string m_MLGOTuningFilePath
optionType GetOptionValue(std::string &&optionName, const cxxopts::ParseResult &result)
Provides a segfault safe way to get cxxopts option values by checking if the option was defined...
std::string m_OutgoingCaptureFile
std::vector< TensorShapePtr > m_InputTensorShapes
std::vector< unsigned int > ParseArray(std::istream &stream)
void RemoveDuplicateDevices(std::vector< armnn::BackendId > &computeDevices)
bool m_EnableFp16TurboMode
std::vector< std::string > ParseStringList(const std::string &inputString, const char *delimiter)
Splits a given string at every occurrence of the delimiter into a vector of strings.
std::string m_DynamicBackendsPath
armnn::IRuntime::CreationOptions m_RuntimeOptions
#define ARMNN_LOG(severity)
BackendRegistry & BackendRegistryInstance()
std::vector< std::string > m_OutputNames
std::string GetBackendIdsAsString() const
std::string m_IncomingCaptureFile
std::vector< std::string > m_OutputTensorFiles
std::string m_CachedNetworkFilePath
bool CheckOption(const cxxopts::ParseResult &result, const char *option)
std::vector< armnn::BackendId > m_ComputeDevices
std::vector< std::string > m_OutputTypes
std::string StringTrimCopy(const std::string &str, const std::string &chars="\t\n\v\f\r ")
Trims both the start and the end of a string and returns the trimmed copy.
std::vector< BackendOptions > m_BackendOptions
Pass backend specific options.
ProgramOptions()
Initializes ProgramOptions by adding options to the underlying cxxopts::options object.
bool m_GenerateTensorData
#define ARMNN_ASSERT_MSG(COND, MSG)
unsigned int m_NumberOfThreads
std::vector< armnn::BackendId > GetBackendIDs(const std::vector< std::string > &backendStrings)
Takes a vector of backend strings and returns a vector of backendIDs. Removes duplicate entries...
std::vector< std::string > m_InputNames
bool m_EnableBf16TurboMode
std::vector< std::string > m_InputTensorDataFilePaths
void ParseOptions(int ac, const char *av[])
Parses program options from the command line or another source and stores the values in member variables.
Struct for the users to pass backend specific options.
void LogAndThrowFatal(std::string errorMessage)
std::string m_DynamicBackendsPath
Setting this value will override the paths set by the DYNAMIC_BACKEND_PATHS compiler directive Only a...
Holds and parses program options for the ExecuteNetwork application.
void CheckOptionDependency(const cxxopts::ParseResult &result, const char *option, const char *required)
bool m_EnableLayerDetails
void ValidateRuntimeOptions()
Ensures that the runtime options are valid.
void CheckOptionDependencies(const cxxopts::ParseResult &result)
cxxopts::ParseResult m_CxxResult
void CheckRequiredOptions(const cxxopts::ParseResult &result)
ExternalProfilingOptions m_ProfilingOptions
std::string m_ModelFormat