author     Tracy Narine <tracy.narine@arm.com>  2023-08-15 16:09:24 +0100
committer  Tracy Narine <tracy.narine@arm.com>  2023-08-21 11:15:03 +0100
commit     c4f42340bd3d6664098c69d2fb044089aa39aea0 (patch)
tree       177858cec3340eca636e60bed3ff4c9f5fbd0774 /tests
parent     846a24f8256d49ad44984b8e54f37761b9ea5132 (diff)
download   armnn-c4f42340bd3d6664098c69d2fb044089aa39aea0.tar.gz
IVGCVSW-6667 Update the Arm NN Execute Network app --help
* Moved deprecated functionality to its own heading
* Updated documentation for many of the parameters
* Added information on execution time versus inference time

Signed-off-by: Tracy Narine <tracy.narine@arm.com>
Change-Id: If017e49e7f6c60a72fe948c1cfa1a6f6a5c4a692
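The distinction the commit draws between Execution Time and Inference Time can be illustrated with a minimal, self-contained C++ sketch. The helpers LoadAndPrepare and RunInference are hypothetical stand-ins (not Arm NN APIs), and the sleeps merely simulate work:

    #include <chrono>
    #include <iostream>
    #include <thread>

    using Clock = std::chrono::steady_clock;
    using Ms    = std::chrono::duration<double, std::milli>;

    // Hypothetical stand-ins for model setup and the core workload.
    void LoadAndPrepare() { std::this_thread::sleep_for(std::chrono::milliseconds(30)); }
    void RunInference()   { std::this_thread::sleep_for(std::chrono::milliseconds(10)); }

    int main()
    {
        const auto outerStart = Clock::now(); // crude overall timer ("Inference Time")
        LoadAndPrepare();                     // input loading, buffer setup, etc.

        const auto coreStart = Clock::now();  // tight timer around the workload only
        RunInference();                       // ("Execution Time")
        const auto coreStop = Clock::now();

        const auto outerStop = Clock::now();

        std::cout << "Execution time: " << Ms(coreStop - coreStart).count() << " ms\n"
                  << "Inference time: " << Ms(outerStop - outerStart).count() << " ms\n";
    }

The coarse timer includes setup and I/O that the tight timer excludes, which is why only the latter is suitable for performance analysis.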
Diffstat (limited to 'tests')
-rw-r--r--  tests/ExecuteNetwork/ExecuteNetworkProgramOptions.cpp | 157
1 file changed, 85 insertions(+), 72 deletions(-)
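The regrouping in the diff below relies on a standard cxxopts feature: options registered under a named group are printed under that heading by --help. A minimal sketch of the pattern, with illustrative option names only (not the Arm NN sources):

    #include <cxxopts.hpp>
    #include <iostream>
    #include <string>
    #include <vector>

    int main(int argc, char** argv)
    {
        cxxopts::Options options("ExampleApp", "Demonstrates grouped --help output.");

        options.add_options("a) Required")
            ("c,compute",
             "Backends; comma separated or repeated, e.g. '-c CpuAcc -c CpuRef'.",
             cxxopts::value<std::vector<std::string>>())
            ("h,help", "Display usage information.");

        options.add_options("f) Deprecated or unused")
            ("f,model-format",
             "DEPRECATED: the model format is now detected automatically.",
             cxxopts::value<std::string>());

        const auto result = options.parse(argc, argv);
        if (result.count("help"))
        {
            // Groups are printed in insertion order, each under its own heading.
            std::cout << options.help() << std::endl;
        }
        return 0;
    }

Because groups are emitted in the order they are added, placing the deprecated group last keeps it at the bottom of the --help output.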
diff --git a/tests/ExecuteNetwork/ExecuteNetworkProgramOptions.cpp b/tests/ExecuteNetwork/ExecuteNetworkProgramOptions.cpp
index 4066cdcadb..72c414aa0e 100644
--- a/tests/ExecuteNetwork/ExecuteNetworkProgramOptions.cpp
+++ b/tests/ExecuteNetwork/ExecuteNetworkProgramOptions.cpp
@@ -191,7 +191,13 @@ void ProgramOptions::ValidateRuntimeOptions()
ProgramOptions::ProgramOptions() : m_CxxOptions{"ExecuteNetwork",
"Executes a neural network model using the provided input "
- "tensor. Prints the resulting output tensor."}
+ "tensor(s). Prints the resulting output tensor(s).\n"
+ "\n"
+ "The Execution Time generated by the Arm NN Core code is a\n"
+ "more accurate measure of the time taken overall to run the model.\n"
+ "The Inference Time printed in an Execute Network run is doing\n"
+ "a crude measurement of overall time to check against a very inexact\n"
+ "upper threshold and should not be used for performance analysis.\n"}
{
try
{
@@ -199,35 +205,32 @@ ProgramOptions::ProgramOptions() : m_CxxOptions{"ExecuteNetwork",
// separate function CheckRequiredOptions() for that.
m_CxxOptions.add_options("a) Required")
("c,compute",
- "Which device to run layers on by default. If a single device doesn't support all layers in the model "
- "you can specify a second or third to fall back on. Possible choices: "
- + armnn::BackendRegistryInstance().GetBackendIdsAsString()
- + " NOTE: Multiple compute devices need to be passed as a comma separated list without whitespaces "
- "e.g. GpuAcc,CpuAcc,CpuRef or by repeating the program option e.g. '-c CpuAcc -c CpuRef'. "
- "Duplicates are ignored.",
+ "Which device to run layers on by default. If a single device doesn't support all layers in the "
+ "model you can specify a second or third to fall back on. "
+ "NOTE: Multiple compute devices need to be passed as a comma separated list without whitespaces "
+ "e.g. GpuAcc,CpuAcc,CpuRef or by repeating the program option e.g. '-c CpuAcc -c CpuRef'. "
+ "Duplicates are ignored.\n"
+ "Possible choices for current machine: [ "
+ + armnn::BackendRegistryInstance().GetBackendIdsAsString() + " ]",
cxxopts::value<std::vector<std::string>>())
- ("f,model-format",
- "armnn-binary, onnx-binary, onnx-text, tflite-binary"
- "DEPRECATED: The program option 'model-format' is deprecated and will be "
- "removed soon. The model-format is now automatically set.",
- cxxopts::value<std::string>())
-
("m,model-path",
- "Path to model file, e.g. .armnn, , .prototxt, .tflite, .onnx",
- cxxopts::value<std::string>(m_ExNetParams.m_ModelPath))
+ "Path to model file, e.g. .armnn, .tflite, .onnx. "
+ "DEPRECATED: .pb and .prototxt model files no longer load and are deprecated.",
+ cxxopts::value<std::string>(m_ExNetParams.m_ModelPath));
+ m_CxxOptions.add_options("b) Ordering")
("i,input-name",
- "Identifier of the input tensors in the network separated by comma."
- "This option is not required, but can be used to set the order of inputs",
+ "Identifier of the input tensors in the network separated by a comma "
+ "and is only required for ordering input files",
cxxopts::value<std::string>())
("o,output-name",
- "Identifier of the output tensors in the network separated by comma."
- "This option is not required, but can be used to set the order of outputs",
+ "Identifier of the output tensors in the network separated by a comma "
+ "and is only required for ordering output files.",
cxxopts::value<std::string>());
- m_CxxOptions.add_options("b) General")
+ m_CxxOptions.add_options("c) General")
("b,dynamic-backends-path",
"Path where to load any available dynamic backend from. "
"If left empty (the default), dynamic backends will not be used.",
@@ -237,14 +240,6 @@ ProgramOptions::ProgramOptions() : m_CxxOptions{"ExecuteNetwork",
"Run the network using the Arm NN thread pool with the number of threads provided. ",
cxxopts::value<size_t>(m_ExNetParams.m_ThreadPoolSize)->default_value("0"))
- ("n,concurrent",
- "This option is for Arm NN internal asynchronous testing purposes. "
- "False by default. If set to true will use std::launch::async or the Arm NN thread pool, "
- "if 'thread-pool-size' is greater than 0, for asynchronous execution."
- "DEPRECATED: The program option 'concurrent' is deprecated and will be "
- "removed soon. Please use the option '\"P, thread-pool-size\"' instead.",
- cxxopts::value<bool>(m_ExNetParams.m_Concurrent)->default_value("false")->implicit_value("true"))
-
("d,input-tensor-data",
"Path to files containing the input data as a flat array separated by whitespace. "
"Several paths can be passed by separating them with a comma if the network has multiple inputs "
@@ -280,7 +275,6 @@ ProgramOptions::ProgramOptions() : m_CxxOptions{"ExecuteNetwork",
("l,dequantize-output",
"If this option is enabled, all quantized outputs will be dequantized to float. "
"If unset, default to not get dequantized. "
- "Accepted values (true or false)"
" (Not available when executing ArmNNTfLiteDelegate or TfliteInterpreter)",
cxxopts::value<bool>(m_ExNetParams.m_DequantizeOutput)->default_value("false")->implicit_value("true"))
@@ -305,7 +299,6 @@ ProgramOptions::ProgramOptions() : m_CxxOptions{"ExecuteNetwork",
("q,quantize-input",
"If this option is enabled, all float inputs will be quantized as appropriate for the model's inputs. "
- "If unset, default to not quantized. Accepted values (true or false)"
" (Not available when executing ArmNNTfLiteDelegate or TfliteInterpreter)",
cxxopts::value<bool>(m_ExNetParams.m_QuantizeInput)->default_value("false")->implicit_value("true"))
@@ -335,28 +328,12 @@ ProgramOptions::ProgramOptions() : m_CxxOptions{"ExecuteNetwork",
" (Not available when executing ArmNNTfLiteDelegate or TfliteInterpreter)",
cxxopts::value<size_t>(m_ExNetParams.m_SubgraphId)->default_value("0"))
- ("y,input-type",
- "The type of the input tensors in the network separated by comma. "
- "If unset, defaults to \"float\" for all defined inputs. "
- "Accepted values (float, int, qasymms8 or qasymmu8)."
- "DEPRECATED: The program option 'input-type' is deprecated and will be "
- "removed soon. The input-types are now automatically set.",
- cxxopts::value<std::string>())
-
- ("z,output-type",
- "The type of the output tensors in the network separated by comma. "
- "If unset, defaults to \"float\" for all defined outputs. "
- "Accepted values (float, int, qasymms8 or qasymmu8)."
- "DEPRECATED: The program option 'output-type' is deprecated and will be "
- "removed soon. The output-types are now automatically set.",
- cxxopts::value<std::string>())
-
("T,tflite-executor",
- "Set the executor for the tflite model: parser, delegate, tflite"
- "parser is the ArmNNTfLiteParser, "
- "delegate is the ArmNNTfLiteDelegate, "
- "opaquedelegate is the ArmNNTfLiteOpaqueDelegate, "
- "tflite is the TfliteInterpreter",
+ "Set the executor for the tflite model: parser, delegate, opaquedelegate, tflite.\n"
+ "parser is the ArmNNTfLiteParser, \n"
+ "delegate is the ArmNNTfLiteDelegate, \n"
+ "opaquedelegate is the ArmNNTfLiteOpaqueDelegate, \n"
+ "tflite is the TfliteInterpreter (Bypasses Arm NN and runs model in Tensorflow Lite directly)\n",
cxxopts::value<std::string>()->default_value("parser"))
("C, compare-output",
@@ -376,16 +353,11 @@ ProgramOptions::ProgramOptions() : m_CxxOptions{"ExecuteNetwork",
cxxopts::value<bool>(m_ExNetParams.m_CompareWithTflite)->default_value("false")
->implicit_value("true"));
- m_CxxOptions.add_options("c) Optimization")
- ("bf16-turbo-mode",
- "This option is no longer being used. In order to use bf16 please set enable-fast-math "
- "to true",
- cxxopts::value<bool>(m_ExNetParams.m_EnableBf16TurboMode)
- ->default_value("false")->implicit_value("true"))
-
+ m_CxxOptions.add_options("d) Optimization")
("enable-fast-math",
- "Enables fast_math options in backends that support it. Using the fast_math flag can lead to "
- "performance improvements but may result in reduced or different precision. ",
+ "Enables fast_math options in backends that support it. Using the fast_math flag "
+ "can lead to performance improvements but may result in reduced or different precision. "
+ "Supported on CpuAcc and GpuAcc only.",
cxxopts::value<bool>(m_ExNetParams.m_EnableFastMath)->default_value("false")->implicit_value("true"))
("number-of-threads",
@@ -395,7 +367,8 @@ ProgramOptions::ProgramOptions() : m_CxxOptions{"ExecuteNetwork",
cxxopts::value<unsigned int>(m_ExNetParams.m_NumberOfThreads)->default_value("0"))
("save-cached-network",
- "Enables saving of the cached network to a file given with the cached-network-filepath option. "
+ "Enables saving of the cached network to a file given with the cached-network-filepath option.\n"
+ "Supported on GpuAcc only.\n"
"See also --cached-network-filepath",
cxxopts::value<bool>(m_ExNetParams.m_SaveCachedNetwork)
->default_value("false")->implicit_value("true"))
@@ -405,14 +378,16 @@ ProgramOptions::ProgramOptions() : m_CxxOptions{"ExecuteNetwork",
"If save-cached-network is given then the cached network will be saved to the given file. "
"To save the cached network a file must already exist. "
"If save-cached-network is not given then the cached network will be loaded from the given file. "
- "This will remove initial compilation time of kernels and speed up the first execution.",
+ "This will remove initial compilation time of kernels and speed up the first execution.\n"
+ "Supported on GpuAcc only.",
cxxopts::value<std::string>(m_ExNetParams.m_CachedNetworkFilePath)->default_value(""))
("fp16-turbo-mode",
"If this option is enabled, FP32 layers, "
"weights and biases will be converted to FP16 where the backend supports it. "
"If the first preferred backend does not have FP16 support, this option will be disabled. "
- "If the value of converted FP16 is infinity, round to the closest finite FP16 value.",
+ "If the value of converted FP16 is infinity, round to the closest finite FP16 value."
+ "Supported on CpuAcc and GpuAcc only.",
cxxopts::value<bool>(m_ExNetParams.m_EnableFp16TurboMode)
->default_value("false")->implicit_value("true"))
@@ -427,23 +402,20 @@ ProgramOptions::ProgramOptions() : m_CxxOptions{"ExecuteNetwork",
cxxopts::value<std::string>(m_ExNetParams.m_TuningPath))
("MLGOTuningFilePath",
- "Path to tuning file. Enables use of CL MLGO tuning",
+ "Path to tuning file. Enables use of CL MLGO tuning.\n"
+ "Supported on GpuAcc only.",
cxxopts::value<std::string>(m_ExNetParams.m_MLGOTuningFilePath))
("R, reuse-buffers",
"If enabled then the IO buffers will be reused for each inference",
cxxopts::value<bool>(m_ExNetParams.m_ReuseBuffers)->default_value("false")->implicit_value("true"));
- m_CxxOptions.add_options("d) Profiling")
+ m_CxxOptions.add_options("e) Profiling")
("a,enable-external-profiling",
"If enabled external profiling will be switched on",
cxxopts::value<bool>(m_RuntimeOptions.m_ProfilingOptions.m_EnableProfiling)
->default_value("false")->implicit_value("true"))
- ("e,event-based-profiling",
- "Enables built in profiler. If unset, defaults to off.",
- cxxopts::value<bool>(m_ExNetParams.m_EnableProfiling)->default_value("false")->implicit_value("true"))
-
("g,file-only-external-profiling",
"If enabled then the 'file-only' test mode of external profiling will be enabled",
cxxopts::value<bool>(m_RuntimeOptions.m_ProfilingOptions.m_FileOnly)
@@ -470,20 +442,61 @@ ProgramOptions::ProgramOptions() : m_CxxOptions{"ExecuteNetwork",
"If profiling is enabled in 'file-only' mode this is the capture period that will be used in the test",
cxxopts::value<uint32_t>(m_RuntimeOptions.m_ProfilingOptions.m_CapturePeriod)->default_value("150"))
+ ("e,event-based-profiling",
+ "Enables built in profiler.",
+ cxxopts::value<bool>(m_ExNetParams.m_EnableProfiling)->default_value("false")->implicit_value("true"))
+
("output-network-details",
- "Outputs layer tensor infos and descriptors to std out along with profiling events. Defaults to off.",
+ "Outputs layer tensor infos and descriptors to std out along with profiling events.",
cxxopts::value<bool>(m_ExNetParams.m_OutputDetailsToStdOut)->default_value("false")
->implicit_value("true"))
("output-network-details-only",
- "Outputs layer tensor infos and descriptors to std out without profiling events. Defaults to off.",
+ "Outputs layer tensor infos and descriptors to std out without profiling events.",
cxxopts::value<bool>(m_ExNetParams.m_OutputDetailsOnlyToStdOut)->default_value("false")
->implicit_value("true"))
("import-inputs-if-aligned",
- "In & Out tensors will be imported per inference if the memory alignment allows. Defaults to false.",
+ "In & Out tensors will be imported per inference if the memory alignment allows.",
cxxopts::value<bool>(m_ExNetParams.m_ImportInputsIfAligned)->default_value("false")
->implicit_value("true"));
+
+ m_CxxOptions.add_options("f) Deprecated or unused")
+ ("f,model-format",
+ "armnn-binary, onnx-binary, onnx-text, tflite-binary"
+ "DEPRECATED: The program option 'model-format' is deprecated and will be "
+ "removed soon. The model-format is now automatically set.",
+ cxxopts::value<std::string>())
+
+ ("n,concurrent",
+ "This option is for Arm NN internal asynchronous testing purposes. "
+ "False by default. If set to true will use std::launch::async or the Arm NN thread pool, "
+ "if 'thread-pool-size' is greater than 0, for asynchronous execution."
+ "DEPRECATED: The program option 'concurrent' is deprecated and will be "
+ "removed soon. Please use the option '\"P, thread-pool-size\"' instead.",
+ cxxopts::value<bool>(m_ExNetParams.m_Concurrent)->default_value("false")->implicit_value("true"))
+
+ ("y,input-type",
+ "The type of the input tensors in the network separated by comma. "
+ "If unset, defaults to \"float\" for all defined inputs. "
+ "Accepted values (float, int, qasymms8 or qasymmu8)."
+ "DEPRECATED: The program option 'input-type' is deprecated and will be "
+ "removed soon. The input-types are now automatically set.",
+ cxxopts::value<std::string>())
+
+ ("z,output-type",
+ "The type of the output tensors in the network separated by comma. "
+ "If unset, defaults to \"float\" for all defined outputs. "
+ "Accepted values (float, int, qasymms8 or qasymmu8)."
+ "DEPRECATED: The program option 'output-type' is deprecated and will be "
+ "removed soon. The output-types are now automatically set.",
+ cxxopts::value<std::string>())
+
+ ("bf16-turbo-mode",
+ "This option is no longer being used. In order to use bf16 please set enable-fast-math "
+ "to true",
+ cxxopts::value<bool>(m_ExNetParams.m_EnableBf16TurboMode)
+ ->default_value("false")->implicit_value("true"));
}
catch (const std::exception& e)
{