aboutsummaryrefslogtreecommitdiff
path: root/23.05/runtimeoptions.xhtml
blob: 1bef0586a8d0759cd339118d59763fa8db9774b3 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
<!-- Copyright (c) 2020 ARM Limited. -->
<!--                                 -->
<!-- SPDX-License-Identifier: MIT    -->
<!--                                 -->
<!-- HTML header for doxygen 1.8.13-->
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
<meta http-equiv="X-UA-Compatible" content="IE=9"/>
<meta name="generator" content="Doxygen 1.8.17"/>
<meta name="robots" content="NOINDEX, NOFOLLOW" />
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<title>ArmNN: Runtime options for Arm NN</title>
<link href="tabs.css" rel="stylesheet" type="text/css"/>
<script type="text/javascript" src="jquery.js"></script>
<script type="text/javascript" src="dynsections.js"></script>
<link href="navtree.css" rel="stylesheet" type="text/css"/>
<script type="text/javascript" src="resize.js"></script>
<script type="text/javascript" src="navtreedata.js"></script>
<script type="text/javascript" src="navtree.js"></script>
<link href="search/search.css" rel="stylesheet" type="text/css"/>
<script type="text/javascript" src="search/searchdata.js"></script>
<script type="text/javascript" src="search/search.js"></script>
<script type="text/x-mathjax-config">
  MathJax.Hub.Config({
    extensions: ["tex2jax.js"],
    jax: ["input/TeX","output/HTML-CSS"],
});
</script>
<script type="text/javascript" async="async" src="https://cdn.mathjax.org/mathjax/latest/MathJax.js"></script>
<link href="doxygen.css" rel="stylesheet" type="text/css" />
<link href="stylesheet.css" rel="stylesheet" type="text/css"/>
</head>
<body>
<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
<div id="titlearea">
<table cellspacing="0" cellpadding="0">
 <tbody>
 <tr style="height: 56px;">
  <td><img alt="ArmNN" src="Arm_NN_horizontal_blue.png" style="max-width: 10rem; margin-top: .5rem; margin-left: 10px"/></td>
  <td style="padding-left: 0.5em;">
   <div id="projectname">
   &#160;<span id="projectnumber">23.05</span>
   </div>
  </td>
 </tr>
 </tbody>
</table>
</div>
<!-- end header part -->
<!-- Generated by Doxygen 1.8.17 -->
<script type="text/javascript">
/* @license magnet:?xt=urn:btih:cf05388f2679ee054f2beb29a391d25f4e673ac3&amp;dn=gpl-2.0.txt GPL-v2 */
var searchBox = new SearchBox("searchBox", "search",false,'Search');
/* @license-end */
</script>
<script type="text/javascript" src="menudata.js"></script>
<script type="text/javascript" src="menu.js"></script>
<script type="text/javascript">
/* @license magnet:?xt=urn:btih:cf05388f2679ee054f2beb29a391d25f4e673ac3&amp;dn=gpl-2.0.txt GPL-v2 */
$(function() {
  initMenu('',true,false,'search.php','Search');
  $(document).ready(function() { init_search(); });
});
/* @license-end */</script>
<div id="main-nav"></div>
</div><!-- top -->
<div id="side-nav" class="ui-resizable side-nav-resizable">
  <div id="nav-tree">
    <div id="nav-tree-contents">
      <div id="nav-sync" class="sync"></div>
    </div>
  </div>
  <div id="splitbar" style="-moz-user-select:none;" 
       class="ui-resizable-handle">
  </div>
</div>
<script type="text/javascript">
/* @license magnet:?xt=urn:btih:cf05388f2679ee054f2beb29a391d25f4e673ac3&amp;dn=gpl-2.0.txt GPL-v2 */
$(document).ready(function(){initNavTree('runtimeoptions.xhtml',''); initResizable(); });
/* @license-end */
</script>
<div id="doc-content">
<!-- window showing the filter options -->
<div id="MSearchSelectWindow"
     onmouseover="return searchBox.OnSearchSelectShow()"
     onmouseout="return searchBox.OnSearchSelectHide()"
     onkeydown="return searchBox.OnSearchSelectKey(event)">
</div>

<!-- iframe showing the search results (closed by default) -->
<div id="MSearchResultsWindow">
<iframe src="javascript:void(0)" frameborder="0" 
        name="MSearchResults" id="MSearchResults">
</iframe>
</div>

<div class="PageDoc"><div class="header">
  <div class="headertitle">
<div class="title">Runtime options </div>  </div>
</div><!--header-->
<div class="contents">
<div class="toc"><h3>Table of Contents</h3>
<ul><ul><li class="level2"><a href="#deviceselection">Compute device selection</a></li>
<li class="level2"><a href="#runtimeoptions">Runtime options</a></li>
<li class="level2"><a href="#instanceoptions">Arm NN Instance level options</a></li>
<li class="level2"><a href="#networkoptions">NetworkOptions</a></li>
<li class="level2"><a href="#optimizeroptions">OptimizerOptions</a></li>
<li class="level2"><a href="#modeloptions">OptimizerOptions::ModelOptions</a></li>
<li class="level2"><a href="#gpuaccmodeloptions">GpuAcc backend model options</a></li>
<li class="level2"><a href="#cpuaccmodeloptions">CpuAcc backend model options</a></li>
<li class="level2"><a href="#ethosnmodeloptions">EthosNAcc backend model options</a></li>
</ul>
</ul>
</div>
<div class="textblock"><p>Across all software interfaces to Arm NN there are a set of common configuration parameters. These parameters control how a model is loaded or how the inference is executed. The widest set of options are available at the lowest, Arm NN C++ interface. They reduce as you move outward to the TfLite delegate and NNAPI Support Library. The tables below describe the arguments and in which interface they are available.</p>
<h2><a class="anchor" id="deviceselection"></a>
Compute device selection</h2>
<p>The compute device selected is required to be specified across all interfaces. The device selection will dictate the availability of some parameters and whether some sub graphs are supported.</p>
<table class="markdownTable">
<tr class="markdownTableHead">
<th class="markdownTableHeadLeft">Interface </th><th class="markdownTableHeadLeft">Device selection  </th></tr>
<tr class="markdownTableRowOdd">
<td class="markdownTableBodyLeft">Arm NN </td><td class="markdownTableBodyLeft">The parameter "const std::vector&lt;BackendId&gt;&amp; backendPreferences" to <a class="el" href="namespacearmnn.xhtml#aa42e128b41f4e966fc901f9bf42c5a1c" title="Create an optimized version of the network.">armnn::Optimize</a> provides a vector of backendId's. If multiple devices are specified the order of the vector dictates the order in which execution will be attempted. If all or part of the model is not supported by a backend, the next in order will be tried.  </td></tr>
<tr class="markdownTableRowEven">
<td class="markdownTableBodyLeft">TfLite delegate </td><td class="markdownTableBodyLeft">armnnDelegate::DelegateOptions Compute device or backend ids: This tells Arm NN which devices will be used to process the inference. A single device can be specified using the <a class="el" href="namespacearmnn.xhtml#ae2f04a162585c0a5222a537efd5456ae" title="The Compute enum is now deprecated and it is now being replaced by BackendId.">armnn::Compute</a> enum. Multiple devices can be specified using a vector of <a class="el" href="classarmnn_1_1_backend_id.xhtml">armnn::BackendId</a>. If multiple devices are specified the order of the vector dictates the order in which execution will be attempted. If all or part of the model is not supported by a backend the next in order will be tried. Valid backend ids are: [EthosNAcc/GpuAcc/CpuAcc/CpuRef]  </td></tr>
<tr class="markdownTableRowOdd">
<td class="markdownTableBodyLeft">Support Library </td><td class="markdownTableBodyLeft"><a class="el" href="classarmnn__driver_1_1_driver_options.xhtml">armnn_driver::DriverOptions</a> Compute device or backend ids: This tells Arm NN which devices will be used to process the inference. A single device can be specified using the <a class="el" href="namespacearmnn.xhtml#ae2f04a162585c0a5222a537efd5456ae" title="The Compute enum is now deprecated and it is now being replaced by BackendId.">armnn::Compute</a> enum. Multiple devices can be specified using a vector of <a class="el" href="classarmnn_1_1_backend_id.xhtml">armnn::BackendId</a>. If multiple devices are specified the order of the vector dictates the order in which execution will be attempted. If all or part of the model is not supported by a backend the next in order will be tried. Valid backend ids are: [EthosNAcc/GpuAcc/CpuAcc/CpuRef]  </td></tr>
</table>
<h2><a class="anchor" id="runtimeoptions"></a>
Runtime options</h2>
<p>There are several levels at which Arm NN accepts runtime parameters. Some of these are specific to an Arm NN instance, some to a loaded network and some to the backend on which a network inference is to execute. Each of the external interfaces handles these options in different ways.</p>
<h2><a class="anchor" id="instanceoptions"></a>
Arm NN Instance level options</h2>
<p>In the Arm NN C++ interface these options are set by passing an armnn::CreationOptions struct to <a class="el" href="classarmnn_1_1_i_runtime.xhtml">IRuntime</a>. Not all available options are described here.</p>
<table class="markdownTable">
<tr class="markdownTableHead">
<th class="markdownTableHeadLeft">Arm NN Parameter </th><th class="markdownTableHeadLeft">Delegate </th><th class="markdownTableHeadLeft">Support library </th><th class="markdownTableHeadLeft">Values </th><th class="markdownTableHeadLeft">Description  </th></tr>
<tr class="markdownTableRowOdd">
<td class="markdownTableBodyLeft">m_DynamicBackendsPath </td><td class="markdownTableBodyLeft">dynamic-backends-path </td><td class="markdownTableBodyLeft">(Not Available) </td><td class="markdownTableBodyLeft">String file path </td><td class="markdownTableBodyLeft">A path in which Arm NN will search for dynamic backends to load.  </td></tr>
<tr class="markdownTableRowEven">
<td class="markdownTableBodyLeft">m_ProtectedMode </td><td class="markdownTableBodyLeft">(Not Available) </td><td class="markdownTableBodyLeft">(Not Available) </td><td class="markdownTableBodyLeft">["true"/"false"] </td><td class="markdownTableBodyLeft">Setting this flag will allow the user to create the Runtime in protected mode. It will run all the inferences on protected memory and will make sure that <a class="el" href="structarmnn_1_1_i_network_properties.xhtml#a111a52fb2bd24aee9fc125f28c2eb1cb" title="Deprecated and will be removed in future release.">INetworkProperties::m_ImportEnabled</a> set to true with <a class="el" href="namespacearmnn.xhtml#a14fcd7f88d11cea0a018269dca5f9277a7f9067c59dd34aca0ad09a7f283ed1f8">MemorySource::DmaBufProtected</a> option. This requires that the backend supports Protected Memory and has an allocator capable of allocating Protected Memory associated with it.  </td></tr>
<tr class="markdownTableRowOdd">
<td class="markdownTableBodyLeft">m_CustomAllocatorMap </td><td class="markdownTableBodyLeft">(Not Available) </td><td class="markdownTableBodyLeft">(Not Available) </td><td class="markdownTableBodyLeft">std::map&lt;<a class="el" href="classarmnn_1_1_backend_id.xhtml">BackendId</a>, std::shared_ptr&lt;ICustomAllocator&gt;&gt; </td><td class="markdownTableBodyLeft">A map of Custom <a class="el" href="structarmnn_1_1_allocator.xhtml">Allocator</a> used for allocation of working memory in the backends. Required for Protected Mode in order to correctly allocate Protected Memory  </td></tr>
<tr class="markdownTableRowEven">
<td class="markdownTableBodyLeft">m_MemoryOptimizerStrategyMap </td><td class="markdownTableBodyLeft">(Not Available) </td><td class="markdownTableBodyLeft">(Not Available) </td><td class="markdownTableBodyLeft">std::map&lt;<a class="el" href="classarmnn_1_1_backend_id.xhtml">BackendId</a>, std::shared_ptr&lt;IMemoryOptimizerStrategy&gt;&gt; </td><td class="markdownTableBodyLeft">A map to define a custom memory optimizer strategy for specific backend Ids.  </td></tr>
<tr class="markdownTableRowOdd">
<td class="markdownTableBodyLeft">m_GpuAccTunedParameters </td><td class="markdownTableBodyLeft">gpu-tuning-level </td><td class="markdownTableBodyLeft">cl-tuning-level </td><td class="markdownTableBodyLeft">["0"/"1"/"2"/"3"] </td><td class="markdownTableBodyLeft">0=UseOnly(default), 1=RapidTuning, 2=NormalTuning, 3=ExhaustiveTuning. Requires option gpu-tuning-file. 1,2 and 3 will create a tuning-file, 0 will apply the tunings from an existing file  </td></tr>
<tr class="markdownTableRowEven">
<td class="markdownTableBodyLeft">(Not Available) </td><td class="markdownTableBodyLeft">disable-tflite-runtime-fallback </td><td class="markdownTableBodyLeft">(Not Available) </td><td class="markdownTableBodyLeft">["true"/"false"] </td><td class="markdownTableBodyLeft">Disable TfLite Runtime fallback in the Arm NN TfLite delegate. An exception will be thrown if unsupported operators are encountered. This option is only for testing purposes.  </td></tr>
<tr class="markdownTableRowOdd">
<td class="markdownTableBodyLeft"><a class="el" href="namespacearmnn.xhtml#aa59f7a819c3e29d10ffc41e5c0616872" title="Configures the logging behaviour of the ARMNN library.">armnn::ConfigureLogging</a> </td><td class="markdownTableBodyLeft">logging-severity </td><td class="markdownTableBodyLeft">verbose-logging </td><td class="markdownTableBodyLeft">[Trace/Debug/Info/Warning/Error/Fatal] </td><td class="markdownTableBodyLeft">Set the level of logging information output by Arm NN.  </td></tr>
<tr class="markdownTableRowEven">
<td class="markdownTableBodyLeft"><a class="el" href="namespacearmnn.xhtml#a674efcf6cbdb9e831d653ff0e821fb38">armnn::IOptimizedNetworkPtr</a>-&gt;SerializeToDot </td><td class="markdownTableBodyLeft">serialize-to-dot </td><td class="markdownTableBodyLeft">(Not Available) </td><td class="markdownTableBodyLeft">String file path </td><td class="markdownTableBodyLeft">Serialize the optimized network to the file specified in "dot" format.  </td></tr>
</table>
<p>A specific sub-struct of parameters exists to configure external profiling. This is held as a member, m_ProfilingOptions, of CreationOptions</p>
<table class="markdownTable">
<tr class="markdownTableHead">
<th class="markdownTableHeadLeft">Arm NN Parameter </th><th class="markdownTableHeadLeft">Delegate </th><th class="markdownTableHeadLeft">Support library </th><th class="markdownTableHeadLeft">Values </th><th class="markdownTableHeadLeft">Description  </th></tr>
<tr class="markdownTableRowOdd">
<td class="markdownTableBodyLeft">m_ProfilingOptions.m_EnableProfiling </td><td class="markdownTableBodyLeft">enable-external-profiling </td><td class="markdownTableBodyLeft">(Not Available) </td><td class="markdownTableBodyLeft">["true"/"false"] </td><td class="markdownTableBodyLeft">Enable external profiling.  </td></tr>
<tr class="markdownTableRowEven">
<td class="markdownTableBodyLeft">m_ProfilingOptions.m_TimelineEnabled </td><td class="markdownTableBodyLeft">timeline-profiling </td><td class="markdownTableBodyLeft">(Not Available) </td><td class="markdownTableBodyLeft">["true"/"false"] </td><td class="markdownTableBodyLeft">Enable Arm Development studio Timeline events.  </td></tr>
<tr class="markdownTableRowOdd">
<td class="markdownTableBodyLeft">m_ProfilingOptions.m_OutgoingCaptureFile </td><td class="markdownTableBodyLeft">outgoing-capture-file </td><td class="markdownTableBodyLeft">(Not Available) </td><td class="markdownTableBodyLeft">String file path </td><td class="markdownTableBodyLeft">Path to a file in which outgoing timeline profiling messages will be stored.  </td></tr>
<tr class="markdownTableRowEven">
<td class="markdownTableBodyLeft">m_ProfilingOptions.m_IncomingCaptureFile </td><td class="markdownTableBodyLeft">incoming-capture-file </td><td class="markdownTableBodyLeft">(Not Available) </td><td class="markdownTableBodyLeft">String file path </td><td class="markdownTableBodyLeft">Path to a file in which incoming timeline profiling messages will be stored.  </td></tr>
<tr class="markdownTableRowOdd">
<td class="markdownTableBodyLeft">m_ProfilingOptions.m_FileOnly </td><td class="markdownTableBodyLeft">file-only-external-profiling </td><td class="markdownTableBodyLeft">(Not Available) </td><td class="markdownTableBodyLeft">["true"/"false"] </td><td class="markdownTableBodyLeft">Enable profiling output to file only.  </td></tr>
<tr class="markdownTableRowEven">
<td class="markdownTableBodyLeft">m_ProfilingOptions.m_CapturePeriod </td><td class="markdownTableBodyLeft">counter-capture-period </td><td class="markdownTableBodyLeft">(Not Available) </td><td class="markdownTableBodyLeft">Integer (default : 10000) </td><td class="markdownTableBodyLeft">Value in microseconds of the profiling capture period.  </td></tr>
<tr class="markdownTableRowOdd">
<td class="markdownTableBodyLeft">m_ProfilingOptions.m_FileFormat </td><td class="markdownTableBodyLeft">profiling-file-format </td><td class="markdownTableBodyLeft">(Not Available) </td><td class="markdownTableBodyLeft">String of ["binary"] </td><td class="markdownTableBodyLeft">The format of the file used for outputting profiling data. Currently only "binary" is supported.  </td></tr>
</table>
<h2><a class="anchor" id="networkoptions"></a>
NetworkOptions</h2>
<p>During Network creation you can specify several optional parameters via <a class="el" href="namespacearmnn.xhtml#a4de71c3661093e5c4ae7775114f43413">armnn::NetworkOptions</a>.</p>
<table class="markdownTable">
<tr class="markdownTableHead">
<th class="markdownTableHeadLeft">Arm NN Parameter </th><th class="markdownTableHeadLeft">Delegate </th><th class="markdownTableHeadLeft">Support library </th><th class="markdownTableHeadLeft">Values </th><th class="markdownTableHeadLeft">Description  </th></tr>
<tr class="markdownTableRowOdd">
<td class="markdownTableBodyLeft">ShapeInferenceMethod </td><td class="markdownTableBodyLeft">infer-output-shape </td><td class="markdownTableBodyLeft">(Not Available) </td><td class="markdownTableBodyLeft">["true"/"false"] </td><td class="markdownTableBodyLeft">Infers output tensor shape from input tensor shape and validate where applicable.  </td></tr>
<tr class="markdownTableRowEven">
<td class="markdownTableBodyLeft">AllowExpandedDims </td><td class="markdownTableBodyLeft">allow-expanded-dims </td><td class="markdownTableBodyLeft">(Not Available) </td><td class="markdownTableBodyLeft">["true"/"false"] </td><td class="markdownTableBodyLeft">If true will disregard dimensions with a size of 1 when validating tensor shapes. <a class="el" href="classarmnn_1_1_tensor.xhtml" title="A tensor defined by a TensorInfo (shape and data type) and a mutable backing store.">Tensor</a> sizes must still match. This is an Experimental parameter that is incompatible with infer-output-shape.  </td></tr>
<tr class="markdownTableRowOdd">
<td class="markdownTableBodyLeft">profilingEnabled </td><td class="markdownTableBodyLeft">enable-internal-profiling </td><td class="markdownTableBodyLeft">enable-internal-profiling </td><td class="markdownTableBodyLeft">["true"/"false"] </td><td class="markdownTableBodyLeft">Enable json profiling in CpuAcc and GpuAcc backends.  </td></tr>
<tr class="markdownTableRowEven">
<td class="markdownTableBodyLeft">detailsMethod </td><td class="markdownTableBodyLeft">internal-profiling-detail </td><td class="markdownTableBodyLeft">(Not Available) </td><td class="markdownTableBodyLeft">ProfilingDetailsMethod </td><td class="markdownTableBodyLeft">Set the detail of internal profiling. Options are DetailsWithEvents and DetailsOnly.  </td></tr>
</table>
<h2><a class="anchor" id="optimizeroptions"></a>
OptimizerOptions</h2>
<p><a class="el" href="structarmnn_1_1_optimizer_options.xhtml">OptimizerOptions</a> are a set of parameters specifically targeting the Arm NN optimizer. This executes when a model is being loaded and these parameters are used to tune its operation.</p>
<table class="markdownTable">
<tr class="markdownTableHead">
<th class="markdownTableHeadLeft">Arm NN Parameter </th><th class="markdownTableHeadLeft">Delegate </th><th class="markdownTableHeadLeft">Support library </th><th class="markdownTableHeadLeft">Values </th><th class="markdownTableHeadLeft">Description  </th></tr>
<tr class="markdownTableRowOdd">
<td class="markdownTableBodyLeft">reduceFp32ToFp16 </td><td class="markdownTableBodyLeft">reduce-fp32-to-fp16 </td><td class="markdownTableBodyLeft">(Not available) </td><td class="markdownTableBodyLeft">["true"/"false"] </td><td class="markdownTableBodyLeft">Note This feature works best if all operators of the model are in Fp32. ArmNN will add conversion layers between layers that weren't in Fp32 in the first place or if the operator is not supported in Fp16. The overhead of these conversions can lead to a slower overall performance if too many conversions are required.  </td></tr>
<tr class="markdownTableRowEven">
<td class="markdownTableBodyLeft">reduceFp32ToBf16 </td><td class="markdownTableBodyLeft">reduce-fp32-to-bf16 </td><td class="markdownTableBodyLeft">(Not available) </td><td class="markdownTableBodyLeft">["true"/"false"] </td><td class="markdownTableBodyLeft">This feature has been replaced by enabling Fast Math in compute library backend options. This is currently a placeholder option  </td></tr>
<tr class="markdownTableRowOdd">
<td class="markdownTableBodyLeft">debug </td><td class="markdownTableBodyLeft">debug-data </td><td class="markdownTableBodyLeft">(Not available) </td><td class="markdownTableBodyLeft">["true"/"false"] </td><td class="markdownTableBodyLeft">If the debug flag is set a <a class="el" href="classarmnn_1_1_debug_layer.xhtml" title="This layer visualizes the data flowing through the network.">DebugLayer</a> is inserted after each layer. The action of each debug layer is backend specific.  </td></tr>
<tr class="markdownTableRowEven">
<td class="markdownTableBodyLeft">importEnabled </td><td class="markdownTableBodyLeft">memory-import </td><td class="markdownTableBodyLeft">(Not available) </td><td class="markdownTableBodyLeft">["true"/"false"] </td><td class="markdownTableBodyLeft">Instructs the optimizer that this model will be importing its input tensors. This value must match the MemorySource set for input in <a class="el" href="structarmnn_1_1_i_network_properties.xhtml">INetworkProperties</a>.  </td></tr>
<tr class="markdownTableRowOdd">
<td class="markdownTableBodyLeft">exportEnabled </td><td class="markdownTableBodyLeft">(Not available) </td><td class="markdownTableBodyLeft">(Not available) </td><td class="markdownTableBodyLeft">["true"/"false"] </td><td class="markdownTableBodyLeft">Instructs the optimizer that this model will be exporting its output tensors. This value must match the MemorySource set for output in <a class="el" href="structarmnn_1_1_i_network_properties.xhtml">INetworkProperties</a>.  </td></tr>
</table>
<h2><a class="anchor" id="modeloptions"></a>
OptimizerOptions::ModelOptions</h2>
<p>Model options is a vector of name value pairs contained inside <a class="el" href="structarmnn_1_1_optimizer_options.xhtml">OptimizerOptions</a>. The options specifically target backends.</p>
<h2><a class="anchor" id="gpuaccmodeloptions"></a>
GpuAcc backend model options</h2>
<table class="markdownTable">
<tr class="markdownTableHead">
<th class="markdownTableHeadLeft">Arm NN Parameter </th><th class="markdownTableHeadLeft">Delegate </th><th class="markdownTableHeadLeft">Support library </th><th class="markdownTableHeadLeft">Values </th><th class="markdownTableHeadLeft">Description  </th></tr>
<tr class="markdownTableRowOdd">
<td class="markdownTableBodyLeft">FastMathEnabled </td><td class="markdownTableBodyLeft">enable-fast-math </td><td class="markdownTableBodyLeft">enable-fast-math </td><td class="markdownTableBodyLeft">["true"/"false"] </td><td class="markdownTableBodyLeft">Enables fast_math options in backends that support it.  </td></tr>
<tr class="markdownTableRowEven">
<td class="markdownTableBodyLeft">SaveCachedNetwork </td><td class="markdownTableBodyLeft">save-cached-network </td><td class="markdownTableBodyLeft">save-cached-network </td><td class="markdownTableBodyLeft">["true"/"false"] </td><td class="markdownTableBodyLeft">Enables saving the cached network to the file given with cached-network-file option.  </td></tr>
<tr class="markdownTableRowOdd">
<td class="markdownTableBodyLeft">CachedNetworkFilePath </td><td class="markdownTableBodyLeft">cached-network-filepath </td><td class="markdownTableBodyLeft">cached-network-file </td><td class="markdownTableBodyLeft">String file path </td><td class="markdownTableBodyLeft">If non-empty, the given file will be used to load/save cached network. If save-cached-network option is given will save the cached network to given file. If save-cached-network option is not given will load the cached network from given file.  </td></tr>
<tr class="markdownTableRowEven">
<td class="markdownTableBodyLeft">MLGOTuningFilePath </td><td class="markdownTableBodyLeft">gpu-mlgo-tuning-file </td><td class="markdownTableBodyLeft">mlgo-cl-tuned-parameters-file </td><td class="markdownTableBodyLeft">String file path </td><td class="markdownTableBodyLeft">If non-empty, the given file will be used to load/save MLGO CL tuned parameters.  </td></tr>
<tr class="markdownTableRowOdd">
<td class="markdownTableBodyLeft">KernelProfilingEnabled </td><td class="markdownTableBodyLeft">gpu-kernel-profiling-enabled </td><td class="markdownTableBodyLeft">(Not available) </td><td class="markdownTableBodyLeft">["true"/"false"] </td><td class="markdownTableBodyLeft">Enables GPU kernel profiling  </td></tr>
</table>
<h2><a class="anchor" id="cpuaccmodeloptions"></a>
CpuAcc backend model options</h2>
<table class="markdownTable">
<tr class="markdownTableHead">
<th class="markdownTableHeadLeft">Arm NN Parameter </th><th class="markdownTableHeadLeft">Delegate </th><th class="markdownTableHeadLeft">Support library </th><th class="markdownTableHeadLeft">Values </th><th class="markdownTableHeadLeft">Description  </th></tr>
<tr class="markdownTableRowOdd">
<td class="markdownTableBodyLeft">FastMathEnabled </td><td class="markdownTableBodyLeft">enable-fast-math </td><td class="markdownTableBodyLeft">enable-fast-math </td><td class="markdownTableBodyLeft">["true"/"false"] </td><td class="markdownTableBodyLeft">Enables fast_math options in backends that support it.  </td></tr>
<tr class="markdownTableRowEven">
<td class="markdownTableBodyLeft">NumberOfThreads </td><td class="markdownTableBodyLeft">number-of-threads </td><td class="markdownTableBodyLeft">number-of-threads </td><td class="markdownTableBodyLeft">Integer [1-64] </td><td class="markdownTableBodyLeft">Assign the number of threads used by the CpuAcc backend. Input value must be between 1 and 64. Default is set to 0 (Backend will decide number of threads to use).  </td></tr>
</table>
<h2><a class="anchor" id="ethosnmodeloptions"></a>
EthosNAcc backend model options</h2>
<table class="markdownTable">
<tr class="markdownTableHead">
<th class="markdownTableHeadLeft">Arm NN Parameter </th><th class="markdownTableHeadLeft">Delegate </th><th class="markdownTableHeadLeft">Support library </th><th class="markdownTableHeadLeft">Values </th><th class="markdownTableHeadLeft">Description  </th></tr>
<tr class="markdownTableRowOdd">
<td class="markdownTableBodyLeft">DisableWinograd </td><td class="markdownTableBodyLeft">(Not available) </td><td class="markdownTableBodyLeft">(Not available) </td><td class="markdownTableBodyLeft">["true"/"false"] </td><td class="markdownTableBodyLeft">Disables Winograd fast convolution.  </td></tr>
<tr class="markdownTableRowEven">
<td class="markdownTableBodyLeft">StrictPrecision </td><td class="markdownTableBodyLeft">(Not available) </td><td class="markdownTableBodyLeft">(Not available) </td><td class="markdownTableBodyLeft">["true"/"false"] </td><td class="markdownTableBodyLeft">When enabled the network is more precise as the Re-quantize operations aren't fused, but it is slower to compile as there will be additional operations. This is currently only supported for the Concat operation.  </td></tr>
<tr class="markdownTableRowOdd">
<td class="markdownTableBodyLeft">SaveCachedNetwork </td><td class="markdownTableBodyLeft">save-cached-network </td><td class="markdownTableBodyLeft">save-cached-network </td><td class="markdownTableBodyLeft">["true"/"false"] </td><td class="markdownTableBodyLeft">Enables saving the cached network to the file given with cached-network-file option.  </td></tr>
<tr class="markdownTableRowEven">
<td class="markdownTableBodyLeft">CachedNetworkFilePath </td><td class="markdownTableBodyLeft">cached-network-filepath </td><td class="markdownTableBodyLeft">cached-network-file </td><td class="markdownTableBodyLeft">String file path </td><td class="markdownTableBodyLeft">If non-empty, the given file will be used to load/save cached network. If save-cached-network option is given will save the cached network to given file. If save-cached-network option is not given will load the cached network from given file.  </td></tr>
</table>
</div></div><!-- contents -->
</div><!-- PageDoc -->
</div><!-- doc-content -->
<!-- start footer part -->
<div id="nav-path" class="navpath"><!-- id is needed for treeview function! -->
  <ul>
    <li class="navelem"><a class="el" href="swtools.xhtml">Software Components</a></li><li class="navelem"><a class="el" href="supportlibrary.xhtml">NNAPI Support Library</a></li>
    <li class="footer">Generated on Thu May 18 2023 10:35:44 for ArmNN by
    <a href="http://www.doxygen.org/index.html">
    <img class="footer" src="doxygen.png" alt="doxygen"/></a> 1.8.17 </li>
  </ul>
</div>
</body>
</html>