Arm NN 24.05: src/backends/gpuFsa/workloads/GpuFsaPreCompiledWorkload.cpp

//
// Copyright © 2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "GpuFsaPreCompiledWorkload.hpp"
#include "GpuFsaWorkloadUtils.hpp"
#include "armnn/utility/PolymorphicDowncast.hpp"

#include <gpuFsa/GpuFsaTensorHandle.hpp>
#include <gpuFsa/GpuFsaBackend.hpp>
#include <aclCommon/ArmComputeTensorUtils.hpp>
#include <fmt/format.h>

#include <arm_compute/runtime/CL/CLTensor.h>
#include <arm_compute/core/ITensorInfo.h>
#include <arm_compute/core/TensorInfo.h>
#include <arm_compute/core/TensorShape.h>
#include <arm_compute/core/CL/CLKernelLibrary.h>
#include <arm_compute/core/CL/CLCompileContext.h>

#include <arm_compute/dynamic_fusion/runtime/gpu/cl/ClWorkloadRuntime.h>
#include <arm_compute/dynamic_fusion/sketch/gpu/GpuWorkloadContext.h>
#include <arm_compute/dynamic_fusion/sketch/gpu/operators/GpuConv2d.h>
#include <arm_compute/dynamic_fusion/sketch/gpu/operators/GpuOutput.h>
#include <arm_compute/dynamic_fusion/sketch/gpu/GpuWorkloadSketch.h>

namespace armnn {

GpuFsaPreCompiledWorkload::GpuFsaPreCompiledWorkload(const PreCompiledQueueDescriptor& descriptor,
                                                     const WorkloadInfo& info)
        : BaseWorkload<PreCompiledQueueDescriptor>(descriptor, info), m_workloadInfo(info)
{
    // Check that the workload is holding a pointer to a valid pre-compiled object.
    if (m_Data.m_PreCompiledObject == nullptr)
    {
        throw InvalidArgumentException(
                "GpuFsaPreCompiledWorkload requires a valid pre-compiled object (GpuWorkloadSketch).");
    }
}

void GpuFsaPreCompiledWorkload::Execute() const
{
/*
 * The Execute function of the GpuFsa backend's PreCompiled workload needs to jump through various hoops in order to
 * create a valid sketch and runtime that can execute the kernel.
 * First we need all of the data stored within the PreCompiled blob which was used to set up the workload, namely:
 * - The GpuWorkloadContext: a context which contains the TensorInfos and is unique to the graph being run.
 * - The Sketch: this can contain one or many ops and acts as a subgraph within the context.
 * - The inputTensorInfos / outputTensorInfos: vectors containing the TensorInfos used when creating the sketch.
 *
 * It is very important that the tensors passed into the runtime used to execute this sketch are created with
 * the same TensorInfos as were used when creating the sketch. We do this by creating new tensors, getting the
 * original TensorInfos from the vectors of TensorInfos stored in the blob, and then importing the buffers from our
 * own TensorHandles directly into these newly created tensors. This links the externally visible tensors from
 * Arm NN to the tensors needed to execute the sketch.
 */
    using namespace arm_compute::experimental::dynamic_fusion;
    // Get the runtime and configure it with the precompiled sketch.
    ClWorkloadRuntime runtime;
    GpuFsaPreCompiledBlob* preCompiledBlob = static_cast<GpuFsaPreCompiledBlob*>(m_Data.m_PreCompiledObject);
    auto sketch = preCompiledBlob->sketch.release();
    auto status = runtime.configure(*sketch);

    // Get the TensorInfos stored within the PreCompiledBlob and check they're the right size.
    auto inputTensorInfos = preCompiledBlob->inputTensorInfos.get();
    auto outputTensorInfos = preCompiledBlob->outputTensorInfos.get();
    if (inputTensorInfos->size() != m_Data.m_Inputs.size())
    {
        throw InvalidArgumentException(fmt::format("GpuFsaPreCompiledWorkload::Execute: The number of inputTensorInfos"
                                                   " {} does not match the number of inputs {}.",
                                                   inputTensorInfos->size(), m_Data.m_Inputs.size()));
    }
    if (outputTensorInfos->size() != m_Data.m_Outputs.size())
    {
        throw InvalidArgumentException(fmt::format("GpuFsaPreCompiledWorkload::Execute: The number of outputTensorInfos"
                                                   " {} does not match the number of outputs {}.",
                                                   outputTensorInfos->size(), m_Data.m_Outputs.size()));
    }

    // (Important) Allocate auxiliary tensor memory if there is any.
    for (auto& data : runtime.get_auxiliary_tensors())
    {
        arm_compute::CLTensor*     tensor      = std::get<0>(data);
        arm_compute::TensorInfo    info        = std::get<1>(data);
        arm_compute::experimental::dynamic_fusion::AuxMemoryInfo aux_mem_req = std::get<2>(data);
        tensor->allocator()->init(info, aux_mem_req.alignment);
        tensor->allocator()->allocate(); // Use ACL allocated memory.
    }

    // Create and initialize user tensors.
    std::vector<arm_compute::CLTensor*> inputsWeightsOutputs;
    inputsWeightsOutputs.reserve(m_Data.m_Inputs.size() + m_Data.m_Outputs.size());

    for (uint32_t inputSlotIdx = 0; inputSlotIdx < m_Data.m_Inputs.size(); ++inputSlotIdx)
    {
        arm_compute::CLTensor* input = new arm_compute::CLTensor{};
        // inputTensorInfos is a pointer to a vector of pointers, so a double dereference is needed.
        input->allocator()->init(*((*inputTensorInfos)[inputSlotIdx]));
        auto* inputHandle = PolymorphicDowncast<GpuFsaTensorHandle*>(m_Data.m_Inputs[inputSlotIdx]);
        input->allocator()->import_memory(inputHandle->GetTensor().cl_buffer());
        inputsWeightsOutputs.emplace_back(std::move(input));
    }
    // Set the outputs.
    for (uint32_t outputSlotIdx = 0; outputSlotIdx < m_Data.m_Outputs.size(); ++outputSlotIdx)
    {
        arm_compute::CLTensor* output = new arm_compute::CLTensor{};
        // outputTensorInfos is a pointer to a vector of pointers, so a double dereference is needed.
        output->allocator()->init(*((*outputTensorInfos)[outputSlotIdx]));
        auto* outputHandle = PolymorphicDowncast<GpuFsaTensorHandle*>(m_Data.m_Outputs[outputSlotIdx]);
        output->allocator()->import_memory(outputHandle->GetTensor().cl_buffer());
        inputsWeightsOutputs.emplace_back(std::move(output));
    }
    runtime.run(inputsWeightsOutputs);
}
} // namespace armnn
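
For orientation, the following is a minimal usage sketch (not part of the Arm NN sources) showing how a pre-compiled blob reaches this workload. It relies only on types visible in this file: GpuFsaPreCompiledBlob, PreCompiledQueueDescriptor, WorkloadInfo and GpuFsaPreCompiledWorkload. The helpers BuildFusedSketch and MakeGpuFsaTensorHandle are hypothetical stand-ins for the backend code that records fused operators into the GpuWorkloadSketch and for the GpuFsa tensor-handle factory; a real caller would go through the GpuFsa backend's subgraph optimization and workload factory rather than calling anything like this directly.

// Usage sketch only: BuildFusedSketch() and MakeGpuFsaTensorHandle() are
// hypothetical helpers, not real Arm NN functions.
#include "GpuFsaPreCompiledWorkload.hpp"
#include <gpuFsa/GpuFsaBackend.hpp>       // GpuFsaPreCompiledBlob
#include <gpuFsa/GpuFsaTensorHandle.hpp>  // GpuFsaTensorHandle
#include <armnn/Tensor.hpp>
#include <memory>

// Hypothetical: records the fused operators into blob.sketch and fills
// blob.inputTensorInfos / blob.outputTensorInfos with the ACL ITensorInfo
// pointers that were used while recording the sketch.
void BuildFusedSketch(armnn::GpuFsaPreCompiledBlob& blob);

// Hypothetical: returns a GpuFsaTensorHandle whose CL buffer has already
// been allocated for the given armnn::TensorInfo.
std::unique_ptr<armnn::GpuFsaTensorHandle> MakeGpuFsaTensorHandle(const armnn::TensorInfo& info);

void RunPreCompiledExample(const armnn::TensorInfo& inputInfo,
                           const armnn::TensorInfo& outputInfo)
{
    using namespace armnn;

    // 1. Package the dynamic fusion sketch and its tensor infos in a blob.
    GpuFsaPreCompiledBlob blob;
    BuildFusedSketch(blob);

    // 2. Build the queue descriptor: the blob travels through the opaque
    //    m_PreCompiledObject pointer that the workload constructor checks.
    auto inputHandle  = MakeGpuFsaTensorHandle(inputInfo);
    auto outputHandle = MakeGpuFsaTensorHandle(outputInfo);

    PreCompiledQueueDescriptor descriptor;
    descriptor.m_PreCompiledObject = &blob;
    descriptor.m_Inputs.push_back(inputHandle.get());
    descriptor.m_Outputs.push_back(outputHandle.get());

    WorkloadInfo info;
    info.m_InputTensorInfos  = { inputInfo };
    info.m_OutputTensorInfos = { outputInfo };

    // 3. Construct and run the workload. Execute() imports the CL buffers held
    //    by the handles into fresh CLTensors and runs the ClWorkloadRuntime.
    GpuFsaPreCompiledWorkload workload(descriptor, info);
    workload.Execute();
}

Worth noting from the listing above: Execute() takes the sketch out of the blob with release(), so after the first call blob.sketch is empty and the blob, as written, feeds exactly one Execute() call.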