<!-- Copyright (c) 2020 ARM Limited. -->
<!--                                 -->
<!-- SPDX-License-Identifier: MIT    -->
<!--                                 -->
<!-- HTML header for doxygen 1.8.13-->
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
<meta http-equiv="X-UA-Compatible" content="IE=9"/>
<meta name="generator" content="Doxygen 1.8.13"/>
<meta name="robots" content="NOINDEX, NOFOLLOW" />
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<title>ArmNN: CustomMemoryAllocatorSample.cpp</title>
<link href="tabs.css" rel="stylesheet" type="text/css"/>
<script type="text/javascript" src="jquery.js"></script>
<script type="text/javascript" src="dynsections.js"></script>
<link href="navtree.css" rel="stylesheet" type="text/css"/>
<script type="text/javascript" src="resize.js"></script>
<script type="text/javascript" src="navtreedata.js"></script>
<script type="text/javascript" src="navtree.js"></script>
<script type="text/javascript">
  $(document).ready(initResizable);
</script>
<link href="search/search.css" rel="stylesheet" type="text/css"/>
<script type="text/javascript" src="search/searchdata.js"></script>
<script type="text/javascript" src="search/search.js"></script>
<script type="text/x-mathjax-config">
  MathJax.Hub.Config({
    extensions: ["tex2jax.js"],
    jax: ["input/TeX","output/HTML-CSS"],
});
</script><script type="text/javascript" src="http://cdn.mathjax.org/mathjax/latest/MathJax.js"></script>
<link href="doxygen.css" rel="stylesheet" type="text/css" />
<link href="stylesheet.css" rel="stylesheet" type="text/css"/>
</head>
<body>
<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
<div id="titlearea">
<table cellspacing="0" cellpadding="0">
 <tbody>
 <tr style="height: 56px;">
  <img alt="ArmNN" src="Arm_NN_horizontal_blue.png" style="max-width: 10rem; margin-top: .5rem; margin-left 10px"/>
  <td style="padding-left: 0.5em;">
   <div id="projectname">
   &#160;<span id="projectnumber">22.08</span>
   </div>
  </td>
 </tr>
 </tbody>
</table>
</div>
<!-- end header part -->
<!-- Generated by Doxygen 1.8.13 -->
<script type="text/javascript">
var searchBox = new SearchBox("searchBox", "search",false,'Search');
</script>
<script type="text/javascript" src="menudata.js"></script>
<script type="text/javascript" src="menu.js"></script>
<script type="text/javascript">
$(function() {
  initMenu('',true,false,'search.php','Search');
  $(document).ready(function() { init_search(); });
});
</script>
<div id="main-nav"></div>
</div><!-- top -->
<div id="side-nav" class="ui-resizable side-nav-resizable">
  <div id="nav-tree">
    <div id="nav-tree-contents">
      <div id="nav-sync" class="sync"></div>
    </div>
  </div>
  <div id="splitbar" style="-moz-user-select:none;" 
       class="ui-resizable-handle">
  </div>
</div>
<script type="text/javascript">
$(document).ready(function(){initNavTree('_custom_memory_allocator_sample_8cpp-example.xhtml','');});
</script>
<div id="doc-content">
<!-- window showing the filter options -->
<div id="MSearchSelectWindow"
     onmouseover="return searchBox.OnSearchSelectShow()"
     onmouseout="return searchBox.OnSearchSelectHide()"
     onkeydown="return searchBox.OnSearchSelectKey(event)">
</div>

<!-- iframe showing the search results (closed by default) -->
<div id="MSearchResultsWindow">
<iframe src="javascript:void(0)" frameborder="0" 
        name="MSearchResults" id="MSearchResults">
</iframe>
</div>

<div class="header">
  <div class="headertitle">
<div class="title">CustomMemoryAllocatorSample.cpp</div>  </div>
</div><!--header-->
<div class="contents">
<p>This example is essentially a copy of the SimpleSample example, but it uses a CustomAllocator to allocate memory for the inputs, outputs and inter-layer memory.</p>
<dl class="section note"><dt>Note</dt><dd>This is currently an experimental interface</dd></dl>
<div class="fragment"><div class="line"><span class="comment">//</span></div><div class="line"><span class="comment">// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.</span></div><div class="line"><span class="comment">// SPDX-License-Identifier: MIT</span></div><div class="line"><span class="comment">//</span></div><div class="line"></div><div class="line"><span class="preprocessor">#include &lt;<a class="code" href="_arm_n_n_8hpp.xhtml">armnn/ArmNN.hpp</a>&gt;</span></div><div class="line"><span class="preprocessor">#include &lt;<a class="code" href="_i_custom_allocator_8hpp.xhtml">armnn/backends/ICustomAllocator.hpp</a>&gt;</span></div><div class="line"></div><div class="line"><span class="preprocessor">#include &lt;arm_compute/core/CL/CLKernelLibrary.h&gt;</span></div><div class="line"><span class="preprocessor">#include &lt;arm_compute/runtime/CL/CLScheduler.h&gt;</span></div><div class="line"></div><div class="line"><span class="preprocessor">#include &lt;iostream&gt;</span></div><div class="line"><span class="comment"></span></div><div class="line"><span class="comment">/** Sample implementation of ICustomAllocator for use with the ClBackend.</span></div><div class="line"><span class="comment"> *  Note: any memory allocated must be host addressable with write access</span></div><div class="line"><span class="comment"> *  in order for ArmNN to be able to properly use it. */</span></div><div class="line"><span class="keyword">class </span>SampleClBackendCustomAllocator : <span class="keyword">public</span> <a name="_a0"></a><a class="code" href="classarmnn_1_1_i_custom_allocator.xhtml">armnn::ICustomAllocator</a></div><div class="line">{</div><div class="line"><span class="keyword">public</span>:</div><div class="line">    SampleClBackendCustomAllocator() = <span class="keywordflow">default</span>;</div><div class="line"></div><div class="line">    <span class="keywordtype">void</span>* <a name="a1"></a><a class="code" href="classarmnn_1_1_i_custom_allocator.xhtml#a6ce9dda753afa9abb498f3d6b673e595">allocate</a>(<span class="keywordtype">size_t</span> size, <span class="keywordtype">size_t</span> alignment)<span class="keyword"> override</span></div><div class="line"><span class="keyword">    </span>{</div><div class="line">        <span class="comment">// If alignment is 0 just use the CL_DEVICE_GLOBAL_MEM_CACHELINE_SIZE for alignment</span></div><div class="line">        <span class="keywordflow">if</span> (alignment == 0)</div><div class="line">        {</div><div class="line">            alignment = arm_compute::CLKernelLibrary::get().get_device().getInfo&lt;CL_DEVICE_GLOBAL_MEM_CACHELINE_SIZE&gt;();</div><div class="line">        }</div><div class="line">        <span class="keywordtype">size_t</span> space = size + alignment + alignment;</div><div class="line">        <span class="keyword">auto</span> allocatedMemPtr = std::malloc(space * <span class="keyword">sizeof</span>(<span class="keywordtype">size_t</span>));</div><div class="line"></div><div class="line">        <span class="keywordflow">if</span> (std::align(alignment, size, allocatedMemPtr, space) == <span class="keyword">nullptr</span>)</div><div class="line">        {</div><div class="line">            <span class="keywordflow">throw</span> <a name="_a2"></a><a class="code" href="classarmnn_1_1_exception.xhtml">armnn::Exception</a>(<span class="stringliteral">&quot;SampleClBackendCustomAllocator::Alignment failed&quot;</span>);</div><div class="line">        }</div><div class="line">        <span 
class="keywordflow">return</span> allocatedMemPtr;</div><div class="line">    }</div><div class="line"></div><div class="line">    <span class="keywordtype">void</span> <a name="a3"></a><a class="code" href="classarmnn_1_1_i_custom_allocator.xhtml#a7dbeba9295a3f52ea54698c31e555dff">free</a>(<span class="keywordtype">void</span>* ptr)<span class="keyword"> override</span></div><div class="line"><span class="keyword">    </span>{</div><div class="line">        std::free(ptr);</div><div class="line">    }</div><div class="line"></div><div class="line">    <a class="code" href="namespacearmnn.xhtml#a14fcd7f88d11cea0a018269dca5f9277">armnn::MemorySource</a> <a name="a4"></a><a class="code" href="classarmnn_1_1_i_custom_allocator.xhtml#abf02353c52af045a9af48bb40f857ad2">GetMemorySourceType</a>()<span class="keyword"> override</span></div><div class="line"><span class="keyword">    </span>{</div><div class="line">        <span class="keywordflow">return</span> <a name="a5"></a><a class="code" href="namespacearmnn.xhtml#a14fcd7f88d11cea0a018269dca5f9277a1131a914388fac73e5f07b0ba0aad523">armnn::MemorySource::Malloc</a>;</div><div class="line">    }</div><div class="line">};</div><div class="line"></div><div class="line"></div><div class="line"><span class="comment">// A simple example application to show the usage of a custom memory allocator. In this sample, the users single</span></div><div class="line"><span class="comment">// input number is multiplied by 1.0f using a fully connected layer with a single neuron to produce an output</span></div><div class="line"><span class="comment">// number that is the same as the input. All memory required to execute this mini network is allocated with</span></div><div class="line"><span class="comment">// the provided custom allocator.</span></div><div class="line"><span class="comment">//</span></div><div class="line"><span class="comment">// Using a Custom Allocator is required for use with Protected Mode and Protected Memory.</span></div><div class="line"><span class="comment">// This example is provided using only unprotected malloc as Protected Memory is platform</span></div><div class="line"><span class="comment">// and implementation specific.</span></div><div class="line"><span class="comment">//</span></div><div class="line"><span class="comment">// Note: This example is similar to the SimpleSample application that can also be found in armnn/samples.</span></div><div class="line"><span class="comment">//       The differences are in the use of a custom allocator, the backend is GpuAcc, and the inputs/outputs</span></div><div class="line"><span class="comment">//       are being imported instead of copied. 
(Import must be enabled when using a Custom Allocator)</span></div><div class="line"><span class="comment">//       You might find this useful for comparison.</span></div><div class="line"><span class="keywordtype">int</span> <a name="a6"></a><a class="code" href="_armnn_converter_8cpp.xhtml#a0ddf1224851353fc92bfbff6f499fa97">main</a>()</div><div class="line">{</div><div class="line">    <span class="keyword">using namespace </span><a class="code" href="namespacearmnn.xhtml">armnn</a>;</div><div class="line"></div><div class="line">    <span class="keywordtype">float</span> number;</div><div class="line">    std::cout &lt;&lt; <span class="stringliteral">&quot;Please enter a number: &quot;</span> &lt;&lt; std::endl;</div><div class="line">    std::cin &gt;&gt; number;</div><div class="line"></div><div class="line">    <span class="comment">// Turn on logging to standard output</span></div><div class="line">    <span class="comment">// This is useful in this sample so that users can learn more about what is going on</span></div><div class="line">    <a name="a7"></a><a class="code" href="namespacearmnn.xhtml#aa59f7a819c3e29d10ffc41e5c0616872">ConfigureLogging</a>(<span class="keyword">true</span>, <span class="keyword">false</span>, LogSeverity::Info);</div><div class="line"></div><div class="line">    <span class="comment">// Construct ArmNN network</span></div><div class="line">    <a class="code" href="namespacearmnn.xhtml#a0d8160388a127c1a23b37bc88dc6e2ec">NetworkId</a> networkIdentifier;</div><div class="line">    <a class="code" href="namespacearmnn.xhtml#ace74f6f9feb95a964a49d79458232703">INetworkPtr</a> network = INetwork::Create();</div><div class="line">    <a name="_a8"></a><a class="code" href="structarmnn_1_1_fully_connected_descriptor.xhtml">FullyConnectedDescriptor</a> fullyConnectedDesc;</div><div class="line">    <span class="keywordtype">float</span> weightsData[] = {1.0f}; <span class="comment">// Identity</span></div><div class="line">    <a name="_a9"></a><a class="code" href="classarmnn_1_1_tensor_info.xhtml">TensorInfo</a> weightsInfo(<a name="_a10"></a><a class="code" href="classarmnn_1_1_tensor_shape.xhtml">TensorShape</a>({1, 1}), DataType::Float32, 0.0f, 0, <span class="keyword">true</span>);</div><div class="line">    weightsInfo.<a name="a11"></a><a class="code" href="classarmnn_1_1_tensor_info.xhtml#a8ffca1e21bdfa7f945617acd606aac91">SetConstant</a>(<span class="keyword">true</span>);</div><div class="line">    <a name="_a12"></a><a class="code" href="classarmnn_1_1_const_tensor.xhtml">ConstTensor</a> weights(weightsInfo, weightsData);</div><div class="line"></div><div class="line">    <a name="_a13"></a><a class="code" href="classarmnn_1_1_i_connectable_layer.xhtml">IConnectableLayer</a>* inputLayer   = network-&gt;AddInputLayer(0);</div><div class="line">    <a class="code" href="classarmnn_1_1_i_connectable_layer.xhtml">IConnectableLayer</a>* weightsLayer = network-&gt;AddConstantLayer(weights, <span class="stringliteral">&quot;Weights&quot;</span>);</div><div class="line">    <a class="code" href="classarmnn_1_1_i_connectable_layer.xhtml">IConnectableLayer</a>* fullyConnectedLayer =</div><div class="line">            network-&gt;AddFullyConnectedLayer(fullyConnectedDesc, <span class="stringliteral">&quot;fully connected&quot;</span>);</div><div class="line">    <a class="code" href="classarmnn_1_1_i_connectable_layer.xhtml">IConnectableLayer</a>* outputLayer  = network-&gt;AddOutputLayer(0);</div><div class="line"></div><div class="line">    
inputLayer-&gt;<a name="a14"></a><a class="code" href="classarmnn_1_1_i_connectable_layer.xhtml#a80ac4eda2e7f2757ec9dd96fc96dbd16">GetOutputSlot</a>(0).<a name="a15"></a><a class="code" href="classarmnn_1_1_i_output_slot.xhtml#ac1835f8756a9f03c02fcf9664e3a0fce">Connect</a>(fullyConnectedLayer-&gt;<a name="a16"></a><a class="code" href="classarmnn_1_1_i_connectable_layer.xhtml#a6ec9e0eb66d7d6a01240492a0b18104c">GetInputSlot</a>(0));</div><div class="line">    weightsLayer-&gt;<a class="code" href="classarmnn_1_1_i_connectable_layer.xhtml#a80ac4eda2e7f2757ec9dd96fc96dbd16">GetOutputSlot</a>(0).<a class="code" href="classarmnn_1_1_i_output_slot.xhtml#ac1835f8756a9f03c02fcf9664e3a0fce">Connect</a>(fullyConnectedLayer-&gt;<a class="code" href="classarmnn_1_1_i_connectable_layer.xhtml#a6ec9e0eb66d7d6a01240492a0b18104c">GetInputSlot</a>(1));</div><div class="line">    fullyConnectedLayer-&gt;<a class="code" href="classarmnn_1_1_i_connectable_layer.xhtml#a80ac4eda2e7f2757ec9dd96fc96dbd16">GetOutputSlot</a>(0).<a class="code" href="classarmnn_1_1_i_output_slot.xhtml#ac1835f8756a9f03c02fcf9664e3a0fce">Connect</a>(outputLayer-&gt;<a class="code" href="classarmnn_1_1_i_connectable_layer.xhtml#a6ec9e0eb66d7d6a01240492a0b18104c">GetInputSlot</a>(0));</div><div class="line">    weightsLayer-&gt;<a class="code" href="classarmnn_1_1_i_connectable_layer.xhtml#a80ac4eda2e7f2757ec9dd96fc96dbd16">GetOutputSlot</a>(0).<a name="a17"></a><a class="code" href="classarmnn_1_1_i_output_slot.xhtml#a5ee4a6c9a2481245487b1b1a70d20fd0">SetTensorInfo</a>(weightsInfo);</div><div class="line"></div><div class="line">    <span class="comment">// Create ArmNN runtime:</span></div><div class="line">    <span class="comment">//</span></div><div class="line">    <span class="comment">// This is the interesting bit when executing a model with a custom allocator.</span></div><div class="line">    <span class="comment">// You can have different allocators for different backends. 
To support this</span></div><div class="line">    <span class="comment">// the runtime creation option has a map that takes a BackendId and the corresponding</span></div><div class="line">    <span class="comment">// allocator that should be used for that backend.</span></div><div class="line">    <span class="comment">// Only GpuAcc supports a Custom Allocator for now</span></div><div class="line">    <span class="comment">//</span></div><div class="line">    <span class="comment">// Note: This is not covered in this example but if you want to run a model on</span></div><div class="line">    <span class="comment">//       protected memory a custom allocator needs to be provided that supports</span></div><div class="line">    <span class="comment">//       protected memory allocations and the MemorySource of that allocator is</span></div><div class="line">    <span class="comment">//       set to MemorySource::DmaBufProtected</span></div><div class="line">    <a name="_a18"></a><a class="code" href="structarmnn_1_1_i_runtime_1_1_creation_options.xhtml">IRuntime::CreationOptions</a> options;</div><div class="line">    <span class="keyword">auto</span> customAllocator = std::make_shared&lt;SampleClBackendCustomAllocator&gt;();</div><div class="line">    options.<a name="a19"></a><a class="code" href="structarmnn_1_1_i_runtime_1_1_creation_options.xhtml#a939528f239e70c85f833c87c5fe41d83">m_CustomAllocatorMap</a> = {{<span class="stringliteral">&quot;GpuAcc&quot;</span>, std::move(customAllocator)}};</div><div class="line">    <a class="code" href="namespacearmnn.xhtml#a150468a02bd7b2d2d061c4aaaee939f0">IRuntimePtr</a> runtime = IRuntime::Create(options);</div><div class="line"></div><div class="line">    <span class="comment">//Set the tensors in the network.</span></div><div class="line">    <a class="code" href="classarmnn_1_1_tensor_info.xhtml">TensorInfo</a> inputTensorInfo(<a class="code" href="classarmnn_1_1_tensor_shape.xhtml">TensorShape</a>({1, 1}), DataType::Float32);</div><div class="line">    inputLayer-&gt;<a class="code" href="classarmnn_1_1_i_connectable_layer.xhtml#a80ac4eda2e7f2757ec9dd96fc96dbd16">GetOutputSlot</a>(0).<a class="code" href="classarmnn_1_1_i_output_slot.xhtml#a5ee4a6c9a2481245487b1b1a70d20fd0">SetTensorInfo</a>(inputTensorInfo);</div><div class="line"></div><div class="line">    <span class="keywordtype">unsigned</span> <span class="keywordtype">int</span> numElements = inputTensorInfo.<a name="a20"></a><a class="code" href="classarmnn_1_1_tensor_info.xhtml#a8846406ac37fbd2204f0be16ee05d5b7">GetNumElements</a>();</div><div class="line">    <span class="keywordtype">size_t</span> totalBytes = numElements * <span class="keyword">sizeof</span>(float);</div><div class="line"></div><div class="line">    <a class="code" href="classarmnn_1_1_tensor_info.xhtml">TensorInfo</a> outputTensorInfo(<a class="code" href="classarmnn_1_1_tensor_shape.xhtml">TensorShape</a>({1, 1}), DataType::Float32);</div><div class="line">    fullyConnectedLayer-&gt;<a class="code" href="classarmnn_1_1_i_connectable_layer.xhtml#a80ac4eda2e7f2757ec9dd96fc96dbd16">GetOutputSlot</a>(0).<a class="code" href="classarmnn_1_1_i_output_slot.xhtml#a5ee4a6c9a2481245487b1b1a70d20fd0">SetTensorInfo</a>(outputTensorInfo);</div><div class="line"></div><div class="line">    <span class="comment">// Optimise ArmNN network</span></div><div class="line">    <a name="_a21"></a><a class="code" href="structarmnn_1_1_optimizer_options.xhtml">OptimizerOptions</a> optOptions;</div><div class="line">    
optOptions.<a name="a22"></a><a class="code" href="structarmnn_1_1_optimizer_options.xhtml#a05c1bba6ba3ecc1339d4c4c10c0d8890">m_ImportEnabled</a> = <span class="keyword">true</span>;</div><div class="line">    <a class="code" href="namespacearmnn.xhtml#a674efcf6cbdb9e831d653ff0e821fb38">IOptimizedNetworkPtr</a> optNet =</div><div class="line">                <a name="a23"></a><a class="code" href="namespacearmnn.xhtml#a82e98ef05fd67036d1195ba17174d685">Optimize</a>(*network, {<span class="stringliteral">&quot;GpuAcc&quot;</span>}, runtime-&gt;GetDeviceSpec(), optOptions);</div><div class="line">    <span class="keywordflow">if</span> (!optNet)</div><div class="line">    {</div><div class="line">        <span class="comment">// This shouldn&#39;t happen for this simple sample, with GpuAcc backend.</span></div><div class="line">        <span class="comment">// But in general usage Optimize could fail if the backend at runtime cannot</span></div><div class="line">        <span class="comment">// support the model that has been provided.</span></div><div class="line">        std::cerr &lt;&lt; <span class="stringliteral">&quot;Error: Failed to optimise the input network.&quot;</span> &lt;&lt; std::endl;</div><div class="line">        <span class="keywordflow">return</span> 1;</div><div class="line">    }</div><div class="line"></div><div class="line">    <span class="comment">// Load graph into runtime</span></div><div class="line">    std::string ignoredErrorMessage;</div><div class="line">    <a name="_a24"></a><a class="code" href="structarmnn_1_1_i_network_properties.xhtml">INetworkProperties</a> networkProperties(<span class="keyword">false</span>, MemorySource::Malloc, MemorySource::Malloc);</div><div class="line">    runtime-&gt;LoadNetwork(networkIdentifier, std::move(optNet), ignoredErrorMessage, networkProperties);</div><div class="line"></div><div class="line">    <span class="comment">// Creates structures for input &amp; output</span></div><div class="line">    <span class="keyword">const</span> <span class="keywordtype">size_t</span> alignment =</div><div class="line">            arm_compute::CLKernelLibrary::get().get_device().getInfo&lt;CL_DEVICE_GLOBAL_MEM_CACHELINE_SIZE&gt;();</div><div class="line"></div><div class="line">    <span class="keywordtype">void</span>* alignedInputPtr = options.<a class="code" href="structarmnn_1_1_i_runtime_1_1_creation_options.xhtml#a939528f239e70c85f833c87c5fe41d83">m_CustomAllocatorMap</a>[<span class="stringliteral">&quot;GpuAcc&quot;</span>]-&gt;allocate(totalBytes, alignment);</div><div class="line"></div><div class="line">    <span class="comment">// Input with negative values</span></div><div class="line">    <span class="keyword">auto</span>* inputPtr = <span class="keyword">reinterpret_cast&lt;</span><span class="keywordtype">float</span>*<span class="keyword">&gt;</span>(alignedInputPtr);</div><div class="line">    std::fill_n(inputPtr, numElements, number);</div><div class="line"></div><div class="line">    <span class="keywordtype">void</span>* alignedOutputPtr = options.<a class="code" href="structarmnn_1_1_i_runtime_1_1_creation_options.xhtml#a939528f239e70c85f833c87c5fe41d83">m_CustomAllocatorMap</a>[<span class="stringliteral">&quot;GpuAcc&quot;</span>]-&gt;allocate(totalBytes, alignment);</div><div class="line">    <span class="keyword">auto</span>* outputPtr = <span class="keyword">reinterpret_cast&lt;</span><span class="keywordtype">float</span>*<span class="keyword">&gt;</span>(alignedOutputPtr);</div><div class="line">    
std::fill_n(outputPtr, numElements, -10.0f);</div><div class="line"></div><div class="line">    inputTensorInfo = runtime-&gt;GetInputTensorInfo(networkIdentifier, 0);</div><div class="line">    inputTensorInfo.<a class="code" href="classarmnn_1_1_tensor_info.xhtml#a8ffca1e21bdfa7f945617acd606aac91">SetConstant</a>(<span class="keyword">true</span>);</div><div class="line">    <a class="code" href="namespacearmnn.xhtml#aa01bce88f89975a5a031db4cc8861527">InputTensors</a> inputTensors</div><div class="line">    {</div><div class="line">        {0, <a class="code" href="classarmnn_1_1_const_tensor.xhtml">ConstTensor</a>(inputTensorInfo, alignedInputPtr)},</div><div class="line">    };</div><div class="line">    <a class="code" href="namespacearmnn.xhtml#a8f091a512915d1cb29a4ebf13dfc53ea">OutputTensors</a> outputTensors</div><div class="line">    {</div><div class="line">        {0, <a name="_a25"></a><a class="code" href="classarmnn_1_1_tensor.xhtml">Tensor</a>(runtime-&gt;GetOutputTensorInfo(networkIdentifier, 0), alignedOutputPtr)}</div><div class="line">    };</div><div class="line"></div><div class="line">    <span class="comment">// Execute network</span></div><div class="line">    runtime-&gt;EnqueueWorkload(networkIdentifier, inputTensors, outputTensors);</div><div class="line"></div><div class="line">    <span class="comment">// Tell the CLBackend to sync memory so we can read the output.</span></div><div class="line">    arm_compute::CLScheduler::get().sync();</div><div class="line">    <span class="keyword">auto</span>* outputResult = <span class="keyword">reinterpret_cast&lt;</span><span class="keywordtype">float</span>*<span class="keyword">&gt;</span>(alignedOutputPtr);</div><div class="line">    std::cout &lt;&lt; <span class="stringliteral">&quot;Your number was &quot;</span> &lt;&lt; outputResult[0] &lt;&lt; std::endl;</div><div class="line">    runtime-&gt;UnloadNetwork(networkIdentifier);</div><div class="line">    <span class="keywordflow">return</span> 0;</div><div class="line"></div><div class="line">}</div></div><!-- fragment --> </div><!-- contents -->
</div><!-- doc-content -->
<!-- start footer part -->
<div id="nav-path" class="navpath"><!-- id is needed for treeview function! -->
  <ul>
    <li class="footer">Generated on Fri Aug 19 2022 14:38:25 for ArmNN by
    <a href="http://www.doxygen.org/index.html">
    <img class="footer" src="doxygen.png" alt="doxygen"/></a> 1.8.13 </li>
  </ul>
</div>
</body>
</html>