author    David Monahan <david.monahan@arm.com>    2023-03-22 16:48:58 +0000
committer David Monahan <david.monahan@arm.com>    2023-03-22 16:48:58 +0000
commit    ae050524109f1ce827962665436ef7430f2ac479 (patch)
tree      a087fe0c77570971dd7979f2757426c24e91afc7 /23.02/_custom_memory_allocator_sample_8cpp-example.xhtml
parent    8d2ca734165a068478df7cffa46185680b05cd20 (diff)
download  armnn-ae050524109f1ce827962665436ef7430f2ac479.tar.gz
IVGCVSW-7255 Update Doxygen Documentation and publish on GitHub.
* Updating Doxygen documentation for 23.02 release.

Signed-off-by: David Monahan <david.monahan@arm.com>
Change-Id: I545574ff7664b4595d2fe6a91a3c35d2ad55df82
Diffstat (limited to '23.02/_custom_memory_allocator_sample_8cpp-example.xhtml')
-rw-r--r--  23.02/_custom_memory_allocator_sample_8cpp-example.xhtml  240
1 file changed, 229 insertions, 11 deletions
diff --git a/23.02/_custom_memory_allocator_sample_8cpp-example.xhtml b/23.02/_custom_memory_allocator_sample_8cpp-example.xhtml
index 2629629fbd..ae54aa412f 100644
--- a/23.02/_custom_memory_allocator_sample_8cpp-example.xhtml
+++ b/23.02/_custom_memory_allocator_sample_8cpp-example.xhtml
@@ -8,7 +8,7 @@
<head>
<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
<meta http-equiv="X-UA-Compatible" content="IE=9"/>
-<meta name="generator" content="Doxygen 1.8.13"/>
+<meta name="generator" content="Doxygen 1.8.17"/>
<meta name="robots" content="NOINDEX, NOFOLLOW" />
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<title>ArmNN: CustomMemoryAllocatorSample.cpp</title>
@@ -19,9 +19,6 @@
<script type="text/javascript" src="resize.js"></script>
<script type="text/javascript" src="navtreedata.js"></script>
<script type="text/javascript" src="navtree.js"></script>
-<script type="text/javascript">
- $(document).ready(initResizable);
-</script>
<link href="search/search.css" rel="stylesheet" type="text/css"/>
<script type="text/javascript" src="search/searchdata.js"></script>
<script type="text/javascript" src="search/search.js"></script>
@@ -30,7 +27,8 @@
extensions: ["tex2jax.js"],
jax: ["input/TeX","output/HTML-CSS"],
});
-</script><script type="text/javascript" src="http://cdn.mathjax.org/mathjax/latest/MathJax.js"></script>
+</script>
+<script type="text/javascript" async="async" src="http://cdn.mathjax.org/mathjax/latest/MathJax.js"></script>
<link href="doxygen.css" rel="stylesheet" type="text/css" />
<link href="stylesheet.css" rel="stylesheet" type="text/css"/>
</head>
@@ -51,18 +49,21 @@
</table>
</div>
<!-- end header part -->
-<!-- Generated by Doxygen 1.8.13 -->
+<!-- Generated by Doxygen 1.8.17 -->
<script type="text/javascript">
+/* @license magnet:?xt=urn:btih:cf05388f2679ee054f2beb29a391d25f4e673ac3&amp;dn=gpl-2.0.txt GPL-v2 */
var searchBox = new SearchBox("searchBox", "search",false,'Search');
+/* @license-end */
</script>
<script type="text/javascript" src="menudata.js"></script>
<script type="text/javascript" src="menu.js"></script>
<script type="text/javascript">
+/* @license magnet:?xt=urn:btih:cf05388f2679ee054f2beb29a391d25f4e673ac3&amp;dn=gpl-2.0.txt GPL-v2 */
$(function() {
initMenu('',true,false,'search.php','Search');
$(document).ready(function() { init_search(); });
});
-</script>
+/* @license-end */</script>
<div id="main-nav"></div>
</div><!-- top -->
<div id="side-nav" class="ui-resizable side-nav-resizable">
@@ -76,7 +77,9 @@ $(function() {
</div>
</div>
<script type="text/javascript">
-$(document).ready(function(){initNavTree('_custom_memory_allocator_sample_8cpp-example.xhtml','');});
+/* @license magnet:?xt=urn:btih:cf05388f2679ee054f2beb29a391d25f4e673ac3&amp;dn=gpl-2.0.txt GPL-v2 */
+$(document).ready(function(){initNavTree('_custom_memory_allocator_sample_8cpp-example.xhtml',''); initResizable(); });
+/* @license-end */
</script>
<div id="doc-content">
<!-- window showing the filter options -->
@@ -100,14 +103,229 @@ $(document).ready(function(){initNavTree('_custom_memory_allocator_sample_8cpp-e
<div class="contents">
<p>This example is basically a copy of the SimpleSample example. But it makes use of a CustomAllocator to allocate memory for the inputs, outputs and inter layer memory.</p>
<dl class="section note"><dt>Note</dt><dd>This is currently an experimental interface</dd></dl>
-<div class="fragment"><div class="line"><span class="comment">//</span></div><div class="line"><span class="comment">// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.</span></div><div class="line"><span class="comment">// SPDX-License-Identifier: MIT</span></div><div class="line"><span class="comment">//</span></div><div class="line"></div><div class="line"><span class="preprocessor">#include &lt;<a class="code" href="_arm_n_n_8hpp.xhtml">armnn/ArmNN.hpp</a>&gt;</span></div><div class="line"><span class="preprocessor">#include &lt;<a class="code" href="_i_custom_allocator_8hpp.xhtml">armnn/backends/ICustomAllocator.hpp</a>&gt;</span></div><div class="line"></div><div class="line"><span class="preprocessor">#include &lt;arm_compute/core/CL/CLKernelLibrary.h&gt;</span></div><div class="line"><span class="preprocessor">#include &lt;arm_compute/runtime/CL/CLScheduler.h&gt;</span></div><div class="line"></div><div class="line"><span class="preprocessor">#include &lt;iostream&gt;</span></div><div class="line"><span class="comment"></span></div><div class="line"><span class="comment">/** Sample implementation of ICustomAllocator for use with the ClBackend.</span></div><div class="line"><span class="comment"> * Note: any memory allocated must be host addressable with write access</span></div><div class="line"><span class="comment"> * in order for ArmNN to be able to properly use it. */</span></div><div class="line"><span class="keyword">class </span>SampleClBackendCustomAllocator : <span class="keyword">public</span> <a name="_a0"></a><a class="code" href="classarmnn_1_1_i_custom_allocator.xhtml">armnn::ICustomAllocator</a></div><div class="line">{</div><div class="line"><span class="keyword">public</span>:</div><div class="line"> SampleClBackendCustomAllocator() = <span class="keywordflow">default</span>;</div><div class="line"></div><div class="line"> <span class="keywordtype">void</span>* <a name="a1"></a><a class="code" href="classarmnn_1_1_i_custom_allocator.xhtml#a6ce9dda753afa9abb498f3d6b673e595">allocate</a>(<span class="keywordtype">size_t</span> size, <span class="keywordtype">size_t</span> alignment)<span class="keyword"> override</span></div><div class="line"><span class="keyword"> </span>{</div><div class="line"> <span class="comment">// If alignment is 0 just use the CL_DEVICE_GLOBAL_MEM_CACHELINE_SIZE for alignment</span></div><div class="line"> <span class="keywordflow">if</span> (alignment == 0)</div><div class="line"> {</div><div class="line"> alignment = arm_compute::CLKernelLibrary::get().get_device().getInfo&lt;CL_DEVICE_GLOBAL_MEM_CACHELINE_SIZE&gt;();</div><div class="line"> }</div><div class="line"> <span class="keywordtype">size_t</span> space = size + alignment + alignment;</div><div class="line"> <span class="keyword">auto</span> allocatedMemPtr = std::malloc(space * <span class="keyword">sizeof</span>(<span class="keywordtype">size_t</span>));</div><div class="line"></div><div class="line"> <span class="keywordflow">if</span> (std::align(alignment, size, allocatedMemPtr, space) == <span class="keyword">nullptr</span>)</div><div class="line"> {</div><div class="line"> <span class="keywordflow">throw</span> <a name="_a2"></a><a class="code" href="classarmnn_1_1_exception.xhtml">armnn::Exception</a>(<span class="stringliteral">&quot;SampleClBackendCustomAllocator::Alignment failed&quot;</span>);</div><div class="line"> }</div><div class="line"> <span class="keywordflow">return</span> allocatedMemPtr;</div><div class="line"> }</div><div 
class="line"></div><div class="line"> <span class="keywordtype">void</span> <a name="a3"></a><a class="code" href="classarmnn_1_1_i_custom_allocator.xhtml#a7dbeba9295a3f52ea54698c31e555dff">free</a>(<span class="keywordtype">void</span>* ptr)<span class="keyword"> override</span></div><div class="line"><span class="keyword"> </span>{</div><div class="line"> std::free(ptr);</div><div class="line"> }</div><div class="line"></div><div class="line"> <a class="code" href="namespacearmnn.xhtml#a14fcd7f88d11cea0a018269dca5f9277">armnn::MemorySource</a> <a name="a4"></a><a class="code" href="classarmnn_1_1_i_custom_allocator.xhtml#abf02353c52af045a9af48bb40f857ad2">GetMemorySourceType</a>()<span class="keyword"> override</span></div><div class="line"><span class="keyword"> </span>{</div><div class="line"> <span class="keywordflow">return</span> <a name="a5"></a><a class="code" href="namespacearmnn.xhtml#a14fcd7f88d11cea0a018269dca5f9277a1131a914388fac73e5f07b0ba0aad523">armnn::MemorySource::Malloc</a>;</div><div class="line"> }</div><div class="line">};</div><div class="line"></div><div class="line"></div><div class="line"><span class="comment">// A simple example application to show the usage of a custom memory allocator. In this sample, the users single</span></div><div class="line"><span class="comment">// input number is multiplied by 1.0f using a fully connected layer with a single neuron to produce an output</span></div><div class="line"><span class="comment">// number that is the same as the input. All memory required to execute this mini network is allocated with</span></div><div class="line"><span class="comment">// the provided custom allocator.</span></div><div class="line"><span class="comment">//</span></div><div class="line"><span class="comment">// Using a Custom Allocator is required for use with Protected Mode and Protected Memory.</span></div><div class="line"><span class="comment">// This example is provided using only unprotected malloc as Protected Memory is platform</span></div><div class="line"><span class="comment">// and implementation specific.</span></div><div class="line"><span class="comment">//</span></div><div class="line"><span class="comment">// Note: This example is similar to the SimpleSample application that can also be found in armnn/samples.</span></div><div class="line"><span class="comment">// The differences are in the use of a custom allocator, the backend is GpuAcc, and the inputs/outputs</span></div><div class="line"><span class="comment">// are being imported instead of copied. 
(Import must be enabled when using a Custom Allocator)</span></div><div class="line"><span class="comment">// You might find this useful for comparison.</span></div><div class="line"><span class="keywordtype">int</span> <a name="a6"></a><a class="code" href="_armnn_converter_8cpp.xhtml#a0ddf1224851353fc92bfbff6f499fa97">main</a>()</div><div class="line">{</div><div class="line"> <span class="keyword">using namespace </span><a class="code" href="namespacearmnn.xhtml">armnn</a>;</div><div class="line"></div><div class="line"> <span class="keywordtype">float</span> number;</div><div class="line"> std::cout &lt;&lt; <span class="stringliteral">&quot;Please enter a number: &quot;</span> &lt;&lt; std::endl;</div><div class="line"> std::cin &gt;&gt; number;</div><div class="line"></div><div class="line"> <span class="comment">// Turn on logging to standard output</span></div><div class="line"> <span class="comment">// This is useful in this sample so that users can learn more about what is going on</span></div><div class="line"> <a name="a7"></a><a class="code" href="namespacearmnn.xhtml#aa59f7a819c3e29d10ffc41e5c0616872">ConfigureLogging</a>(<span class="keyword">true</span>, <span class="keyword">false</span>, LogSeverity::Info);</div><div class="line"></div><div class="line"> <span class="comment">// Construct ArmNN network</span></div><div class="line"> <a class="code" href="namespacearmnn.xhtml#a0d8160388a127c1a23b37bc88dc6e2ec">NetworkId</a> networkIdentifier;</div><div class="line"> <a class="code" href="namespacearmnn.xhtml#ace74f6f9feb95a964a49d79458232703">INetworkPtr</a> network = INetwork::Create();</div><div class="line"> <a name="_a8"></a><a class="code" href="structarmnn_1_1_fully_connected_descriptor.xhtml">FullyConnectedDescriptor</a> fullyConnectedDesc;</div><div class="line"> <span class="keywordtype">float</span> weightsData[] = {1.0f}; <span class="comment">// Identity</span></div><div class="line"> <a name="_a9"></a><a class="code" href="classarmnn_1_1_tensor_info.xhtml">TensorInfo</a> weightsInfo(<a name="_a10"></a><a class="code" href="classarmnn_1_1_tensor_shape.xhtml">TensorShape</a>({1, 1}), DataType::Float32, 0.0f, 0, <span class="keyword">true</span>);</div><div class="line"> weightsInfo.<a name="a11"></a><a class="code" href="classarmnn_1_1_tensor_info.xhtml#a8ffca1e21bdfa7f945617acd606aac91">SetConstant</a>(<span class="keyword">true</span>);</div><div class="line"> <a name="_a12"></a><a class="code" href="classarmnn_1_1_const_tensor.xhtml">ConstTensor</a> weights(weightsInfo, weightsData);</div><div class="line"></div><div class="line"> <a name="_a13"></a><a class="code" href="classarmnn_1_1_i_connectable_layer.xhtml">IConnectableLayer</a>* inputLayer = network-&gt;AddInputLayer(0);</div><div class="line"> <a class="code" href="classarmnn_1_1_i_connectable_layer.xhtml">IConnectableLayer</a>* weightsLayer = network-&gt;AddConstantLayer(weights, <span class="stringliteral">&quot;Weights&quot;</span>);</div><div class="line"> <a class="code" href="classarmnn_1_1_i_connectable_layer.xhtml">IConnectableLayer</a>* fullyConnectedLayer =</div><div class="line"> network-&gt;AddFullyConnectedLayer(fullyConnectedDesc, <span class="stringliteral">&quot;fully connected&quot;</span>);</div><div class="line"> <a class="code" href="classarmnn_1_1_i_connectable_layer.xhtml">IConnectableLayer</a>* outputLayer = network-&gt;AddOutputLayer(0);</div><div class="line"></div><div class="line"> inputLayer-&gt;<a name="a14"></a><a class="code" 
href="classarmnn_1_1_i_connectable_layer.xhtml#a80ac4eda2e7f2757ec9dd96fc96dbd16">GetOutputSlot</a>(0).<a name="a15"></a><a class="code" href="classarmnn_1_1_i_output_slot.xhtml#ac1835f8756a9f03c02fcf9664e3a0fce">Connect</a>(fullyConnectedLayer-&gt;<a name="a16"></a><a class="code" href="classarmnn_1_1_i_connectable_layer.xhtml#a6ec9e0eb66d7d6a01240492a0b18104c">GetInputSlot</a>(0));</div><div class="line"> weightsLayer-&gt;<a class="code" href="classarmnn_1_1_i_connectable_layer.xhtml#a80ac4eda2e7f2757ec9dd96fc96dbd16">GetOutputSlot</a>(0).<a class="code" href="classarmnn_1_1_i_output_slot.xhtml#ac1835f8756a9f03c02fcf9664e3a0fce">Connect</a>(fullyConnectedLayer-&gt;<a class="code" href="classarmnn_1_1_i_connectable_layer.xhtml#a6ec9e0eb66d7d6a01240492a0b18104c">GetInputSlot</a>(1));</div><div class="line"> fullyConnectedLayer-&gt;<a class="code" href="classarmnn_1_1_i_connectable_layer.xhtml#a80ac4eda2e7f2757ec9dd96fc96dbd16">GetOutputSlot</a>(0).<a class="code" href="classarmnn_1_1_i_output_slot.xhtml#ac1835f8756a9f03c02fcf9664e3a0fce">Connect</a>(outputLayer-&gt;<a class="code" href="classarmnn_1_1_i_connectable_layer.xhtml#a6ec9e0eb66d7d6a01240492a0b18104c">GetInputSlot</a>(0));</div><div class="line"> weightsLayer-&gt;<a class="code" href="classarmnn_1_1_i_connectable_layer.xhtml#a80ac4eda2e7f2757ec9dd96fc96dbd16">GetOutputSlot</a>(0).<a name="a17"></a><a class="code" href="classarmnn_1_1_i_output_slot.xhtml#a5ee4a6c9a2481245487b1b1a70d20fd0">SetTensorInfo</a>(weightsInfo);</div><div class="line"></div><div class="line"> <span class="comment">// Create ArmNN runtime:</span></div><div class="line"> <span class="comment">//</span></div><div class="line"> <span class="comment">// This is the interesting bit when executing a model with a custom allocator.</span></div><div class="line"> <span class="comment">// You can have different allocators for different backends. 
To support this</span></div><div class="line"> <span class="comment">// the runtime creation option has a map that takes a BackendId and the corresponding</span></div><div class="line"> <span class="comment">// allocator that should be used for that backend.</span></div><div class="line"> <span class="comment">// Only GpuAcc supports a Custom Allocator for now</span></div><div class="line"> <span class="comment">//</span></div><div class="line"> <span class="comment">// Note: This is not covered in this example but if you want to run a model on</span></div><div class="line"> <span class="comment">// protected memory a custom allocator needs to be provided that supports</span></div><div class="line"> <span class="comment">// protected memory allocations and the MemorySource of that allocator is</span></div><div class="line"> <span class="comment">// set to MemorySource::DmaBufProtected</span></div><div class="line"> <a name="_a18"></a><a class="code" href="structarmnn_1_1_i_runtime_1_1_creation_options.xhtml">IRuntime::CreationOptions</a> options;</div><div class="line"> <span class="keyword">auto</span> customAllocator = std::make_shared&lt;SampleClBackendCustomAllocator&gt;();</div><div class="line"> options.<a name="a19"></a><a class="code" href="structarmnn_1_1_i_runtime_1_1_creation_options.xhtml#a939528f239e70c85f833c87c5fe41d83">m_CustomAllocatorMap</a> = {{<span class="stringliteral">&quot;GpuAcc&quot;</span>, std::move(customAllocator)}};</div><div class="line"> <a class="code" href="namespacearmnn.xhtml#a150468a02bd7b2d2d061c4aaaee939f0">IRuntimePtr</a> runtime = IRuntime::Create(options);</div><div class="line"></div><div class="line"> <span class="comment">//Set the tensors in the network.</span></div><div class="line"> <a class="code" href="classarmnn_1_1_tensor_info.xhtml">TensorInfo</a> inputTensorInfo(<a class="code" href="classarmnn_1_1_tensor_shape.xhtml">TensorShape</a>({1, 1}), DataType::Float32);</div><div class="line"> inputLayer-&gt;<a class="code" href="classarmnn_1_1_i_connectable_layer.xhtml#a80ac4eda2e7f2757ec9dd96fc96dbd16">GetOutputSlot</a>(0).<a class="code" href="classarmnn_1_1_i_output_slot.xhtml#a5ee4a6c9a2481245487b1b1a70d20fd0">SetTensorInfo</a>(inputTensorInfo);</div><div class="line"></div><div class="line"> <span class="keywordtype">unsigned</span> <span class="keywordtype">int</span> numElements = inputTensorInfo.<a name="a20"></a><a class="code" href="classarmnn_1_1_tensor_info.xhtml#a8846406ac37fbd2204f0be16ee05d5b7">GetNumElements</a>();</div><div class="line"> <span class="keywordtype">size_t</span> totalBytes = numElements * <span class="keyword">sizeof</span>(float);</div><div class="line"></div><div class="line"> <a class="code" href="classarmnn_1_1_tensor_info.xhtml">TensorInfo</a> outputTensorInfo(<a class="code" href="classarmnn_1_1_tensor_shape.xhtml">TensorShape</a>({1, 1}), DataType::Float32);</div><div class="line"> fullyConnectedLayer-&gt;<a class="code" href="classarmnn_1_1_i_connectable_layer.xhtml#a80ac4eda2e7f2757ec9dd96fc96dbd16">GetOutputSlot</a>(0).<a class="code" href="classarmnn_1_1_i_output_slot.xhtml#a5ee4a6c9a2481245487b1b1a70d20fd0">SetTensorInfo</a>(outputTensorInfo);</div><div class="line"></div><div class="line"> <span class="comment">// Optimise ArmNN network</span></div><div class="line"> <a name="_a21"></a><a class="code" href="structarmnn_1_1_optimizer_options.xhtml">OptimizerOptions</a> optOptions;</div><div class="line"> optOptions.<a name="a22"></a><a class="code" 
href="structarmnn_1_1_optimizer_options.xhtml#a05c1bba6ba3ecc1339d4c4c10c0d8890">m_ImportEnabled</a> = <span class="keyword">true</span>;</div><div class="line"> <a class="code" href="namespacearmnn.xhtml#a674efcf6cbdb9e831d653ff0e821fb38">IOptimizedNetworkPtr</a> optNet =</div><div class="line"> <a name="a23"></a><a class="code" href="namespacearmnn.xhtml#a82e98ef05fd67036d1195ba17174d685">Optimize</a>(*network, {<span class="stringliteral">&quot;GpuAcc&quot;</span>}, runtime-&gt;GetDeviceSpec(), optOptions);</div><div class="line"> <span class="keywordflow">if</span> (!optNet)</div><div class="line"> {</div><div class="line"> <span class="comment">// This shouldn&#39;t happen for this simple sample, with GpuAcc backend.</span></div><div class="line"> <span class="comment">// But in general usage Optimize could fail if the backend at runtime cannot</span></div><div class="line"> <span class="comment">// support the model that has been provided.</span></div><div class="line"> std::cerr &lt;&lt; <span class="stringliteral">&quot;Error: Failed to optimise the input network.&quot;</span> &lt;&lt; std::endl;</div><div class="line"> <span class="keywordflow">return</span> 1;</div><div class="line"> }</div><div class="line"></div><div class="line"> <span class="comment">// Load graph into runtime</span></div><div class="line"> std::string ignoredErrorMessage;</div><div class="line"> <a name="_a24"></a><a class="code" href="structarmnn_1_1_i_network_properties.xhtml">INetworkProperties</a> networkProperties(<span class="keyword">false</span>, MemorySource::Malloc, MemorySource::Malloc);</div><div class="line"> runtime-&gt;LoadNetwork(networkIdentifier, std::move(optNet), ignoredErrorMessage, networkProperties);</div><div class="line"></div><div class="line"> <span class="comment">// Creates structures for input &amp; output</span></div><div class="line"> <span class="keyword">const</span> <span class="keywordtype">size_t</span> alignment =</div><div class="line"> arm_compute::CLKernelLibrary::get().get_device().getInfo&lt;CL_DEVICE_GLOBAL_MEM_CACHELINE_SIZE&gt;();</div><div class="line"></div><div class="line"> <span class="keywordtype">void</span>* alignedInputPtr = options.<a class="code" href="structarmnn_1_1_i_runtime_1_1_creation_options.xhtml#a939528f239e70c85f833c87c5fe41d83">m_CustomAllocatorMap</a>[<span class="stringliteral">&quot;GpuAcc&quot;</span>]-&gt;allocate(totalBytes, alignment);</div><div class="line"></div><div class="line"> <span class="comment">// Input with negative values</span></div><div class="line"> <span class="keyword">auto</span>* inputPtr = <span class="keyword">reinterpret_cast&lt;</span><span class="keywordtype">float</span>*<span class="keyword">&gt;</span>(alignedInputPtr);</div><div class="line"> std::fill_n(inputPtr, numElements, number);</div><div class="line"></div><div class="line"> <span class="keywordtype">void</span>* alignedOutputPtr = options.<a class="code" href="structarmnn_1_1_i_runtime_1_1_creation_options.xhtml#a939528f239e70c85f833c87c5fe41d83">m_CustomAllocatorMap</a>[<span class="stringliteral">&quot;GpuAcc&quot;</span>]-&gt;allocate(totalBytes, alignment);</div><div class="line"> <span class="keyword">auto</span>* outputPtr = <span class="keyword">reinterpret_cast&lt;</span><span class="keywordtype">float</span>*<span class="keyword">&gt;</span>(alignedOutputPtr);</div><div class="line"> std::fill_n(outputPtr, numElements, -10.0f);</div><div class="line"></div><div class="line"> inputTensorInfo = 
runtime-&gt;GetInputTensorInfo(networkIdentifier, 0);</div><div class="line"> inputTensorInfo.<a class="code" href="classarmnn_1_1_tensor_info.xhtml#a8ffca1e21bdfa7f945617acd606aac91">SetConstant</a>(<span class="keyword">true</span>);</div><div class="line"> <a class="code" href="namespacearmnn.xhtml#aa01bce88f89975a5a031db4cc8861527">InputTensors</a> inputTensors</div><div class="line"> {</div><div class="line"> {0, <a class="code" href="classarmnn_1_1_const_tensor.xhtml">ConstTensor</a>(inputTensorInfo, alignedInputPtr)},</div><div class="line"> };</div><div class="line"> <a class="code" href="namespacearmnn.xhtml#a8f091a512915d1cb29a4ebf13dfc53ea">OutputTensors</a> outputTensors</div><div class="line"> {</div><div class="line"> {0, <a name="_a25"></a><a class="code" href="classarmnn_1_1_tensor.xhtml">Tensor</a>(runtime-&gt;GetOutputTensorInfo(networkIdentifier, 0), alignedOutputPtr)}</div><div class="line"> };</div><div class="line"></div><div class="line"> <span class="comment">// Execute network</span></div><div class="line"> runtime-&gt;EnqueueWorkload(networkIdentifier, inputTensors, outputTensors);</div><div class="line"></div><div class="line"> <span class="comment">// Tell the CLBackend to sync memory so we can read the output.</span></div><div class="line"> arm_compute::CLScheduler::get().sync();</div><div class="line"> <span class="keyword">auto</span>* outputResult = <span class="keyword">reinterpret_cast&lt;</span><span class="keywordtype">float</span>*<span class="keyword">&gt;</span>(alignedOutputPtr);</div><div class="line"> std::cout &lt;&lt; <span class="stringliteral">&quot;Your number was &quot;</span> &lt;&lt; outputResult[0] &lt;&lt; std::endl;</div><div class="line"> runtime-&gt;UnloadNetwork(networkIdentifier);</div><div class="line"> <span class="keywordflow">return</span> 0;</div><div class="line"></div><div class="line">}</div></div><!-- fragment --> </div><!-- contents -->
+<div class="fragment"><div class="line"><span class="comment">//</span></div>
+<div class="line"><span class="comment">// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.</span></div>
+<div class="line"><span class="comment">// SPDX-License-Identifier: MIT</span></div>
+<div class="line"><span class="comment">//</span></div>
+<div class="line"> </div>
+<div class="line"><span class="preprocessor">#include &lt;<a class="code" href="_arm_n_n_8hpp.xhtml">armnn/ArmNN.hpp</a>&gt;</span></div>
+<div class="line"><span class="preprocessor">#include &lt;<a class="code" href="_i_custom_allocator_8hpp.xhtml">armnn/backends/ICustomAllocator.hpp</a>&gt;</span></div>
+<div class="line"> </div>
+<div class="line"><span class="preprocessor">#include &lt;arm_compute/core/CL/CLKernelLibrary.h&gt;</span></div>
+<div class="line"><span class="preprocessor">#include &lt;arm_compute/runtime/CL/CLScheduler.h&gt;</span></div>
+<div class="line"> </div>
+<div class="line"><span class="preprocessor">#include &lt;iostream&gt;</span></div>
+<div class="line"><span class="comment"></span> </div>
+<div class="line"><span class="comment">/** Sample implementation of ICustomAllocator for use with the ClBackend.</span></div>
+<div class="line"><span class="comment"> * Note: any memory allocated must be host addressable with write access</span></div>
+<div class="line"><span class="comment"> * in order for ArmNN to be able to properly use it. */</span></div>
+<div class="line"><span class="keyword">class </span>SampleClBackendCustomAllocator : <span class="keyword">public</span> <a name="_a0"></a><a class="code" href="classarmnn_1_1_i_custom_allocator.xhtml">armnn::ICustomAllocator</a></div>
+<div class="line">{</div>
+<div class="line"><span class="keyword">public</span>:</div>
+<div class="line"> SampleClBackendCustomAllocator() = <span class="keywordflow">default</span>;</div>
+<div class="line"> </div>
+<div class="line"> <span class="keywordtype">void</span>* <a name="a1"></a><a class="code" href="classarmnn_1_1_i_custom_allocator.xhtml#a6ce9dda753afa9abb498f3d6b673e595">allocate</a>(<span class="keywordtype">size_t</span> size, <span class="keywordtype">size_t</span> alignment)<span class="keyword"> override</span></div>
+<div class="line"><span class="keyword"> </span>{</div>
+<div class="line"> <span class="comment">// If alignment is 0 just use the CL_DEVICE_GLOBAL_MEM_CACHELINE_SIZE for alignment</span></div>
+<div class="line"> <span class="keywordflow">if</span> (alignment == 0)</div>
+<div class="line"> {</div>
+<div class="line"> alignment = arm_compute::CLKernelLibrary::get().get_device().getInfo&lt;CL_DEVICE_GLOBAL_MEM_CACHELINE_SIZE&gt;();</div>
+<div class="line"> }</div>
+<div class="line"> <span class="keywordtype">size_t</span> space = size + alignment + alignment;</div>
+<div class="line"> <span class="keyword">auto</span> allocatedMemPtr = std::malloc(space * <span class="keyword">sizeof</span>(<span class="keywordtype">size_t</span>));</div>
+<div class="line"> </div>
+<div class="line"> <span class="keywordflow">if</span> (std::align(alignment, size, allocatedMemPtr, space) == <span class="keyword">nullptr</span>)</div>
+<div class="line"> {</div>
+<div class="line"> <span class="keywordflow">throw</span> <a name="_a2"></a><a class="code" href="classarmnn_1_1_exception.xhtml">armnn::Exception</a>(<span class="stringliteral">&quot;SampleClBackendCustomAllocator::Alignment failed&quot;</span>);</div>
+<div class="line"> }</div>
+<div class="line"> <span class="keywordflow">return</span> allocatedMemPtr;</div>
+<div class="line"> }</div>
+<div class="line"> </div>
+<div class="line"> <span class="keywordtype">void</span> <a name="a3"></a><a class="code" href="classarmnn_1_1_i_custom_allocator.xhtml#a7dbeba9295a3f52ea54698c31e555dff">free</a>(<span class="keywordtype">void</span>* ptr)<span class="keyword"> override</span></div>
+<div class="line"><span class="keyword"> </span>{</div>
+<div class="line"> std::free(ptr);</div>
+<div class="line"> }</div>
+<div class="line"> </div>
+<div class="line"> <a class="code" href="namespacearmnn.xhtml#a14fcd7f88d11cea0a018269dca5f9277">armnn::MemorySource</a> <a name="a4"></a><a class="code" href="classarmnn_1_1_i_custom_allocator.xhtml#abf02353c52af045a9af48bb40f857ad2">GetMemorySourceType</a>()<span class="keyword"> override</span></div>
+<div class="line"><span class="keyword"> </span>{</div>
+<div class="line"> <span class="keywordflow">return</span> <a name="a5"></a><a class="code" href="namespacearmnn.xhtml#a14fcd7f88d11cea0a018269dca5f9277a1131a914388fac73e5f07b0ba0aad523">armnn::MemorySource::Malloc</a>;</div>
+<div class="line"> }</div>
+<div class="line">};</div>
+<div class="line"> </div>
+<div class="line"> </div>
+<div class="line"><span class="comment">// A simple example application to show the usage of a custom memory allocator. In this sample, the users single</span></div>
+<div class="line"><span class="comment">// input number is multiplied by 1.0f using a fully connected layer with a single neuron to produce an output</span></div>
+<div class="line"><span class="comment">// number that is the same as the input. All memory required to execute this mini network is allocated with</span></div>
+<div class="line"><span class="comment">// the provided custom allocator.</span></div>
+<div class="line"><span class="comment">//</span></div>
+<div class="line"><span class="comment">// Using a Custom Allocator is required for use with Protected Mode and Protected Memory.</span></div>
+<div class="line"><span class="comment">// This example is provided using only unprotected malloc as Protected Memory is platform</span></div>
+<div class="line"><span class="comment">// and implementation specific.</span></div>
+<div class="line"><span class="comment">//</span></div>
+<div class="line"><span class="comment">// Note: This example is similar to the SimpleSample application that can also be found in armnn/samples.</span></div>
+<div class="line"><span class="comment">// The differences are in the use of a custom allocator, the backend is GpuAcc, and the inputs/outputs</span></div>
+<div class="line"><span class="comment">// are being imported instead of copied. (Import must be enabled when using a Custom Allocator)</span></div>
+<div class="line"><span class="comment">// You might find this useful for comparison.</span></div>
+<div class="line"><span class="keywordtype">int</span> <a name="a6"></a><a class="code" href="_armnn_converter_8cpp.xhtml#a0ddf1224851353fc92bfbff6f499fa97">main</a>()</div>
+<div class="line">{</div>
+<div class="line"> <span class="keyword">using namespace </span><a class="code" href="namespacearmnn.xhtml">armnn</a>;</div>
+<div class="line"> </div>
+<div class="line"> <span class="keywordtype">float</span> number;</div>
+<div class="line"> std::cout &lt;&lt; <span class="stringliteral">&quot;Please enter a number: &quot;</span> &lt;&lt; std::endl;</div>
+<div class="line"> std::cin &gt;&gt; number;</div>
+<div class="line"> </div>
+<div class="line"> <span class="comment">// Turn on logging to standard output</span></div>
+<div class="line"> <span class="comment">// This is useful in this sample so that users can learn more about what is going on</span></div>
+<div class="line"> <a name="a7"></a><a class="code" href="namespacearmnn.xhtml#aa59f7a819c3e29d10ffc41e5c0616872">ConfigureLogging</a>(<span class="keyword">true</span>, <span class="keyword">false</span>, <a name="a8"></a><a class="code" href="namespacearmnn.xhtml#a93a3ba385cad27c4774e5fe64c025d3da4059b0251f66a18cb56f544728796875">LogSeverity::Info</a>);</div>
+<div class="line"> </div>
+<div class="line"> <span class="comment">// Construct ArmNN network</span></div>
+<div class="line"> <a class="code" href="namespacearmnn.xhtml#a0d8160388a127c1a23b37bc88dc6e2ec">NetworkId</a> networkIdentifier;</div>
+<div class="line"> <a class="code" href="namespacearmnn.xhtml#ace74f6f9feb95a964a49d79458232703">INetworkPtr</a> network = <a name="a9"></a><a class="code" href="classarmnn_1_1_i_network.xhtml#a41ce159095e95f7cd4174ce5d4662697">INetwork::Create</a>();</div>
+<div class="line"> <a name="_a10"></a><a class="code" href="structarmnn_1_1_fully_connected_descriptor.xhtml">FullyConnectedDescriptor</a> fullyConnectedDesc;</div>
+<div class="line"> <span class="keywordtype">float</span> weightsData[] = {1.0f}; <span class="comment">// Identity</span></div>
+<div class="line"> <a name="_a11"></a><a class="code" href="classarmnn_1_1_tensor_info.xhtml">TensorInfo</a> weightsInfo(<a name="_a12"></a><a class="code" href="classarmnn_1_1_tensor_shape.xhtml">TensorShape</a>({1, 1}), <a name="a13"></a><a class="code" href="namespacearmnn.xhtml#ad8ed01ff3ff33333d8e19db4d2818bb6a166495adc0d0f53bee6baecc577f5204">DataType::Float32</a>, 0.0f, 0, <span class="keyword">true</span>);</div>
+<div class="line"> weightsInfo.SetConstant(<span class="keyword">true</span>);</div>
+<div class="line"> <a name="_a14"></a><a class="code" href="classarmnn_1_1_const_tensor.xhtml">ConstTensor</a> weights(weightsInfo, weightsData);</div>
+<div class="line"> </div>
+<div class="line"> <a name="_a15"></a><a class="code" href="classarmnn_1_1_i_connectable_layer.xhtml">IConnectableLayer</a>* inputLayer = network-&gt;AddInputLayer(0);</div>
+<div class="line"> <a class="code" href="classarmnn_1_1_i_connectable_layer.xhtml">IConnectableLayer</a>* weightsLayer = network-&gt;AddConstantLayer(weights, <span class="stringliteral">&quot;Weights&quot;</span>);</div>
+<div class="line"> <a class="code" href="classarmnn_1_1_i_connectable_layer.xhtml">IConnectableLayer</a>* fullyConnectedLayer =</div>
+<div class="line"> network-&gt;AddFullyConnectedLayer(fullyConnectedDesc, <span class="stringliteral">&quot;fully connected&quot;</span>);</div>
+<div class="line"> <a class="code" href="classarmnn_1_1_i_connectable_layer.xhtml">IConnectableLayer</a>* outputLayer = network-&gt;AddOutputLayer(0);</div>
+<div class="line"> </div>
+<div class="line"> inputLayer-&gt;<a name="a16"></a><a class="code" href="classarmnn_1_1_i_connectable_layer.xhtml#a80ac4eda2e7f2757ec9dd96fc96dbd16">GetOutputSlot</a>(0).<a name="a17"></a><a class="code" href="classarmnn_1_1_i_output_slot.xhtml#ac1835f8756a9f03c02fcf9664e3a0fce">Connect</a>(fullyConnectedLayer-&gt;<a name="a18"></a><a class="code" href="classarmnn_1_1_i_connectable_layer.xhtml#a6ec9e0eb66d7d6a01240492a0b18104c">GetInputSlot</a>(0));</div>
+<div class="line"> weightsLayer-&gt;<a class="code" href="classarmnn_1_1_i_connectable_layer.xhtml#a80ac4eda2e7f2757ec9dd96fc96dbd16">GetOutputSlot</a>(0).<a class="code" href="classarmnn_1_1_i_output_slot.xhtml#ac1835f8756a9f03c02fcf9664e3a0fce">Connect</a>(fullyConnectedLayer-&gt;<a class="code" href="classarmnn_1_1_i_connectable_layer.xhtml#a6ec9e0eb66d7d6a01240492a0b18104c">GetInputSlot</a>(1));</div>
+<div class="line"> fullyConnectedLayer-&gt;<a class="code" href="classarmnn_1_1_i_connectable_layer.xhtml#a80ac4eda2e7f2757ec9dd96fc96dbd16">GetOutputSlot</a>(0).<a class="code" href="classarmnn_1_1_i_output_slot.xhtml#ac1835f8756a9f03c02fcf9664e3a0fce">Connect</a>(outputLayer-&gt;<a class="code" href="classarmnn_1_1_i_connectable_layer.xhtml#a6ec9e0eb66d7d6a01240492a0b18104c">GetInputSlot</a>(0));</div>
+<div class="line"> weightsLayer-&gt;<a class="code" href="classarmnn_1_1_i_connectable_layer.xhtml#a80ac4eda2e7f2757ec9dd96fc96dbd16">GetOutputSlot</a>(0).<a name="a19"></a><a class="code" href="classarmnn_1_1_i_output_slot.xhtml#a5ee4a6c9a2481245487b1b1a70d20fd0">SetTensorInfo</a>(weightsInfo);</div>
+<div class="line"> </div>
+<div class="line"> <span class="comment">// Create ArmNN runtime:</span></div>
+<div class="line"> <span class="comment">//</span></div>
+<div class="line"> <span class="comment">// This is the interesting bit when executing a model with a custom allocator.</span></div>
+<div class="line"> <span class="comment">// You can have different allocators for different backends. To support this</span></div>
+<div class="line"> <span class="comment">// the runtime creation option has a map that takes a BackendId and the corresponding</span></div>
+<div class="line"> <span class="comment">// allocator that should be used for that backend.</span></div>
+<div class="line"> <span class="comment">// Only GpuAcc supports a Custom Allocator for now</span></div>
+<div class="line"> <span class="comment">//</span></div>
+<div class="line"> <span class="comment">// Note: This is not covered in this example but if you want to run a model on</span></div>
+<div class="line"> <span class="comment">// protected memory a custom allocator needs to be provided that supports</span></div>
+<div class="line"> <span class="comment">// protected memory allocations and the MemorySource of that allocator is</span></div>
+<div class="line"> <span class="comment">// set to MemorySource::DmaBufProtected</span></div>
+<div class="line"> <a name="_a20"></a><a class="code" href="structarmnn_1_1_i_runtime_1_1_creation_options.xhtml">IRuntime::CreationOptions</a> options;</div>
+<div class="line"> <span class="keyword">auto</span> customAllocator = std::make_shared&lt;SampleClBackendCustomAllocator&gt;();</div>
+<div class="line"> options.<a name="a21"></a><a class="code" href="structarmnn_1_1_i_runtime_1_1_creation_options.xhtml#a939528f239e70c85f833c87c5fe41d83">m_CustomAllocatorMap</a> = {{<span class="stringliteral">&quot;GpuAcc&quot;</span>, std::move(customAllocator)}};</div>
+<div class="line"> <a class="code" href="namespacearmnn.xhtml#a150468a02bd7b2d2d061c4aaaee939f0">IRuntimePtr</a> runtime = <a name="a22"></a><a class="code" href="classarmnn_1_1_i_runtime.xhtml#ad44ecd3700748dc30dc4bbe34ba5bde7">IRuntime::Create</a>(options);</div>
+<div class="line"> </div>
+<div class="line"> <span class="comment">//Set the tensors in the network.</span></div>
+<div class="line"> <a class="code" href="classarmnn_1_1_tensor_info.xhtml">TensorInfo</a> inputTensorInfo(<a class="code" href="classarmnn_1_1_tensor_shape.xhtml">TensorShape</a>({1, 1}), <a class="code" href="namespacearmnn.xhtml#ad8ed01ff3ff33333d8e19db4d2818bb6a166495adc0d0f53bee6baecc577f5204">DataType::Float32</a>);</div>
+<div class="line"> inputLayer-&gt;<a class="code" href="classarmnn_1_1_i_connectable_layer.xhtml#a80ac4eda2e7f2757ec9dd96fc96dbd16">GetOutputSlot</a>(0).<a class="code" href="classarmnn_1_1_i_output_slot.xhtml#a5ee4a6c9a2481245487b1b1a70d20fd0">SetTensorInfo</a>(inputTensorInfo);</div>
+<div class="line"> </div>
+<div class="line"> <span class="keywordtype">unsigned</span> <span class="keywordtype">int</span> numElements = inputTensorInfo.GetNumElements();</div>
+<div class="line"> <span class="keywordtype">size_t</span> totalBytes = numElements * <span class="keyword">sizeof</span>(float);</div>
+<div class="line"> </div>
+<div class="line"> <a class="code" href="classarmnn_1_1_tensor_info.xhtml">TensorInfo</a> outputTensorInfo(<a class="code" href="classarmnn_1_1_tensor_shape.xhtml">TensorShape</a>({1, 1}), <a class="code" href="namespacearmnn.xhtml#ad8ed01ff3ff33333d8e19db4d2818bb6a166495adc0d0f53bee6baecc577f5204">DataType::Float32</a>);</div>
+<div class="line"> fullyConnectedLayer-&gt;<a class="code" href="classarmnn_1_1_i_connectable_layer.xhtml#a80ac4eda2e7f2757ec9dd96fc96dbd16">GetOutputSlot</a>(0).<a class="code" href="classarmnn_1_1_i_output_slot.xhtml#a5ee4a6c9a2481245487b1b1a70d20fd0">SetTensorInfo</a>(outputTensorInfo);</div>
+<div class="line"> </div>
+<div class="line"> <span class="comment">// Optimise ArmNN network</span></div>
+<div class="line"> <a name="_a23"></a><a class="code" href="structarmnn_1_1_optimizer_options.xhtml">OptimizerOptions</a> optOptions;</div>
+<div class="line"> optOptions.<a name="a24"></a><a class="code" href="structarmnn_1_1_optimizer_options.xhtml#a05c1bba6ba3ecc1339d4c4c10c0d8890">m_ImportEnabled</a> = <span class="keyword">true</span>;</div>
+<div class="line"> <a class="code" href="namespacearmnn.xhtml#a674efcf6cbdb9e831d653ff0e821fb38">IOptimizedNetworkPtr</a> optNet =</div>
+<div class="line"> <a name="a25"></a><a class="code" href="namespacearmnn.xhtml#a2783360b253135639f4c63cfcaed6d48">Optimize</a>(*network, {<span class="stringliteral">&quot;GpuAcc&quot;</span>}, runtime-&gt;GetDeviceSpec(), optOptions);</div>
+<div class="line"> <span class="keywordflow">if</span> (!optNet)</div>
+<div class="line"> {</div>
+<div class="line"> <span class="comment">// This shouldn&#39;t happen for this simple sample, with GpuAcc backend.</span></div>
+<div class="line"> <span class="comment">// But in general usage Optimize could fail if the backend at runtime cannot</span></div>
+<div class="line"> <span class="comment">// support the model that has been provided.</span></div>
+<div class="line"> std::cerr &lt;&lt; <span class="stringliteral">&quot;Error: Failed to optimise the input network.&quot;</span> &lt;&lt; std::endl;</div>
+<div class="line"> <span class="keywordflow">return</span> 1;</div>
+<div class="line"> }</div>
+<div class="line"> </div>
+<div class="line"> <span class="comment">// Load graph into runtime</span></div>
+<div class="line"> std::string ignoredErrorMessage;</div>
+<div class="line"> <a name="_a26"></a><a class="code" href="structarmnn_1_1_i_network_properties.xhtml">INetworkProperties</a> networkProperties(<span class="keyword">false</span>, <a class="code" href="namespacearmnn.xhtml#a14fcd7f88d11cea0a018269dca5f9277a1131a914388fac73e5f07b0ba0aad523">MemorySource::Malloc</a>, <a class="code" href="namespacearmnn.xhtml#a14fcd7f88d11cea0a018269dca5f9277a1131a914388fac73e5f07b0ba0aad523">MemorySource::Malloc</a>);</div>
+<div class="line"> runtime-&gt;LoadNetwork(networkIdentifier, std::move(optNet), ignoredErrorMessage, networkProperties);</div>
+<div class="line"> </div>
+<div class="line"> <span class="comment">// Creates structures for input &amp; output</span></div>
+<div class="line"> <span class="keyword">const</span> <span class="keywordtype">size_t</span> alignment =</div>
+<div class="line"> arm_compute::CLKernelLibrary::get().get_device().getInfo&lt;CL_DEVICE_GLOBAL_MEM_CACHELINE_SIZE&gt;();</div>
+<div class="line"> </div>
+<div class="line"> <span class="keywordtype">void</span>* alignedInputPtr = options.<a class="code" href="structarmnn_1_1_i_runtime_1_1_creation_options.xhtml#a939528f239e70c85f833c87c5fe41d83">m_CustomAllocatorMap</a>[<span class="stringliteral">&quot;GpuAcc&quot;</span>]-&gt;allocate(totalBytes, alignment);</div>
+<div class="line"> </div>
+<div class="line"> <span class="comment">// Input with negative values</span></div>
+<div class="line"> <span class="keyword">auto</span>* inputPtr = <span class="keyword">reinterpret_cast&lt;</span><span class="keywordtype">float</span>*<span class="keyword">&gt;</span>(alignedInputPtr);</div>
+<div class="line"> std::fill_n(inputPtr, numElements, number);</div>
+<div class="line"> </div>
+<div class="line"> <span class="keywordtype">void</span>* alignedOutputPtr = options.<a class="code" href="structarmnn_1_1_i_runtime_1_1_creation_options.xhtml#a939528f239e70c85f833c87c5fe41d83">m_CustomAllocatorMap</a>[<span class="stringliteral">&quot;GpuAcc&quot;</span>]-&gt;allocate(totalBytes, alignment);</div>
+<div class="line"> <span class="keyword">auto</span>* outputPtr = <span class="keyword">reinterpret_cast&lt;</span><span class="keywordtype">float</span>*<span class="keyword">&gt;</span>(alignedOutputPtr);</div>
+<div class="line"> std::fill_n(outputPtr, numElements, -10.0f);</div>
+<div class="line"> </div>
+<div class="line"> inputTensorInfo = runtime-&gt;GetInputTensorInfo(networkIdentifier, 0);</div>
+<div class="line"> inputTensorInfo.SetConstant(<span class="keyword">true</span>);</div>
+<div class="line"> <a class="code" href="namespacearmnn.xhtml#aa01bce88f89975a5a031db4cc8861527">InputTensors</a> inputTensors</div>
+<div class="line"> {</div>
+<div class="line"> {0, <a class="code" href="classarmnn_1_1_const_tensor.xhtml">ConstTensor</a>(inputTensorInfo, alignedInputPtr)},</div>
+<div class="line"> };</div>
+<div class="line"> <a class="code" href="namespacearmnn.xhtml#a8f091a512915d1cb29a4ebf13dfc53ea">OutputTensors</a> outputTensors</div>
+<div class="line"> {</div>
+<div class="line"> {0, <a name="_a27"></a><a class="code" href="classarmnn_1_1_tensor.xhtml">Tensor</a>(runtime-&gt;GetOutputTensorInfo(networkIdentifier, 0), alignedOutputPtr)}</div>
+<div class="line"> };</div>
+<div class="line"> </div>
+<div class="line"> <span class="comment">// Execute network</span></div>
+<div class="line"> runtime-&gt;EnqueueWorkload(networkIdentifier, inputTensors, outputTensors);</div>
+<div class="line"> </div>
+<div class="line"> <span class="comment">// Tell the CLBackend to sync memory so we can read the output.</span></div>
+<div class="line"> arm_compute::CLScheduler::get().sync();</div>
+<div class="line"> <span class="keyword">auto</span>* outputResult = <span class="keyword">reinterpret_cast&lt;</span><span class="keywordtype">float</span>*<span class="keyword">&gt;</span>(alignedOutputPtr);</div>
+<div class="line"> std::cout &lt;&lt; <span class="stringliteral">&quot;Your number was &quot;</span> &lt;&lt; outputResult[0] &lt;&lt; std::endl;</div>
+<div class="line"> runtime-&gt;UnloadNetwork(networkIdentifier);</div>
+<div class="line"> <span class="keywordflow">return</span> 0;</div>
+<div class="line"> </div>
+<div class="line">}</div>
+</div><!-- fragment --> </div><!-- contents -->
</div><!-- doc-content -->
+<div class="ttc" id="aclassarmnn_1_1_i_custom_allocator_xhtml_a6ce9dda753afa9abb498f3d6b673e595"><div class="ttname"><a href="classarmnn_1_1_i_custom_allocator.xhtml#a6ce9dda753afa9abb498f3d6b673e595">armnn::ICustomAllocator::allocate</a></div><div class="ttdeci">virtual void * allocate(size_t size, size_t alignment)=0</div><div class="ttdoc">Interface to be implemented by the child class to allocate bytes.</div></div>
+<div class="ttc" id="anamespacearmnn_xhtml_a674efcf6cbdb9e831d653ff0e821fb38"><div class="ttname"><a href="namespacearmnn.xhtml#a674efcf6cbdb9e831d653ff0e821fb38">armnn::IOptimizedNetworkPtr</a></div><div class="ttdeci">std::unique_ptr&lt; IOptimizedNetwork, void(*)(IOptimizedNetwork *network)&gt; IOptimizedNetworkPtr</div><div class="ttdef"><b>Definition:</b> <a href="_i_network_8hpp_source.xhtml#l00253">INetwork.hpp:253</a></div></div>
+<div class="ttc" id="aclassarmnn_1_1_exception_xhtml"><div class="ttname"><a href="classarmnn_1_1_exception.xhtml">armnn::Exception</a></div><div class="ttdoc">Base class for all ArmNN exceptions so that users can filter to just those.</div><div class="ttdef"><b>Definition:</b> <a href="_exceptions_8hpp_source.xhtml#l00046">Exceptions.hpp:46</a></div></div>
+<div class="ttc" id="aclassarmnn_1_1_i_connectable_layer_xhtml"><div class="ttname"><a href="classarmnn_1_1_i_connectable_layer.xhtml">armnn::IConnectableLayer</a></div><div class="ttdoc">Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.</div><div class="ttdef"><b>Definition:</b> <a href="_i_network_8hpp_source.xhtml#l00068">INetwork.hpp:68</a></div></div>
+<div class="ttc" id="astructarmnn_1_1_fully_connected_descriptor_xhtml"><div class="ttname"><a href="structarmnn_1_1_fully_connected_descriptor.xhtml">armnn::FullyConnectedDescriptor</a></div><div class="ttdoc">A FullyConnectedDescriptor for the FullyConnectedLayer.</div><div class="ttdef"><b>Definition:</b> <a href="_descriptors_8hpp_source.xhtml#l00475">Descriptors.hpp:475</a></div></div>
+<div class="ttc" id="anamespacearmnn_xhtml_a14fcd7f88d11cea0a018269dca5f9277a1131a914388fac73e5f07b0ba0aad523"><div class="ttname"><a href="namespacearmnn.xhtml#a14fcd7f88d11cea0a018269dca5f9277a1131a914388fac73e5f07b0ba0aad523">armnn::MemorySource::Malloc</a></div><div class="ttdeci">@ Malloc</div></div>
+<div class="ttc" id="a_arm_n_n_8hpp_xhtml"><div class="ttname"><a href="_arm_n_n_8hpp.xhtml">ArmNN.hpp</a></div></div>
+<div class="ttc" id="aclassarmnn_1_1_const_tensor_xhtml"><div class="ttname"><a href="classarmnn_1_1_const_tensor.xhtml">armnn::ConstTensor</a></div><div class="ttdoc">A tensor defined by a TensorInfo (shape and data type) and an immutable backing store.</div><div class="ttdef"><b>Definition:</b> <a href="_tensor_8hpp_source.xhtml#l00327">Tensor.hpp:327</a></div></div>
+<div class="ttc" id="anamespacearmnn_xhtml_aa59f7a819c3e29d10ffc41e5c0616872"><div class="ttname"><a href="namespacearmnn.xhtml#aa59f7a819c3e29d10ffc41e5c0616872">armnn::ConfigureLogging</a></div><div class="ttdeci">void ConfigureLogging(bool printToStandardOutput, bool printToDebugOutput, LogSeverity severity)</div><div class="ttdoc">Configures the logging behaviour of the ARMNN library.</div><div class="ttdef"><b>Definition:</b> <a href="_utils_8cpp_source.xhtml#l00018">Utils.cpp:18</a></div></div>
+<div class="ttc" id="astructarmnn_1_1_i_runtime_1_1_creation_options_xhtml"><div class="ttname"><a href="structarmnn_1_1_i_runtime_1_1_creation_options.xhtml">armnn::IRuntime::CreationOptions</a></div><div class="ttdef"><b>Definition:</b> <a href="_i_runtime_8hpp_source.xhtml#l00085">IRuntime.hpp:85</a></div></div>
+<div class="ttc" id="aclassarmnn_1_1_i_connectable_layer_xhtml_a80ac4eda2e7f2757ec9dd96fc96dbd16"><div class="ttname"><a href="classarmnn_1_1_i_connectable_layer.xhtml#a80ac4eda2e7f2757ec9dd96fc96dbd16">armnn::IConnectableLayer::GetOutputSlot</a></div><div class="ttdeci">virtual const IOutputSlot &amp; GetOutputSlot(unsigned int index) const =0</div><div class="ttdoc">Get the const output slot handle by slot index.</div></div>
+<div class="ttc" id="anamespacearmnn_xhtml_a93a3ba385cad27c4774e5fe64c025d3da4059b0251f66a18cb56f544728796875"><div class="ttname"><a href="namespacearmnn.xhtml#a93a3ba385cad27c4774e5fe64c025d3da4059b0251f66a18cb56f544728796875">armnn::LogSeverity::Info</a></div><div class="ttdeci">@ Info</div></div>
+<div class="ttc" id="aclassarmnn_1_1_i_output_slot_xhtml_ac1835f8756a9f03c02fcf9664e3a0fce"><div class="ttname"><a href="classarmnn_1_1_i_output_slot.xhtml#ac1835f8756a9f03c02fcf9664e3a0fce">armnn::IOutputSlot::Connect</a></div><div class="ttdeci">virtual int Connect(IInputSlot &amp;destination)=0</div></div>
+<div class="ttc" id="anamespacearmnn_xhtml"><div class="ttname"><a href="namespacearmnn.xhtml">armnn</a></div><div class="ttdoc">Copyright (c) 2021 ARM Limited and Contributors.</div><div class="ttdef"><b>Definition:</b> <a href="01__00__quick__start_8dox_source.xhtml#l00006">01_00_quick_start.dox:6</a></div></div>
+<div class="ttc" id="aclassarmnn_1_1_i_connectable_layer_xhtml_a6ec9e0eb66d7d6a01240492a0b18104c"><div class="ttname"><a href="classarmnn_1_1_i_connectable_layer.xhtml#a6ec9e0eb66d7d6a01240492a0b18104c">armnn::IConnectableLayer::GetInputSlot</a></div><div class="ttdeci">virtual const IInputSlot &amp; GetInputSlot(unsigned int index) const =0</div><div class="ttdoc">Get a const input slot handle by slot index.</div></div>
+<div class="ttc" id="astructarmnn_1_1_i_network_properties_xhtml"><div class="ttname"><a href="structarmnn_1_1_i_network_properties.xhtml">armnn::INetworkProperties</a></div><div class="ttdef"><b>Definition:</b> <a href="_i_runtime_8hpp_source.xhtml#l00043">IRuntime.hpp:43</a></div></div>
+<div class="ttc" id="aclassarmnn_1_1_tensor_shape_xhtml"><div class="ttname"><a href="classarmnn_1_1_tensor_shape.xhtml">armnn::TensorShape</a></div><div class="ttdef"><b>Definition:</b> <a href="_tensor_8hpp_source.xhtml#l00020">Tensor.hpp:20</a></div></div>
+<div class="ttc" id="anamespacearmnn_xhtml_aa01bce88f89975a5a031db4cc8861527"><div class="ttname"><a href="namespacearmnn.xhtml#aa01bce88f89975a5a031db4cc8861527">armnn::InputTensors</a></div><div class="ttdeci">std::vector&lt; std::pair&lt; LayerBindingId, class ConstTensor &gt; &gt; InputTensors</div><div class="ttdef"><b>Definition:</b> <a href="_tensor_8hpp_source.xhtml#l00392">Tensor.hpp:392</a></div></div>
+<div class="ttc" id="aclassarmnn_1_1_i_custom_allocator_xhtml"><div class="ttname"><a href="classarmnn_1_1_i_custom_allocator.xhtml">armnn::ICustomAllocator</a></div><div class="ttdoc">Custom Allocator interface.</div><div class="ttdef"><b>Definition:</b> <a href="_i_custom_allocator_8hpp_source.xhtml#l00016">ICustomAllocator.hpp:16</a></div></div>
+<div class="ttc" id="anamespacearmnn_xhtml_ad8ed01ff3ff33333d8e19db4d2818bb6a166495adc0d0f53bee6baecc577f5204"><div class="ttname"><a href="namespacearmnn.xhtml#ad8ed01ff3ff33333d8e19db4d2818bb6a166495adc0d0f53bee6baecc577f5204">armnn::DataType::Float32</a></div><div class="ttdeci">@ Float32</div></div>
+<div class="ttc" id="anamespacearmnn_xhtml_a14fcd7f88d11cea0a018269dca5f9277"><div class="ttname"><a href="namespacearmnn.xhtml#a14fcd7f88d11cea0a018269dca5f9277">armnn::MemorySource</a></div><div class="ttdeci">MemorySource</div><div class="ttdoc">Define the Memory Source to reduce copies.</div><div class="ttdef"><b>Definition:</b> <a href="_types_8hpp_source.xhtml#l00230">Types.hpp:230</a></div></div>
+<div class="ttc" id="aclassarmnn_1_1_tensor_info_xhtml"><div class="ttname"><a href="classarmnn_1_1_tensor_info.xhtml">armnn::TensorInfo</a></div><div class="ttdef"><b>Definition:</b> <a href="_tensor_8hpp_source.xhtml#l00152">Tensor.hpp:152</a></div></div>
+<div class="ttc" id="aclassarmnn_1_1_i_custom_allocator_xhtml_abf02353c52af045a9af48bb40f857ad2"><div class="ttname"><a href="classarmnn_1_1_i_custom_allocator.xhtml#abf02353c52af045a9af48bb40f857ad2">armnn::ICustomAllocator::GetMemorySourceType</a></div><div class="ttdeci">virtual armnn::MemorySource GetMemorySourceType()=0</div><div class="ttdoc">Used to specify what type of memory is being allocated by this allocator.</div></div>
+<div class="ttc" id="anamespacearmnn_xhtml_a0d8160388a127c1a23b37bc88dc6e2ec"><div class="ttname"><a href="namespacearmnn.xhtml#a0d8160388a127c1a23b37bc88dc6e2ec">armnn::NetworkId</a></div><div class="ttdeci">int NetworkId</div><div class="ttdef"><b>Definition:</b> <a href="_i_runtime_8hpp_source.xhtml#l00035">IRuntime.hpp:35</a></div></div>
+<div class="ttc" id="aclassarmnn_1_1_i_network_xhtml_a41ce159095e95f7cd4174ce5d4662697"><div class="ttname"><a href="classarmnn_1_1_i_network.xhtml#a41ce159095e95f7cd4174ce5d4662697">armnn::INetwork::Create</a></div><div class="ttdeci">static INetworkPtr Create(const NetworkOptions &amp;networkOptions={})</div><div class="ttdef"><b>Definition:</b> <a href="_network_8cpp_source.xhtml#l00452">Network.cpp:452</a></div></div>
+<div class="ttc" id="aclassarmnn_1_1_i_output_slot_xhtml_a5ee4a6c9a2481245487b1b1a70d20fd0"><div class="ttname"><a href="classarmnn_1_1_i_output_slot.xhtml#a5ee4a6c9a2481245487b1b1a70d20fd0">armnn::IOutputSlot::SetTensorInfo</a></div><div class="ttdeci">virtual void SetTensorInfo(const TensorInfo &amp;tensorInfo)=0</div></div>
+<div class="ttc" id="a_i_custom_allocator_8hpp_xhtml"><div class="ttname"><a href="_i_custom_allocator_8hpp.xhtml">ICustomAllocator.hpp</a></div></div>
+<div class="ttc" id="anamespacearmnn_xhtml_a8f091a512915d1cb29a4ebf13dfc53ea"><div class="ttname"><a href="namespacearmnn.xhtml#a8f091a512915d1cb29a4ebf13dfc53ea">armnn::OutputTensors</a></div><div class="ttdeci">std::vector&lt; std::pair&lt; LayerBindingId, class Tensor &gt; &gt; OutputTensors</div><div class="ttdef"><b>Definition:</b> <a href="_tensor_8hpp_source.xhtml#l00393">Tensor.hpp:393</a></div></div>
+<div class="ttc" id="anamespacearmnn_xhtml_ace74f6f9feb95a964a49d79458232703"><div class="ttname"><a href="namespacearmnn.xhtml#ace74f6f9feb95a964a49d79458232703">armnn::INetworkPtr</a></div><div class="ttdeci">std::unique_ptr&lt; INetwork, void(*)(INetwork *network)&gt; INetworkPtr</div><div class="ttdef"><b>Definition:</b> <a href="_i_network_8hpp_source.xhtml#l00252">INetwork.hpp:252</a></div></div>
+<div class="ttc" id="astructarmnn_1_1_i_runtime_1_1_creation_options_xhtml_a939528f239e70c85f833c87c5fe41d83"><div class="ttname"><a href="structarmnn_1_1_i_runtime_1_1_creation_options.xhtml#a939528f239e70c85f833c87c5fe41d83">armnn::IRuntime::CreationOptions::m_CustomAllocatorMap</a></div><div class="ttdeci">std::map&lt; BackendId, std::shared_ptr&lt; ICustomAllocator &gt; &gt; m_CustomAllocatorMap</div><div class="ttdoc">A map to define a custom memory allocator for specific backend Ids.</div><div class="ttdef"><b>Definition:</b> <a href="_i_runtime_8hpp_source.xhtml#l00122">IRuntime.hpp:122</a></div></div>
+<div class="ttc" id="aclassarmnn_1_1_i_runtime_xhtml_ad44ecd3700748dc30dc4bbe34ba5bde7"><div class="ttname"><a href="classarmnn_1_1_i_runtime.xhtml#ad44ecd3700748dc30dc4bbe34ba5bde7">armnn::IRuntime::Create</a></div><div class="ttdeci">static IRuntimePtr Create(const CreationOptions &amp;options)</div><div class="ttdef"><b>Definition:</b> <a href="_runtime_8cpp_source.xhtml#l00052">Runtime.cpp:52</a></div></div>
+<div class="ttc" id="a_armnn_converter_8cpp_xhtml_a0ddf1224851353fc92bfbff6f499fa97"><div class="ttname"><a href="_armnn_converter_8cpp.xhtml#a0ddf1224851353fc92bfbff6f499fa97">main</a></div><div class="ttdeci">int main(int argc, char *argv[])</div><div class="ttdef"><b>Definition:</b> <a href="_armnn_converter_8cpp_source.xhtml#l00327">ArmnnConverter.cpp:327</a></div></div>
+<div class="ttc" id="aclassarmnn_1_1_tensor_xhtml"><div class="ttname"><a href="classarmnn_1_1_tensor.xhtml">armnn::Tensor</a></div><div class="ttdoc">A tensor defined by a TensorInfo (shape and data type) and a mutable backing store.</div><div class="ttdef"><b>Definition:</b> <a href="_tensor_8hpp_source.xhtml#l00319">Tensor.hpp:319</a></div></div>
+<div class="ttc" id="anamespacearmnn_xhtml_a2783360b253135639f4c63cfcaed6d48"><div class="ttname"><a href="namespacearmnn.xhtml#a2783360b253135639f4c63cfcaed6d48">armnn::Optimize</a></div><div class="ttdeci">IOptimizedNetworkPtr Optimize(const INetwork &amp;network, const std::vector&lt; BackendId &gt; &amp;backendPreferences, const IDeviceSpec &amp;deviceSpec, const OptimizerOptions &amp;options=OptimizerOptions(), Optional&lt; std::vector&lt; std::string &gt; &amp; &gt; messages=EmptyOptional())</div><div class="ttdoc">Create an optimized version of the network.</div><div class="ttdef"><b>Definition:</b> <a href="_network_8cpp_source.xhtml#l01773">Network.cpp:1773</a></div></div>
+<div class="ttc" id="astructarmnn_1_1_optimizer_options_xhtml"><div class="ttname"><a href="structarmnn_1_1_optimizer_options.xhtml">armnn::OptimizerOptions</a></div><div class="ttdoc">ArmNN performs an optimization on each model/network before it gets loaded for execution.</div><div class="ttdef"><b>Definition:</b> <a href="_i_network_8hpp_source.xhtml#l00137">INetwork.hpp:137</a></div></div>
+<div class="ttc" id="anamespacearmnn_xhtml_a150468a02bd7b2d2d061c4aaaee939f0"><div class="ttname"><a href="namespacearmnn.xhtml#a150468a02bd7b2d2d061c4aaaee939f0">armnn::IRuntimePtr</a></div><div class="ttdeci">std::unique_ptr&lt; IRuntime, void(*)(IRuntime *runtime)&gt; IRuntimePtr</div><div class="ttdef"><b>Definition:</b> <a href="_i_runtime_8hpp_source.xhtml#l00041">IRuntime.hpp:41</a></div></div>
+<div class="ttc" id="aclassarmnn_1_1_i_custom_allocator_xhtml_a7dbeba9295a3f52ea54698c31e555dff"><div class="ttname"><a href="classarmnn_1_1_i_custom_allocator.xhtml#a7dbeba9295a3f52ea54698c31e555dff">armnn::ICustomAllocator::free</a></div><div class="ttdeci">virtual void free(void *ptr)=0</div><div class="ttdoc">Interface to be implemented by the child class to free the allocated bytes.</div></div>
+<div class="ttc" id="astructarmnn_1_1_optimizer_options_xhtml_a05c1bba6ba3ecc1339d4c4c10c0d8890"><div class="ttname"><a href="structarmnn_1_1_optimizer_options.xhtml#a05c1bba6ba3ecc1339d4c4c10c0d8890">armnn::OptimizerOptions::m_ImportEnabled</a></div><div class="ttdeci">bool m_ImportEnabled</div><div class="ttdoc">Enable Import.</div><div class="ttdef"><b>Definition:</b> <a href="_i_network_8hpp_source.xhtml#l00235">INetwork.hpp:235</a></div></div>
<!-- start footer part -->
<div id="nav-path" class="navpath"><!-- id is needed for treeview function! -->
<ul>
- <li class="footer">Generated on Fri Feb 24 2023 10:24:24 for ArmNN by
+ <li class="footer">Generated on Wed Mar 22 2023 15:52:59 for ArmNN by
<a href="http://www.doxygen.org/index.html">
- <img class="footer" src="doxygen.png" alt="doxygen"/></a> 1.8.13 </li>
+ <img class="footer" src="doxygen.png" alt="doxygen"/></a> 1.8.17 </li>
</ul>
</div>
</body>