path: root/21.02/_fuse_batch_norm_8hpp_source.xhtml
author    Jan Eilers <jan.eilers@arm.com>    2021-02-25 17:44:00 +0000
committer Jan Eilers <jan.eilers@arm.com>    2021-02-25 18:27:49 +0000
commit    fd627ffaec8fd8801d980b4c91ee7c0607ab6aaf (patch)
tree      eb4bc8f9b411f30c7655616142b5a4bdd3a1acd0 /21.02/_fuse_batch_norm_8hpp_source.xhtml
parent    fb14ebbd68e04876809145296af96f6f41857418 (diff)
download  armnn-fd627ffaec8fd8801d980b4c91ee7c0607ab6aaf.tar.gz

IVGCVSW-5687 Update Doxygen Docu

* Update Doxygen Documentation for 21.02 release

Signed-off-by: Jan Eilers <jan.eilers@arm.com>
Change-Id: I9ed2f9caab038836ea99d7b378d7899fe431a4e5
Diffstat (limited to '21.02/_fuse_batch_norm_8hpp_source.xhtml')
 -rw-r--r--  21.02/_fuse_batch_norm_8hpp_source.xhtml  153
1 file changed, 153 insertions, 0 deletions
diff --git a/21.02/_fuse_batch_norm_8hpp_source.xhtml b/21.02/_fuse_batch_norm_8hpp_source.xhtml
new file mode 100644
index 0000000000..337cfc9c62
--- /dev/null
+++ b/21.02/_fuse_batch_norm_8hpp_source.xhtml
@@ -0,0 +1,153 @@
Copyright (c) 2020 ARM Limited. SPDX-License-Identifier: MIT

[Standard Doxygen 1.8.13 XHTML page header: title "ArmNN: src/armnn/optimizations/FuseBatchNorm.hpp Source File", with the usual stylesheet, jQuery, navtree, search and MathJax includes.]
+<div class="header">
+ <div class="headertitle">
+<div class="title">FuseBatchNorm.hpp</div> </div>
+</div><!--header-->
+<div class="contents">
+<a href="_fuse_batch_norm_8hpp.xhtml">Go to the documentation of this file.</a><div class="fragment"><div class="line"><a name="l00001"></a><span class="lineno"> 1</span>&#160;<span class="comment">//</span></div><div class="line"><a name="l00002"></a><span class="lineno"> 2</span>&#160;<span class="comment">// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.</span></div><div class="line"><a name="l00003"></a><span class="lineno"> 3</span>&#160;<span class="comment">// SPDX-License-Identifier: MIT</span></div><div class="line"><a name="l00004"></a><span class="lineno"> 4</span>&#160;<span class="comment">//</span></div><div class="line"><a name="l00005"></a><span class="lineno"> 5</span>&#160;</div><div class="line"><a name="l00006"></a><span class="lineno"> 6</span>&#160;<span class="preprocessor">#pragma once</span></div><div class="line"><a name="l00007"></a><span class="lineno"> 7</span>&#160;</div><div class="line"><a name="l00008"></a><span class="lineno"> 8</span>&#160;<span class="preprocessor">#include &quot;<a class="code" href="_optimization_8hpp.xhtml">Optimization.hpp</a>&quot;</span></div><div class="line"><a name="l00009"></a><span class="lineno"> 9</span>&#160;<span class="preprocessor">#include &lt;<a class="code" href="_data_layout_indexed_8hpp.xhtml">armnnUtils/DataLayoutIndexed.hpp</a>&gt;</span></div><div class="line"><a name="l00010"></a><span class="lineno"> 10</span>&#160;<span class="preprocessor">#include &lt;<a class="code" href="_resolve_type_8hpp.xhtml">ResolveType.hpp</a>&gt;</span></div><div class="line"><a name="l00011"></a><span class="lineno"> 11</span>&#160;</div><div class="line"><a name="l00012"></a><span class="lineno"> 12</span>&#160;<span class="keyword">namespace </span><a class="code" href="namespacearmnn.xhtml">armnn</a></div><div class="line"><a name="l00013"></a><span class="lineno"> 13</span>&#160;{</div><div class="line"><a name="l00014"></a><span class="lineno"> 14</span>&#160;<span class="keyword">namespace </span>optimizations</div><div class="line"><a name="l00015"></a><span class="lineno"> 15</span>&#160;{</div><div class="line"><a name="l00016"></a><span class="lineno"> 16</span>&#160;</div><div class="line"><a name="l00017"></a><span class="lineno"> 17</span>&#160;<span class="keyword">template</span> &lt;<span class="keyword">typename</span> ConvLayer, <a class="code" href="namespacearmnn.xhtml#ad8ed01ff3ff33333d8e19db4d2818bb6">armnn::DataType</a> ArmnnType,</div><div class="line"><a name="l00018"></a><span class="lineno"> 18</span>&#160; <span class="keyword">typename</span> T = <a class="code" href="namespacearmnn.xhtml#a0743ed5e860c316a20b68ca96301b411">armnn::ResolveType&lt;ArmnnType&gt;</a>&gt;</div><div class="line"><a name="l00019"></a><span class="lineno"><a class="line" href="classarmnn_1_1optimizations_1_1_fuse_batch_norm.xhtml"> 19</a></span>&#160;<span class="keyword">class </span><a class="code" href="classarmnn_1_1optimizations_1_1_fuse_batch_norm.xhtml">FuseBatchNorm</a></div><div class="line"><a name="l00020"></a><span class="lineno"> 20</span>&#160;{</div><div class="line"><a name="l00021"></a><span class="lineno"> 21</span>&#160;<span class="keyword">public</span>:<span class="comment"></span></div><div class="line"><a name="l00022"></a><span class="lineno"> 22</span>&#160;<span class="comment"> /// Run for every exclusive connection between any base Convolution layer and a child BatchNorm layer for not</span></div><div class="line"><a name="l00023"></a><span class="lineno"> 23</span>&#160;<span 
class="comment"> /// quantized layers.</span></div><div class="line"><a name="l00024"></a><span class="lineno"> 24</span>&#160;<span class="comment"> /// The child will be removed, the base will be removed if it&#39;s left unconnected. A new Convolution layer will</span></div><div class="line"><a name="l00025"></a><span class="lineno"> 25</span>&#160;<span class="comment"> /// be added, its weights and bias will be calculated using the weights and bias of the base Convolution layer</span></div><div class="line"><a name="l00026"></a><span class="lineno"> 26</span>&#160;<span class="comment"> /// combined with the parameters of the child BatchNorm layer.</span></div><div class="line"><a name="l00027"></a><span class="lineno"><a class="line" href="classarmnn_1_1optimizations_1_1_fuse_batch_norm.xhtml#a5a8476ffc04ce7460bb09ad50d1d23de"> 27</a></span>&#160;<span class="comment"></span> <span class="keywordtype">void</span> <a class="code" href="classarmnn_1_1optimizations_1_1_fuse_batch_norm.xhtml#a5a8476ffc04ce7460bb09ad50d1d23de">Run</a>(<a class="code" href="classarmnn_1_1_graph.xhtml">Graph</a>&amp; graph, <a class="code" href="classarmnn_1_1_input_slot.xhtml">InputSlot</a>&amp; connection)<span class="keyword"> const</span></div><div class="line"><a name="l00028"></a><span class="lineno"> 28</span>&#160;<span class="keyword"> </span>{</div><div class="line"><a name="l00029"></a><span class="lineno"> 29</span>&#160; <a class="code" href="classarmnn_1_1_layer.xhtml">Layer</a>&amp; base = connection.<a class="code" href="classarmnn_1_1_input_slot.xhtml#a9effd325a6d512a3f8ff4bd207d53255">GetConnectedOutputSlot</a>()-&gt;<a class="code" href="classarmnn_1_1_output_slot.xhtml#a7ddaf04177053a536f0e7be83a642bc6">GetOwningLayer</a>();</div><div class="line"><a name="l00030"></a><span class="lineno"> 30</span>&#160; <a class="code" href="classarmnn_1_1_layer.xhtml">Layer</a>&amp; child = connection.<a class="code" href="classarmnn_1_1_input_slot.xhtml#a7ddaf04177053a536f0e7be83a642bc6">GetOwningLayer</a>();</div><div class="line"><a name="l00031"></a><span class="lineno"> 31</span>&#160;</div><div class="line"><a name="l00032"></a><span class="lineno"> 32</span>&#160; <span class="keywordtype">bool</span> depthwise = (base.<a class="code" href="classarmnn_1_1_layer.xhtml#ad8e15c530c929ab823d89ae9fd2d3f11">GetType</a>() == <a class="code" href="namespacearmnn.xhtml#a56943a0946e5f15e5e58054b8e7a04a4af97adbfc88b7012a0243215b1076e7e7">LayerType::DepthwiseConvolution2d</a>);</div><div class="line"><a name="l00033"></a><span class="lineno"> 33</span>&#160;</div><div class="line"><a name="l00034"></a><span class="lineno"> 34</span>&#160; <a class="code" href="_assert_8hpp.xhtml#a5698be69cbd5dfe6c28fcd9867e8cbed">ARMNN_ASSERT</a>(base.<a class="code" href="classarmnn_1_1_layer.xhtml#ad8e15c530c929ab823d89ae9fd2d3f11">GetType</a>() == <a class="code" href="namespacearmnn.xhtml#a56943a0946e5f15e5e58054b8e7a04a4adb033d2f81b68f9a17e8f62de69fed4a">LayerType::Convolution2d</a> || depthwise);</div><div class="line"><a name="l00035"></a><span class="lineno"> 35</span>&#160; <a class="code" href="_assert_8hpp.xhtml#a5698be69cbd5dfe6c28fcd9867e8cbed">ARMNN_ASSERT</a>(child.<a class="code" href="classarmnn_1_1_layer.xhtml#ad8e15c530c929ab823d89ae9fd2d3f11">GetType</a>() == <a class="code" href="namespacearmnn.xhtml#a56943a0946e5f15e5e58054b8e7a04a4ae4743c3ec15d1d84169b17264634692e">LayerType::BatchNormalization</a>);</div><div class="line"><a name="l00036"></a><span class="lineno"> 36</span>&#160;</div><div 
class="line"><a name="l00037"></a><span class="lineno"> 37</span>&#160; <span class="keywordflow">if</span> (base.<a class="code" href="classarmnn_1_1_layer.xhtml#aea909c7327109228ef618d459015def3">GetDataType</a>() == ArmnnType &amp;&amp; child.<a class="code" href="classarmnn_1_1_layer.xhtml#aea909c7327109228ef618d459015def3">GetDataType</a>() == ArmnnType)</div><div class="line"><a name="l00038"></a><span class="lineno"> 38</span>&#160; {</div><div class="line"><a name="l00039"></a><span class="lineno"> 39</span>&#160; <a class="code" href="classarmnn_1_1_output_slot.xhtml">OutputSlot</a>* parentOut = base.<a class="code" href="classarmnn_1_1_layer.xhtml#acf8b8e23bf647836592982f97088d375">GetInputSlot</a>(0).<a class="code" href="classarmnn_1_1_input_slot.xhtml#a9effd325a6d512a3f8ff4bd207d53255">GetConnectedOutputSlot</a>();</div><div class="line"><a name="l00040"></a><span class="lineno"> 40</span>&#160; <span class="keyword">auto</span> convLayer = PolymorphicDowncast&lt;ConvLayer*&gt;(&amp;base);</div><div class="line"><a name="l00041"></a><span class="lineno"> 41</span>&#160; <span class="keyword">auto</span> batchNormLayer = PolymorphicDowncast&lt;BatchNormalizationLayer*&gt;(&amp;child);</div><div class="line"><a name="l00042"></a><span class="lineno"> 42</span>&#160;</div><div class="line"><a name="l00043"></a><span class="lineno"> 43</span>&#160; <span class="comment">// Read convolution and batch norm parameters</span></div><div class="line"><a name="l00044"></a><span class="lineno"> 44</span>&#160; <a class="code" href="structarmnn_1_1_batch_normalization_descriptor.xhtml">BatchNormalizationDescriptor</a> batchNormDescriptor = batchNormLayer-&gt;GetParameters();</div><div class="line"><a name="l00045"></a><span class="lineno"> 45</span>&#160; <span class="keyword">auto</span> epsilon = batchNormDescriptor.<a class="code" href="structarmnn_1_1_batch_normalization_descriptor.xhtml#a11c821c7524251004a72ed13c510853c">m_Eps</a>;</div><div class="line"><a name="l00046"></a><span class="lineno"> 46</span>&#160; <a class="code" href="namespacearmnn.xhtml#a44affeeb090c3c6a3062830562672e84">IgnoreUnused</a>(epsilon);</div><div class="line"><a name="l00047"></a><span class="lineno"> 47</span>&#160;</div><div class="line"><a name="l00048"></a><span class="lineno"> 48</span>&#160; <a class="code" href="classarmnn_1_1_const_tensor.xhtml">ConstTensor</a> betaTensor(batchNormLayer-&gt;m_Beta-&gt;GetTensorInfo(), batchNormLayer-&gt;m_Beta-&gt;Map(<span class="keyword">true</span>));</div><div class="line"><a name="l00049"></a><span class="lineno"> 49</span>&#160; <a class="code" href="classarmnn_1_1_const_tensor.xhtml">ConstTensor</a> gammaTensor(batchNormLayer-&gt;m_Gamma-&gt;GetTensorInfo(), batchNormLayer-&gt;m_Gamma-&gt;Map(<span class="keyword">true</span>));</div><div class="line"><a name="l00050"></a><span class="lineno"> 50</span>&#160; <a class="code" href="classarmnn_1_1_const_tensor.xhtml">ConstTensor</a> meanTensor(batchNormLayer-&gt;m_Mean-&gt;GetTensorInfo(), batchNormLayer-&gt;m_Mean-&gt;Map(<span class="keyword">true</span>));</div><div class="line"><a name="l00051"></a><span class="lineno"> 51</span>&#160; <a class="code" href="classarmnn_1_1_const_tensor.xhtml">ConstTensor</a> varTensor(batchNormLayer-&gt;m_Variance-&gt;GetTensorInfo(), batchNormLayer-&gt;m_Variance-&gt;Map(<span class="keyword">true</span>));</div><div class="line"><a name="l00052"></a><span class="lineno"> 52</span>&#160;</div><div class="line"><a name="l00053"></a><span class="lineno"> 53</span>&#160; 
<span class="keyword">auto</span> convDescriptor = convLayer-&gt;GetParameters();</div><div class="line"><a name="l00054"></a><span class="lineno"> 54</span>&#160; <span class="keyword">auto</span> weightsInfo(convLayer-&gt;m_Weight-&gt;GetTensorInfo());</div><div class="line"><a name="l00055"></a><span class="lineno"> 55</span>&#160; <a class="code" href="classarmnn_1_1_const_tensor.xhtml">ConstTensor</a> weightsTensor(weightsInfo, convLayer-&gt;m_Weight-&gt;Map(<span class="keyword">true</span>));</div><div class="line"><a name="l00056"></a><span class="lineno"> 56</span>&#160;</div><div class="line"><a name="l00057"></a><span class="lineno"> 57</span>&#160; <a class="code" href="classarmnn_utils_1_1_data_layout_indexed.xhtml">armnnUtils::DataLayoutIndexed</a> dataLayout(convDescriptor.m_DataLayout);</div><div class="line"><a name="l00058"></a><span class="lineno"> 58</span>&#160; <span class="keyword">auto</span> weightsShape = weightsInfo.GetShape();</div><div class="line"><a name="l00059"></a><span class="lineno"> 59</span>&#160; <span class="keyword">const</span> <span class="keywordtype">unsigned</span> <span class="keywordtype">int</span> depthMultiplier = depthwise ? weightsShape[0] : 1;</div><div class="line"><a name="l00060"></a><span class="lineno"> 60</span>&#160; <span class="keyword">const</span> <span class="keywordtype">unsigned</span> <span class="keywordtype">int</span> inputChannels = depthwise ? weightsShape[1] :</div><div class="line"><a name="l00061"></a><span class="lineno"> 61</span>&#160; weightsShape[dataLayout.GetChannelsIndex()];</div><div class="line"><a name="l00062"></a><span class="lineno"> 62</span>&#160; <span class="keyword">const</span> <span class="keywordtype">unsigned</span> <span class="keywordtype">int</span> outputChannels = depthwise ? inputChannels * depthMultiplier : weightsShape[0];</div><div class="line"><a name="l00063"></a><span class="lineno"> 63</span>&#160; <span class="keyword">const</span> <span class="keywordtype">unsigned</span> <span class="keywordtype">int</span> weightsHeight = depthwise ? weightsShape[2] :</div><div class="line"><a name="l00064"></a><span class="lineno"> 64</span>&#160; weightsShape[dataLayout.GetHeightIndex()];</div><div class="line"><a name="l00065"></a><span class="lineno"> 65</span>&#160; <span class="keyword">const</span> <span class="keywordtype">unsigned</span> <span class="keywordtype">int</span> weightsWidth = depthwise ? 
weightsShape[3] :</div><div class="line"><a name="l00066"></a><span class="lineno"> 66</span>&#160; weightsShape[dataLayout.GetWidthIndex()];</div><div class="line"><a name="l00067"></a><span class="lineno"> 67</span>&#160;</div><div class="line"><a name="l00068"></a><span class="lineno"> 68</span>&#160; <span class="keyword">const</span> <span class="keyword">auto</span>* weightsBuffer = <span class="keyword">static_cast&lt;</span><span class="keyword">const </span>T*<span class="keyword">&gt;</span>(weightsTensor.GetMemoryArea());</div><div class="line"><a name="l00069"></a><span class="lineno"> 69</span>&#160; <span class="keyword">const</span> <span class="keyword">auto</span>* betaBuffer = <span class="keyword">static_cast&lt;</span><span class="keyword">const </span>T*<span class="keyword">&gt;</span>(betaTensor.GetMemoryArea());</div><div class="line"><a name="l00070"></a><span class="lineno"> 70</span>&#160; <span class="keyword">const</span> <span class="keyword">auto</span>* gammaBuffer = <span class="keyword">static_cast&lt;</span><span class="keyword">const </span>T*<span class="keyword">&gt;</span>(gammaTensor.GetMemoryArea());</div><div class="line"><a name="l00071"></a><span class="lineno"> 71</span>&#160; <span class="keyword">const</span> <span class="keyword">auto</span>* meanBuffer = <span class="keyword">static_cast&lt;</span><span class="keyword">const </span>T*<span class="keyword">&gt;</span>(meanTensor.GetMemoryArea());</div><div class="line"><a name="l00072"></a><span class="lineno"> 72</span>&#160; <span class="keyword">const</span> <span class="keyword">auto</span>* varBuffer = <span class="keyword">static_cast&lt;</span><span class="keyword">const </span>T*<span class="keyword">&gt;</span>(varTensor.GetMemoryArea());</div><div class="line"><a name="l00073"></a><span class="lineno"> 73</span>&#160;</div><div class="line"><a name="l00074"></a><span class="lineno"> 74</span>&#160; std::vector&lt;T&gt; weightsVector (weightsBuffer, weightsBuffer + weightsTensor.GetNumElements());</div><div class="line"><a name="l00075"></a><span class="lineno"> 75</span>&#160; std::vector&lt;T&gt; betaVector (betaBuffer, betaBuffer + betaTensor.GetNumElements());</div><div class="line"><a name="l00076"></a><span class="lineno"> 76</span>&#160; std::vector&lt;T&gt; gammaVector (gammaBuffer, gammaBuffer + gammaTensor.GetNumElements());</div><div class="line"><a name="l00077"></a><span class="lineno"> 77</span>&#160; std::vector&lt;T&gt; meanVector (meanBuffer, meanBuffer + meanTensor.GetNumElements());</div><div class="line"><a name="l00078"></a><span class="lineno"> 78</span>&#160; std::vector&lt;T&gt; varianceVector(varBuffer, varBuffer + varTensor.GetNumElements());</div><div class="line"><a name="l00079"></a><span class="lineno"> 79</span>&#160;</div><div class="line"><a name="l00080"></a><span class="lineno"> 80</span>&#160; <span class="comment">// fusedWeights = ( gamma * weights ) / ( std - epsilon);</span></div><div class="line"><a name="l00081"></a><span class="lineno"> 81</span>&#160; std::vector&lt;T&gt; fusedWeightsVector(weightsVector.size());</div><div class="line"><a name="l00082"></a><span class="lineno"> 82</span>&#160; <span class="keywordtype">unsigned</span> <span class="keywordtype">int</span> depthwiseMultiplierIdx = 0;</div><div class="line"><a name="l00083"></a><span class="lineno"> 83</span>&#160;</div><div class="line"><a name="l00084"></a><span class="lineno"> 84</span>&#160; <span class="keywordflow">for</span> (<span class="keywordtype">unsigned</span> 
<span class="keywordtype">int</span> cInput = 0; cInput &lt; inputChannels; ++cInput)</div><div class="line"><a name="l00085"></a><span class="lineno"> 85</span>&#160; {</div><div class="line"><a name="l00086"></a><span class="lineno"> 86</span>&#160; <span class="keywordflow">for</span> (<span class="keywordtype">unsigned</span> <span class="keywordtype">int</span> cOut = 0; cOut &lt; outputChannels; ++cOut)</div><div class="line"><a name="l00087"></a><span class="lineno"> 87</span>&#160; {</div><div class="line"><a name="l00088"></a><span class="lineno"> 88</span>&#160; T mult = gammaVector[cOut] / <span class="keyword">static_cast&lt;</span>T<span class="keyword">&gt;</span>(sqrtf (varianceVector[cOut] + epsilon));</div><div class="line"><a name="l00089"></a><span class="lineno"> 89</span>&#160;</div><div class="line"><a name="l00090"></a><span class="lineno"> 90</span>&#160; <span class="keywordflow">if</span> (depthwise)</div><div class="line"><a name="l00091"></a><span class="lineno"> 91</span>&#160; {</div><div class="line"><a name="l00092"></a><span class="lineno"> 92</span>&#160; cInput = cOut / depthMultiplier;</div><div class="line"><a name="l00093"></a><span class="lineno"> 93</span>&#160; depthwiseMultiplierIdx = cOut % depthMultiplier;</div><div class="line"><a name="l00094"></a><span class="lineno"> 94</span>&#160; }</div><div class="line"><a name="l00095"></a><span class="lineno"> 95</span>&#160;</div><div class="line"><a name="l00096"></a><span class="lineno"> 96</span>&#160; <span class="keywordflow">for</span> (<span class="keywordtype">unsigned</span> <span class="keywordtype">int</span> h = 0; h &lt; weightsHeight; ++h)</div><div class="line"><a name="l00097"></a><span class="lineno"> 97</span>&#160; {</div><div class="line"><a name="l00098"></a><span class="lineno"> 98</span>&#160; <span class="keywordflow">for</span> (<span class="keywordtype">unsigned</span> <span class="keywordtype">int</span> w = 0; w &lt; weightsWidth; ++w)</div><div class="line"><a name="l00099"></a><span class="lineno"> 99</span>&#160; {</div><div class="line"><a name="l00100"></a><span class="lineno"> 100</span>&#160; <span class="keywordtype">unsigned</span> <span class="keywordtype">int</span> weightsIdx = 0;</div><div class="line"><a name="l00101"></a><span class="lineno"> 101</span>&#160;</div><div class="line"><a name="l00102"></a><span class="lineno"> 102</span>&#160; <span class="keywordflow">if</span> (depthwise)</div><div class="line"><a name="l00103"></a><span class="lineno"> 103</span>&#160; {</div><div class="line"><a name="l00104"></a><span class="lineno"> 104</span>&#160; weightsIdx = depthwiseMultiplierIdx * weightsWidth * weightsHeight * inputChannels +</div><div class="line"><a name="l00105"></a><span class="lineno"> 105</span>&#160; cInput * weightsWidth * weightsHeight +</div><div class="line"><a name="l00106"></a><span class="lineno"> 106</span>&#160; h * weightsWidth +</div><div class="line"><a name="l00107"></a><span class="lineno"> 107</span>&#160; w;</div><div class="line"><a name="l00108"></a><span class="lineno"> 108</span>&#160; }</div><div class="line"><a name="l00109"></a><span class="lineno"> 109</span>&#160; <span class="keywordflow">else</span> <span class="keywordflow">if</span> (convDescriptor.m_DataLayout == <a class="code" href="namespacearmnn.xhtml#ad1d5cce2d9e9a5d61c243e5c989112e0ad066db54b89b0912e7e7c6da51e2da51">DataLayout::NHWC</a>)</div><div class="line"><a name="l00110"></a><span class="lineno"> 110</span>&#160; {</div><div class="line"><a 
name="l00111"></a><span class="lineno"> 111</span>&#160; weightsIdx = cOut * weightsHeight * weightsWidth * inputChannels +</div><div class="line"><a name="l00112"></a><span class="lineno"> 112</span>&#160; h * weightsWidth * inputChannels +</div><div class="line"><a name="l00113"></a><span class="lineno"> 113</span>&#160; w * inputChannels +</div><div class="line"><a name="l00114"></a><span class="lineno"> 114</span>&#160; cInput;</div><div class="line"><a name="l00115"></a><span class="lineno"> 115</span>&#160; }</div><div class="line"><a name="l00116"></a><span class="lineno"> 116</span>&#160; <span class="keywordflow">else</span></div><div class="line"><a name="l00117"></a><span class="lineno"> 117</span>&#160; {</div><div class="line"><a name="l00118"></a><span class="lineno"> 118</span>&#160; weightsIdx = cOut * weightsWidth * weightsHeight * inputChannels +</div><div class="line"><a name="l00119"></a><span class="lineno"> 119</span>&#160; cInput * weightsWidth * weightsHeight +</div><div class="line"><a name="l00120"></a><span class="lineno"> 120</span>&#160; h * weightsWidth +</div><div class="line"><a name="l00121"></a><span class="lineno"> 121</span>&#160; w;</div><div class="line"><a name="l00122"></a><span class="lineno"> 122</span>&#160; }</div><div class="line"><a name="l00123"></a><span class="lineno"> 123</span>&#160; fusedWeightsVector[weightsIdx] = mult * weightsVector[weightsIdx];</div><div class="line"><a name="l00124"></a><span class="lineno"> 124</span>&#160; }</div><div class="line"><a name="l00125"></a><span class="lineno"> 125</span>&#160; }</div><div class="line"><a name="l00126"></a><span class="lineno"> 126</span>&#160; }</div><div class="line"><a name="l00127"></a><span class="lineno"> 127</span>&#160; }</div><div class="line"><a name="l00128"></a><span class="lineno"> 128</span>&#160; <a class="code" href="classarmnn_1_1_const_tensor.xhtml">ConstTensor</a> fusedWeightsTensor(weightsInfo, fusedWeightsVector);</div><div class="line"><a name="l00129"></a><span class="lineno"> 129</span>&#160;</div><div class="line"><a name="l00130"></a><span class="lineno"> 130</span>&#160; <span class="comment">// fusedBias = (gamma * (bias - mean)) / (variance - epsilon) + beta;</span></div><div class="line"><a name="l00131"></a><span class="lineno"> 131</span>&#160; std::vector&lt;T&gt; fusedBiasVector(outputChannels);</div><div class="line"><a name="l00132"></a><span class="lineno"> 132</span>&#160; <span class="keywordflow">if</span> (convDescriptor.m_BiasEnabled)</div><div class="line"><a name="l00133"></a><span class="lineno"> 133</span>&#160; {</div><div class="line"><a name="l00134"></a><span class="lineno"> 134</span>&#160; <a class="code" href="_assert_8hpp.xhtml#a91c4dfde57907d7698c7531785690a7f">ARMNN_ASSERT_MSG</a>(convLayer-&gt;m_Bias != <span class="keyword">nullptr</span>,</div><div class="line"><a name="l00135"></a><span class="lineno"> 135</span>&#160; <span class="stringliteral">&quot;FuseBatchNorm: Bias data should not be null if bias is enabled.&quot;</span>);</div><div class="line"><a name="l00136"></a><span class="lineno"> 136</span>&#160;</div><div class="line"><a name="l00137"></a><span class="lineno"> 137</span>&#160; <a class="code" href="classarmnn_1_1_const_tensor.xhtml">ConstTensor</a> biasTensor(convLayer-&gt;m_Bias-&gt;GetTensorInfo(), convLayer-&gt;m_Bias-&gt;Map(<span class="keyword">true</span>));</div><div class="line"><a name="l00138"></a><span class="lineno"> 138</span>&#160; <span class="keyword">const</span> <span 
class="keyword">auto</span>* biasBuffer = <span class="keyword">static_cast&lt;</span><span class="keyword">const </span>T*<span class="keyword">&gt;</span>(biasTensor.GetMemoryArea());</div><div class="line"><a name="l00139"></a><span class="lineno"> 139</span>&#160; std::vector&lt;T&gt; biasVector(biasBuffer, biasBuffer + biasTensor.GetNumElements());</div><div class="line"><a name="l00140"></a><span class="lineno"> 140</span>&#160;</div><div class="line"><a name="l00141"></a><span class="lineno"> 141</span>&#160; <span class="keywordflow">for</span> (<span class="keywordtype">unsigned</span> <span class="keywordtype">int</span> cOut = 0; cOut &lt; outputChannels; ++cOut)</div><div class="line"><a name="l00142"></a><span class="lineno"> 142</span>&#160; {</div><div class="line"><a name="l00143"></a><span class="lineno"> 143</span>&#160; fusedBiasVector[cOut] = ((gammaVector[cOut] * (biasVector[cOut] - meanVector[cOut])) /</div><div class="line"><a name="l00144"></a><span class="lineno"> 144</span>&#160; sqrtf(varianceVector[cOut] + epsilon)) + betaVector[cOut];</div><div class="line"><a name="l00145"></a><span class="lineno"> 145</span>&#160; }</div><div class="line"><a name="l00146"></a><span class="lineno"> 146</span>&#160; }</div><div class="line"><a name="l00147"></a><span class="lineno"> 147</span>&#160; <span class="keywordflow">else</span></div><div class="line"><a name="l00148"></a><span class="lineno"> 148</span>&#160; {</div><div class="line"><a name="l00149"></a><span class="lineno"> 149</span>&#160; convDescriptor.m_BiasEnabled = <span class="keyword">true</span>;</div><div class="line"><a name="l00150"></a><span class="lineno"> 150</span>&#160; std::vector&lt;T&gt; biasVector(outputChannels, T(0));</div><div class="line"><a name="l00151"></a><span class="lineno"> 151</span>&#160;</div><div class="line"><a name="l00152"></a><span class="lineno"> 152</span>&#160; <span class="keywordflow">for</span> (<span class="keywordtype">unsigned</span> <span class="keywordtype">int</span> cOut = 0; cOut &lt; outputChannels; ++cOut)</div><div class="line"><a name="l00153"></a><span class="lineno"> 153</span>&#160; {</div><div class="line"><a name="l00154"></a><span class="lineno"> 154</span>&#160; fusedBiasVector[cOut] = ((gammaVector[cOut] * (biasVector[cOut] - meanVector[cOut])) /</div><div class="line"><a name="l00155"></a><span class="lineno"> 155</span>&#160; sqrtf(varianceVector[cOut] + epsilon)) + betaVector[cOut];</div><div class="line"><a name="l00156"></a><span class="lineno"> 156</span>&#160; }</div><div class="line"><a name="l00157"></a><span class="lineno"> 157</span>&#160; }</div><div class="line"><a name="l00158"></a><span class="lineno"> 158</span>&#160; <a class="code" href="classarmnn_1_1_const_tensor.xhtml">ConstTensor</a> fusedBiasTensor(<a class="code" href="classarmnn_1_1_tensor_info.xhtml">TensorInfo</a>({outputChannels}, ArmnnType), fusedBiasVector);</div><div class="line"><a name="l00159"></a><span class="lineno"> 159</span>&#160;</div><div class="line"><a name="l00160"></a><span class="lineno"> 160</span>&#160; <span class="comment">// Insert the new convolution layer that has batch norm parameters fused into</span></div><div class="line"><a name="l00161"></a><span class="lineno"> 161</span>&#160; <span class="keyword">const</span> std::string name = std::string(<span class="stringliteral">&quot;fused-&quot;</span>) + child.<a class="code" href="classarmnn_1_1_layer.xhtml#a7ddf0cf6f620d59c10e63495ace795d0">GetName</a>() + std::string(<span 
class="stringliteral">&quot;-into-&quot;</span>) + base.<a class="code" href="classarmnn_1_1_layer.xhtml#a7ddf0cf6f620d59c10e63495ace795d0">GetName</a>();</div><div class="line"><a name="l00162"></a><span class="lineno"> 162</span>&#160; <span class="keyword">auto</span>&amp; newConv2dLayer = *graph.<a class="code" href="classarmnn_1_1_graph.xhtml#a3ff30c6669fdc69de1f5be1f89bacc3f">InsertNewLayer</a>&lt;ConvLayer&gt;(base.<a class="code" href="classarmnn_1_1_layer.xhtml#acf8b8e23bf647836592982f97088d375">GetInputSlot</a>(0),</div><div class="line"><a name="l00163"></a><span class="lineno"> 163</span>&#160; convDescriptor,</div><div class="line"><a name="l00164"></a><span class="lineno"> 164</span>&#160; name.c_str());</div><div class="line"><a name="l00165"></a><span class="lineno"> 165</span>&#160; newConv2dLayer.m_Weight = std::make_unique&lt;ScopedCpuTensorHandle&gt;(fusedWeightsTensor);</div><div class="line"><a name="l00166"></a><span class="lineno"> 166</span>&#160; newConv2dLayer.m_Bias = std::make_unique&lt;ScopedCpuTensorHandle&gt;(<a class="code" href="classarmnn_1_1_const_tensor.xhtml">ConstTensor</a>(fusedBiasTensor));</div><div class="line"><a name="l00167"></a><span class="lineno"> 167</span>&#160;</div><div class="line"><a name="l00168"></a><span class="lineno"> 168</span>&#160; <span class="comment">// Reconnects with original parent.</span></div><div class="line"><a name="l00169"></a><span class="lineno"> 169</span>&#160; newConv2dLayer.GetOutputSlot().MoveAllConnections(*parentOut);</div><div class="line"><a name="l00170"></a><span class="lineno"> 170</span>&#160; <span class="comment">// Parent is now the new convolution2d layer.</span></div><div class="line"><a name="l00171"></a><span class="lineno"> 171</span>&#160; parentOut = &amp;newConv2dLayer.GetOutputSlot();</div><div class="line"><a name="l00172"></a><span class="lineno"> 172</span>&#160;</div><div class="line"><a name="l00173"></a><span class="lineno"> 173</span>&#160; <span class="comment">// Moves connections in child output to parent layer.</span></div><div class="line"><a name="l00174"></a><span class="lineno"> 174</span>&#160; <span class="comment">// Child layer will be removed as it&#39;s left unconnected.</span></div><div class="line"><a name="l00175"></a><span class="lineno"> 175</span>&#160; <span class="comment">// Base layer will be removed if left unconnected.</span></div><div class="line"><a name="l00176"></a><span class="lineno"> 176</span>&#160; child.<a class="code" href="classarmnn_1_1_layer.xhtml#a0e36688a43c35668d8db5257274c68fe">GetOutputSlot</a>().<a class="code" href="classarmnn_1_1_output_slot.xhtml#a19d30f83e90f2612e6aec510715f790d">MoveAllConnections</a>(*parentOut);</div><div class="line"><a name="l00177"></a><span class="lineno"> 177</span>&#160; }</div><div class="line"><a name="l00178"></a><span class="lineno"> 178</span>&#160; }</div><div class="line"><a name="l00179"></a><span class="lineno"> 179</span>&#160;<span class="keyword">protected</span>:</div><div class="line"><a name="l00180"></a><span class="lineno"> 180</span>&#160; <a class="code" href="classarmnn_1_1optimizations_1_1_fuse_batch_norm.xhtml#abe49327783cb8bdc12c085c987db14db">FuseBatchNorm</a>() = <span class="keywordflow">default</span>;</div><div class="line"><a name="l00181"></a><span class="lineno"> 181</span>&#160; <a class="code" href="classarmnn_1_1optimizations_1_1_fuse_batch_norm.xhtml#a0ff9a790927b898d90261a8ea0e479e6">~FuseBatchNorm</a>() = <span class="keywordflow">default</span>;</div><div class="line"><a 
name="l00182"></a><span class="lineno"> 182</span>&#160;};</div><div class="line"><a name="l00183"></a><span class="lineno"> 183</span>&#160;</div><div class="line"><a name="l00184"></a><span class="lineno"> 184</span>&#160;<span class="keyword">using</span> <a class="code" href="classarmnn_1_1_optimize_for_exclusive_connection.xhtml">FuseBatchNormIntoConvolution2DFloat32</a> =</div><div class="line"><a name="l00185"></a><span class="lineno"> 185</span>&#160; <a class="code" href="classarmnn_1_1_optimize_for_exclusive_connection.xhtml">OptimizeForExclusiveConnection</a>&lt;<a class="code" href="classarmnn_1_1_convolution2d_layer.xhtml">Convolution2dLayer</a>,</div><div class="line"><a name="l00186"></a><span class="lineno"> 186</span>&#160; <a class="code" href="classarmnn_1_1_batch_normalization_layer.xhtml">BatchNormalizationLayer</a>,</div><div class="line"><a name="l00187"></a><span class="lineno"><a class="line" href="namespacearmnn_1_1optimizations.xhtml#aa52c06792e18dc13030e82476f706f9e"> 187</a></span>&#160; <a class="code" href="classarmnn_1_1optimizations_1_1_fuse_batch_norm.xhtml">FuseBatchNorm&lt;Convolution2dLayer, armnn::DataType::Float32&gt;</a>&gt;;</div><div class="line"><a name="l00188"></a><span class="lineno"> 188</span>&#160;</div><div class="line"><a name="l00189"></a><span class="lineno"> 189</span>&#160;<span class="keyword">using</span> <a class="code" href="classarmnn_1_1_optimize_for_exclusive_connection.xhtml">FuseBatchNormIntoConvolution2DFloat16</a> =</div><div class="line"><a name="l00190"></a><span class="lineno"> 190</span>&#160; <a class="code" href="classarmnn_1_1_optimize_for_exclusive_connection.xhtml">OptimizeForExclusiveConnection</a>&lt;Convolution2dLayer,</div><div class="line"><a name="l00191"></a><span class="lineno"> 191</span>&#160; BatchNormalizationLayer,</div><div class="line"><a name="l00192"></a><span class="lineno"><a class="line" href="namespacearmnn_1_1optimizations.xhtml#a8a81178ddcebb93ec0c35b6e6284273c"> 192</a></span>&#160; <a class="code" href="classarmnn_1_1optimizations_1_1_fuse_batch_norm.xhtml">FuseBatchNorm&lt;Convolution2dLayer, armnn::DataType::Float16&gt;</a>&gt;;</div><div class="line"><a name="l00193"></a><span class="lineno"> 193</span>&#160;</div><div class="line"><a name="l00194"></a><span class="lineno"> 194</span>&#160;<span class="keyword">using</span> <a class="code" href="classarmnn_1_1_optimize_for_exclusive_connection.xhtml">FuseBatchNormIntoDepthwiseConvolution2DFloat32</a> =</div><div class="line"><a name="l00195"></a><span class="lineno"> 195</span>&#160; <a class="code" href="classarmnn_1_1_optimize_for_exclusive_connection.xhtml">OptimizeForExclusiveConnection</a>&lt;<a class="code" href="classarmnn_1_1_depthwise_convolution2d_layer.xhtml">DepthwiseConvolution2dLayer</a>,</div><div class="line"><a name="l00196"></a><span class="lineno"> 196</span>&#160; BatchNormalizationLayer,</div><div class="line"><a name="l00197"></a><span class="lineno"><a class="line" href="namespacearmnn_1_1optimizations.xhtml#a56e54a818166a2f4b2c1a7f76a3629ff"> 197</a></span>&#160; <a class="code" href="classarmnn_1_1optimizations_1_1_fuse_batch_norm.xhtml">FuseBatchNorm&lt;DepthwiseConvolution2dLayer, armnn::DataType::Float32&gt;</a>&gt;;</div><div class="line"><a name="l00198"></a><span class="lineno"> 198</span>&#160;</div><div class="line"><a name="l00199"></a><span class="lineno"> 199</span>&#160;<span class="keyword">using</span> <a class="code" 
href="classarmnn_1_1_optimize_for_exclusive_connection.xhtml">FuseBatchNormIntoDepthwiseConvolution2DFloat16</a> =</div><div class="line"><a name="l00200"></a><span class="lineno"> 200</span>&#160; <a class="code" href="classarmnn_1_1_optimize_for_exclusive_connection.xhtml">OptimizeForExclusiveConnection</a>&lt;DepthwiseConvolution2dLayer,</div><div class="line"><a name="l00201"></a><span class="lineno"> 201</span>&#160; BatchNormalizationLayer,</div><div class="line"><a name="l00202"></a><span class="lineno"><a class="line" href="namespacearmnn_1_1optimizations.xhtml#ab40bb51feca46649eb9d00522bfe51f6"> 202</a></span>&#160; <a class="code" href="classarmnn_1_1optimizations_1_1_fuse_batch_norm.xhtml">FuseBatchNorm&lt;DepthwiseConvolution2dLayer, armnn::DataType::Float16&gt;</a>&gt;;</div><div class="line"><a name="l00203"></a><span class="lineno"> 203</span>&#160;</div><div class="line"><a name="l00204"></a><span class="lineno"> 204</span>&#160;} <span class="comment">// namespace optimizations</span></div><div class="line"><a name="l00205"></a><span class="lineno"> 205</span>&#160;} <span class="comment">// namespace armnn</span></div><div class="ttc" id="classarmnn_1_1_batch_normalization_layer_xhtml"><div class="ttname"><a href="classarmnn_1_1_batch_normalization_layer.xhtml">armnn::BatchNormalizationLayer</a></div><div class="ttdoc">This layer represents a batch normalization operation. </div><div class="ttdef"><b>Definition:</b> <a href="_batch_normalization_layer_8hpp_source.xhtml#l00015">BatchNormalizationLayer.hpp:15</a></div></div>
+<div class="ttc" id="_data_layout_indexed_8hpp_xhtml"><div class="ttname"><a href="_data_layout_indexed_8hpp.xhtml">DataLayoutIndexed.hpp</a></div></div>
+<div class="ttc" id="classarmnn_1_1optimizations_1_1_fuse_batch_norm_xhtml"><div class="ttname"><a href="classarmnn_1_1optimizations_1_1_fuse_batch_norm.xhtml">armnn::optimizations::FuseBatchNorm</a></div><div class="ttdef"><b>Definition:</b> <a href="_fuse_batch_norm_8hpp_source.xhtml#l00019">FuseBatchNorm.hpp:19</a></div></div>
+<div class="ttc" id="classarmnn_1_1_tensor_info_xhtml"><div class="ttname"><a href="classarmnn_1_1_tensor_info.xhtml">armnn::TensorInfo</a></div><div class="ttdef"><b>Definition:</b> <a href="_tensor_8hpp_source.xhtml#l00152">Tensor.hpp:152</a></div></div>
+<div class="ttc" id="classarmnn_1_1_depthwise_convolution2d_layer_xhtml"><div class="ttname"><a href="classarmnn_1_1_depthwise_convolution2d_layer.xhtml">armnn::DepthwiseConvolution2dLayer</a></div><div class="ttdoc">This layer represents a depthwise convolution 2d operation. </div><div class="ttdef"><b>Definition:</b> <a href="_depthwise_convolution2d_layer_8hpp_source.xhtml#l00015">DepthwiseConvolution2dLayer.hpp:15</a></div></div>
+<div class="ttc" id="classarmnn_1_1_output_slot_xhtml_a7ddaf04177053a536f0e7be83a642bc6"><div class="ttname"><a href="classarmnn_1_1_output_slot.xhtml#a7ddaf04177053a536f0e7be83a642bc6">armnn::OutputSlot::GetOwningLayer</a></div><div class="ttdeci">Layer &amp; GetOwningLayer() const</div><div class="ttdef"><b>Definition:</b> <a href="_layer_8hpp_source.xhtml#l00115">Layer.hpp:115</a></div></div>
+<div class="ttc" id="structarmnn_1_1_batch_normalization_descriptor_xhtml_a11c821c7524251004a72ed13c510853c"><div class="ttname"><a href="structarmnn_1_1_batch_normalization_descriptor.xhtml#a11c821c7524251004a72ed13c510853c">armnn::BatchNormalizationDescriptor::m_Eps</a></div><div class="ttdeci">float m_Eps</div><div class="ttdoc">Value to add to the variance. Used to avoid dividing by zero. </div><div class="ttdef"><b>Definition:</b> <a href="_descriptors_8hpp_source.xhtml#l00639">Descriptors.hpp:639</a></div></div>
+<div class="ttc" id="classarmnn_1_1optimizations_1_1_fuse_batch_norm_xhtml_abe49327783cb8bdc12c085c987db14db"><div class="ttname"><a href="classarmnn_1_1optimizations_1_1_fuse_batch_norm.xhtml#abe49327783cb8bdc12c085c987db14db">armnn::optimizations::FuseBatchNorm::FuseBatchNorm</a></div><div class="ttdeci">FuseBatchNorm()=default</div></div>
+<div class="ttc" id="namespacearmnn_xhtml_a0743ed5e860c316a20b68ca96301b411"><div class="ttname"><a href="namespacearmnn.xhtml#a0743ed5e860c316a20b68ca96301b411">armnn::ResolveType</a></div><div class="ttdeci">typename ResolveTypeImpl&lt; DT &gt;::Type ResolveType</div><div class="ttdef"><b>Definition:</b> <a href="_resolve_type_8hpp_source.xhtml#l00073">ResolveType.hpp:73</a></div></div>
+<div class="ttc" id="_resolve_type_8hpp_xhtml"><div class="ttname"><a href="_resolve_type_8hpp.xhtml">ResolveType.hpp</a></div></div>
+<div class="ttc" id="classarmnn_1_1_input_slot_xhtml"><div class="ttname"><a href="classarmnn_1_1_input_slot.xhtml">armnn::InputSlot</a></div><div class="ttdef"><b>Definition:</b> <a href="_layer_8hpp_source.xhtml#l00041">Layer.hpp:41</a></div></div>
+<div class="ttc" id="namespacearmnn_xhtml"><div class="ttname"><a href="namespacearmnn.xhtml">armnn</a></div><div class="ttdoc">Copyright (c) 2021 ARM Limited and Contributors. </div><div class="ttdef"><b>Definition:</b> <a href="01__00__software__tools_8dox_source.xhtml#l00006">01_00_software_tools.dox:6</a></div></div>
+<div class="ttc" id="namespacearmnn_xhtml_a44affeeb090c3c6a3062830562672e84"><div class="ttname"><a href="namespacearmnn.xhtml#a44affeeb090c3c6a3062830562672e84">armnn::IgnoreUnused</a></div><div class="ttdeci">void IgnoreUnused(Ts &amp;&amp;...)</div><div class="ttdef"><b>Definition:</b> <a href="_ignore_unused_8hpp_source.xhtml#l00014">IgnoreUnused.hpp:14</a></div></div>
+<div class="ttc" id="classarmnn_1_1optimizations_1_1_fuse_batch_norm_xhtml_a0ff9a790927b898d90261a8ea0e479e6"><div class="ttname"><a href="classarmnn_1_1optimizations_1_1_fuse_batch_norm.xhtml#a0ff9a790927b898d90261a8ea0e479e6">armnn::optimizations::FuseBatchNorm::~FuseBatchNorm</a></div><div class="ttdeci">~FuseBatchNorm()=default</div></div>
+<div class="ttc" id="classarmnn_1_1_layer_xhtml_acf8b8e23bf647836592982f97088d375"><div class="ttname"><a href="classarmnn_1_1_layer.xhtml#acf8b8e23bf647836592982f97088d375">armnn::Layer::GetInputSlot</a></div><div class="ttdeci">const InputSlot &amp; GetInputSlot(unsigned int index) const override</div><div class="ttdoc">Get a const input slot handle by slot index. </div><div class="ttdef"><b>Definition:</b> <a href="_layer_8hpp_source.xhtml#l00316">Layer.hpp:316</a></div></div>
+<div class="ttc" id="namespacearmnn_xhtml_ad8ed01ff3ff33333d8e19db4d2818bb6"><div class="ttname"><a href="namespacearmnn.xhtml#ad8ed01ff3ff33333d8e19db4d2818bb6">armnn::DataType</a></div><div class="ttdeci">DataType</div><div class="ttdef"><b>Definition:</b> <a href="_types_8hpp_source.xhtml#l00032">Types.hpp:32</a></div></div>
+<div class="ttc" id="_assert_8hpp_xhtml_a91c4dfde57907d7698c7531785690a7f"><div class="ttname"><a href="_assert_8hpp.xhtml#a91c4dfde57907d7698c7531785690a7f">ARMNN_ASSERT_MSG</a></div><div class="ttdeci">#define ARMNN_ASSERT_MSG(COND, MSG)</div><div class="ttdef"><b>Definition:</b> <a href="_assert_8hpp_source.xhtml#l00015">Assert.hpp:15</a></div></div>
+<div class="ttc" id="classarmnn_utils_1_1_data_layout_indexed_xhtml"><div class="ttname"><a href="classarmnn_utils_1_1_data_layout_indexed.xhtml">armnnUtils::DataLayoutIndexed</a></div><div class="ttdoc">Provides access to the appropriate indexes for Channels, Height and Width based on DataLayout...</div><div class="ttdef"><b>Definition:</b> <a href="_data_layout_indexed_8hpp_source.xhtml#l00017">DataLayoutIndexed.hpp:17</a></div></div>
+<div class="ttc" id="classarmnn_1_1_const_tensor_xhtml"><div class="ttname"><a href="classarmnn_1_1_const_tensor.xhtml">armnn::ConstTensor</a></div><div class="ttdoc">A tensor defined by a TensorInfo (shape and data type) and an immutable backing store. </div><div class="ttdef"><b>Definition:</b> <a href="_tensor_8hpp_source.xhtml#l00314">Tensor.hpp:314</a></div></div>
+<div class="ttc" id="classarmnn_1_1_layer_xhtml_ad8e15c530c929ab823d89ae9fd2d3f11"><div class="ttname"><a href="classarmnn_1_1_layer.xhtml#ad8e15c530c929ab823d89ae9fd2d3f11">armnn::Layer::GetType</a></div><div class="ttdeci">LayerType GetType() const override</div><div class="ttdoc">Returns the armnn::LayerType of this layer. </div><div class="ttdef"><b>Definition:</b> <a href="_layer_8hpp_source.xhtml#l00265">Layer.hpp:265</a></div></div>
+<div class="ttc" id="namespacearmnn_xhtml_a56943a0946e5f15e5e58054b8e7a04a4adb033d2f81b68f9a17e8f62de69fed4a"><div class="ttname"><a href="namespacearmnn.xhtml#a56943a0946e5f15e5e58054b8e7a04a4adb033d2f81b68f9a17e8f62de69fed4a">armnn::LayerType::Convolution2d</a></div></div>
+<div class="ttc" id="classarmnn_1_1_output_slot_xhtml"><div class="ttname"><a href="classarmnn_1_1_output_slot.xhtml">armnn::OutputSlot</a></div><div class="ttdef"><b>Definition:</b> <a href="_layer_8hpp_source.xhtml#l00083">Layer.hpp:83</a></div></div>
+<div class="ttc" id="_assert_8hpp_xhtml_a5698be69cbd5dfe6c28fcd9867e8cbed"><div class="ttname"><a href="_assert_8hpp.xhtml#a5698be69cbd5dfe6c28fcd9867e8cbed">ARMNN_ASSERT</a></div><div class="ttdeci">#define ARMNN_ASSERT(COND)</div><div class="ttdef"><b>Definition:</b> <a href="_assert_8hpp_source.xhtml#l00014">Assert.hpp:14</a></div></div>
+<div class="ttc" id="classarmnn_1_1_input_slot_xhtml_a9effd325a6d512a3f8ff4bd207d53255"><div class="ttname"><a href="classarmnn_1_1_input_slot.xhtml#a9effd325a6d512a3f8ff4bd207d53255">armnn::InputSlot::GetConnectedOutputSlot</a></div><div class="ttdeci">const OutputSlot * GetConnectedOutputSlot() const</div><div class="ttdef"><b>Definition:</b> <a href="_layer_8hpp_source.xhtml#l00055">Layer.hpp:55</a></div></div>
+<div class="ttc" id="classarmnn_1_1optimizations_1_1_fuse_batch_norm_xhtml_a5a8476ffc04ce7460bb09ad50d1d23de"><div class="ttname"><a href="classarmnn_1_1optimizations_1_1_fuse_batch_norm.xhtml#a5a8476ffc04ce7460bb09ad50d1d23de">armnn::optimizations::FuseBatchNorm::Run</a></div><div class="ttdeci">void Run(Graph &amp;graph, InputSlot &amp;connection) const</div><div class="ttdoc">Run for every exclusive connection between any base Convolution layer and a child BatchNorm layer for...</div><div class="ttdef"><b>Definition:</b> <a href="_fuse_batch_norm_8hpp_source.xhtml#l00027">FuseBatchNorm.hpp:27</a></div></div>
+<div class="ttc" id="classarmnn_1_1_input_slot_xhtml_a7ddaf04177053a536f0e7be83a642bc6"><div class="ttname"><a href="classarmnn_1_1_input_slot.xhtml#a7ddaf04177053a536f0e7be83a642bc6">armnn::InputSlot::GetOwningLayer</a></div><div class="ttdeci">Layer &amp; GetOwningLayer() const</div><div class="ttdef"><b>Definition:</b> <a href="_layer_8hpp_source.xhtml#l00052">Layer.hpp:52</a></div></div>
+<div class="ttc" id="classarmnn_1_1_graph_xhtml"><div class="ttname"><a href="classarmnn_1_1_graph.xhtml">armnn::Graph</a></div><div class="ttdef"><b>Definition:</b> <a href="_graph_8hpp_source.xhtml#l00029">Graph.hpp:29</a></div></div>
+<div class="ttc" id="namespacearmnn_xhtml_a56943a0946e5f15e5e58054b8e7a04a4af97adbfc88b7012a0243215b1076e7e7"><div class="ttname"><a href="namespacearmnn.xhtml#a56943a0946e5f15e5e58054b8e7a04a4af97adbfc88b7012a0243215b1076e7e7">armnn::LayerType::DepthwiseConvolution2d</a></div></div>
+<div class="ttc" id="namespacearmnn_xhtml_a56943a0946e5f15e5e58054b8e7a04a4ae4743c3ec15d1d84169b17264634692e"><div class="ttname"><a href="namespacearmnn.xhtml#a56943a0946e5f15e5e58054b8e7a04a4ae4743c3ec15d1d84169b17264634692e">armnn::LayerType::BatchNormalization</a></div></div>
+<div class="ttc" id="classarmnn_1_1_layer_xhtml_aea909c7327109228ef618d459015def3"><div class="ttname"><a href="classarmnn_1_1_layer.xhtml#aea909c7327109228ef618d459015def3">armnn::Layer::GetDataType</a></div><div class="ttdeci">DataType GetDataType() const</div><div class="ttdef"><b>Definition:</b> <a href="_layer_8cpp_source.xhtml#l00283">Layer.cpp:283</a></div></div>
+<div class="ttc" id="classarmnn_1_1_layer_xhtml_a0e36688a43c35668d8db5257274c68fe"><div class="ttname"><a href="classarmnn_1_1_layer.xhtml#a0e36688a43c35668d8db5257274c68fe">armnn::Layer::GetOutputSlot</a></div><div class="ttdeci">const OutputSlot &amp; GetOutputSlot(unsigned int index=0) const override</div><div class="ttdoc">Get the const output slot handle by slot index. </div><div class="ttdef"><b>Definition:</b> <a href="_layer_8hpp_source.xhtml#l00318">Layer.hpp:318</a></div></div>
+<div class="ttc" id="classarmnn_1_1_optimize_for_exclusive_connection_xhtml"><div class="ttname"><a href="classarmnn_1_1_optimize_for_exclusive_connection.xhtml">armnn::OptimizeForExclusiveConnection</a></div><div class="ttdef"><b>Definition:</b> <a href="_optimization_8hpp_source.xhtml#l00173">Optimization.hpp:173</a></div></div>
+<div class="ttc" id="classarmnn_1_1_layer_xhtml_a7ddf0cf6f620d59c10e63495ace795d0"><div class="ttname"><a href="classarmnn_1_1_layer.xhtml#a7ddf0cf6f620d59c10e63495ace795d0">armnn::Layer::GetName</a></div><div class="ttdeci">const char * GetName() const override</div><div class="ttdoc">Returns the name of the layer. </div><div class="ttdef"><b>Definition:</b> <a href="_layer_8hpp_source.xhtml#l00311">Layer.hpp:311</a></div></div>
+<div class="ttc" id="classarmnn_1_1_convolution2d_layer_xhtml"><div class="ttname"><a href="classarmnn_1_1_convolution2d_layer.xhtml">armnn::Convolution2dLayer</a></div><div class="ttdoc">This layer represents a convolution 2d operation. </div><div class="ttdef"><b>Definition:</b> <a href="_convolution2d_layer_8hpp_source.xhtml#l00015">Convolution2dLayer.hpp:15</a></div></div>
+<div class="ttc" id="_optimization_8hpp_xhtml"><div class="ttname"><a href="_optimization_8hpp.xhtml">Optimization.hpp</a></div></div>
+<div class="ttc" id="classarmnn_1_1_graph_xhtml_a3ff30c6669fdc69de1f5be1f89bacc3f"><div class="ttname"><a href="classarmnn_1_1_graph.xhtml#a3ff30c6669fdc69de1f5be1f89bacc3f">armnn::Graph::InsertNewLayer</a></div><div class="ttdeci">LayerT * InsertNewLayer(InputSlot &amp;insertBefore, Args &amp;&amp;... args)</div><div class="ttdoc">Inserts a new layer between the output slot currently connected to insertBefore and insertBefore itse...</div><div class="ttdef"><b>Definition:</b> <a href="_graph_8hpp_source.xhtml#l00416">Graph.hpp:416</a></div></div>
+<div class="ttc" id="classarmnn_1_1_output_slot_xhtml_a19d30f83e90f2612e6aec510715f790d"><div class="ttname"><a href="classarmnn_1_1_output_slot.xhtml#a19d30f83e90f2612e6aec510715f790d">armnn::OutputSlot::MoveAllConnections</a></div><div class="ttdeci">void MoveAllConnections(OutputSlot &amp;destination)</div><div class="ttdoc">Moves all connections to another OutputSlot. </div><div class="ttdef"><b>Definition:</b> <a href="_layer_8cpp_source.xhtml#l00116">Layer.cpp:116</a></div></div>
+<div class="ttc" id="classarmnn_1_1_layer_xhtml"><div class="ttname"><a href="classarmnn_1_1_layer.xhtml">armnn::Layer</a></div><div class="ttdef"><b>Definition:</b> <a href="_layer_8hpp_source.xhtml#l00210">Layer.hpp:210</a></div></div>
+<div class="ttc" id="structarmnn_1_1_batch_normalization_descriptor_xhtml"><div class="ttname"><a href="structarmnn_1_1_batch_normalization_descriptor.xhtml">armnn::BatchNormalizationDescriptor</a></div><div class="ttdoc">A BatchNormalizationDescriptor for the BatchNormalizationLayer. </div><div class="ttdef"><b>Definition:</b> <a href="_descriptors_8hpp_source.xhtml#l00626">Descriptors.hpp:626</a></div></div>
+<div class="ttc" id="namespacearmnn_xhtml_ad1d5cce2d9e9a5d61c243e5c989112e0ad066db54b89b0912e7e7c6da51e2da51"><div class="ttname"><a href="namespacearmnn.xhtml#ad1d5cce2d9e9a5d61c243e5c989112e0ad066db54b89b0912e7e7c6da51e2da51">armnn::DataLayout::NHWC</a></div></div>
+</div><!-- fragment --></div><!-- contents -->
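
For context, the FuseBatchNormInto* aliases defined at the end of the header wrap FuseBatchNorm in OptimizeForExclusiveConnection so the fusion can be run as an ordinary graph optimization pass. A rough usage sketch, assuming the Optimizer::Pass and MakeOptimizations helpers from ArmNN's internal Optimizer.hpp (the wrapper function itself is illustrative, not part of this file):

#include "FuseBatchNorm.hpp"
#include "Optimizer.hpp"

// Illustrative only: apply the Float32 Convolution2d + BatchNorm fusion to a graph
// that is assumed to contain a Convolution2d layer exclusively connected to a
// BatchNormalization layer.
void ApplyFuseBatchNorm(armnn::Graph& graph)
{
    using namespace armnn::optimizations;
    armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(FuseBatchNormIntoConvolution2DFloat32()));
}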
Generated on Thu Feb 25 2021 17:27:29 for ArmNN by Doxygen 1.8.13