ArmNN
 21.02
FuseBatchNormTests.cpp File Reference
#include "LayersFwd.hpp"
#include <Network.hpp>
#include <ResolveType.hpp>
#include <armnn/INetwork.hpp>
#include <test/TestUtils.hpp>
#include <boost/test/unit_test.hpp>

Go to the source code of this file.

Functions

template<typename Conv2dTest , armnn::DataType ArmnnType, typename ConvDescriptorType = typename Conv2dTest::ConvDescriptorType, typename T = armnn::ResolveType<ArmnnType>>
INetworkPtr CreatNetwork (bool depthwise, bool preventFusing)
 
template<typename Conv2dTest , armnn::DataType ArmnnType, typename ConvDescriptorType = typename Conv2dTest::ConvDescriptorType, typename ConvLayerType = typename Conv2dTest::ConvLayerType, typename T = armnn::ResolveType<ArmnnType>>
void FuseBatchNormIntoConvTest (bool depthwise, float tolerance, armnn::Compute backendId)
 

Function Documentation

◆ CreatNetwork()

INetworkPtr CreatNetwork ( bool  depthwise,
bool  preventFusing 
)

Definition at line 76 of file FuseBatchNormTests.cpp.

References IOutputSlot::Connect(), INetwork::Create(), IConnectableLayer::GetInputSlot(), IConnectableLayer::GetOutputSlot(), BatchNormalizationDescriptor::m_DataLayout, armnn::NHWC, and IOutputSlot::SetTensorInfo().

77 {
78  // Define layers information. Builds: input -> Conv2d (or DepthwiseConv2d when 'depthwise') -> BatchNorm -> output; 'preventFusing' adds a second output fed directly from the conv so its output has two consumers.
79  ConvDescriptorType convolution2dDescriptor;
80  convolution2dDescriptor.m_BiasEnabled = false;
81  convolution2dDescriptor.m_DataLayout = DataLayout::NHWC;
82  convolution2dDescriptor.m_StrideX = 1;
83  convolution2dDescriptor.m_StrideY = 1;
84  BatchNormalizationDescriptor batchNormDescriptor;
85  batchNormDescriptor.m_DataLayout = DataLayout::NHWC;
86 
87  const unsigned int inputDimensionSizes[] = {1, 4, 4, 3}; // NHWCin
88  unsigned int weightsDimensionSizes[] = {4, 2, 2, 3}; // CoutHWCin
89  unsigned int outputDimensionSizes[] = {1, 3, 3, 4}; // NHWCout
90 
91  if (depthwise)
92  {
93  // Depthwise weights layout: M (channel multiplier), Cin, H, W
94  weightsDimensionSizes[0] = 4;
95  weightsDimensionSizes[1] = 3;
96  weightsDimensionSizes[2] = 2;
97  weightsDimensionSizes[3] = 2;
98  outputDimensionSizes[3] = weightsDimensionSizes[0] * weightsDimensionSizes[1]; // Cout = M * Cin
99  }
100  const unsigned int outputChannelSize[] = {outputDimensionSizes[3]}; // Cout
101 
102  TensorInfo inputInfo(4, inputDimensionSizes, ArmnnType);
103  TensorInfo outputInfo(4, outputDimensionSizes, ArmnnType);
104 
105  std::vector<int> weightsIntVector = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
106  11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22,
107  21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
108  31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42};
109  std::vector<T> weightsVector(begin(weightsIntVector), end(weightsIntVector)); // convert to the test's element type T
110  TensorInfo weightsInfo(4, weightsDimensionSizes, ArmnnType);
111  ConstTensor weights(weightsInfo, weightsVector);
112 
113  std::vector<T> biasVector = GetVector<T>(outputDimensionSizes[3], 3.3f, 0.1f); // one bias value per output channel
114  TensorInfo biasInfo(1, outputChannelSize, ArmnnType);
115  ConstTensor bias(biasInfo, biasVector);
116  Optional<ConstTensor> optionalBias = Optional<ConstTensor>(bias); // NOTE(review): m_BiasEnabled is false above yet a bias tensor is supplied - confirm AddConvolution handles this combination
117 
118  std::vector<T> betaVector = GetVector<T>(outputDimensionSizes[3], 0.0f, 0.2f);
119  std::vector<T> gammaVector = GetVector<T>(outputDimensionSizes[3], 0.5f, 0.1f);
120  std::vector<T> meanVector = GetVector<T>(outputDimensionSizes[3], 0.1f, 0.1f);
121  std::vector<T> varianceVector = GetVector<T>(outputDimensionSizes[3], 1.0f, 0.1f);
122 
123  ConstTensor beta (TensorInfo(1, outputChannelSize, ArmnnType), betaVector);
124  ConstTensor gamma (TensorInfo(1, outputChannelSize, ArmnnType), gammaVector);
125  ConstTensor mean (TensorInfo(1, outputChannelSize, ArmnnType), meanVector);
126  ConstTensor variance(TensorInfo(1, outputChannelSize, ArmnnType), varianceVector);
127 
128  // Create a network
129  INetworkPtr network = INetwork::Create();
130 
131  IConnectableLayer* inputLayer = network->AddInputLayer(0);
132 
133  IConnectableLayer* convLayer = Conv2dTest::AddConvolution(network.get(),
134  convolution2dDescriptor,
135  weights,
136  optionalBias,
137  "convolution");
138 
139  IConnectableLayer* batchNormLayer = network->AddBatchNormalizationLayer(batchNormDescriptor,
140  mean,
141  variance,
142  beta,
143  gamma,
144  "batchNorm");
145 
146  IConnectableLayer* outputLayer = network->AddOutputLayer(0);
147  IConnectableLayer* output2Layer = nullptr;
148 
149  if (preventFusing)
150  {
151  output2Layer = network->AddOutputLayer(1); // second consumer of the conv output
152  }
153 
154  // Set layer information
155  inputLayer ->GetOutputSlot(0).SetTensorInfo(inputInfo);
156  convLayer ->GetOutputSlot(0).SetTensorInfo(outputInfo);
157  batchNormLayer->GetOutputSlot(0).SetTensorInfo(outputInfo);
158 
159  // Connect layers
160  inputLayer ->GetOutputSlot(0).Connect(convLayer->GetInputSlot(0));
161  convLayer ->GetOutputSlot(0).Connect(batchNormLayer->GetInputSlot(0));
162  batchNormLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
163 
164  if (preventFusing)
165  {
166  convLayer ->GetOutputSlot(0).Connect(output2Layer->GetInputSlot(0)); // conv output now has two consumers, so it cannot be fused away
167  }
168 
169  return network;
170 }
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
Definition: INetwork.hpp:62
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
virtual void SetTensorInfo(const TensorInfo &tensorInfo)=0
A tensor defined by a TensorInfo (shape and data type) and an immutable backing store.
Definition: Tensor.hpp:314
virtual const IInputSlot & GetInputSlot(unsigned int index) const =0
Get a const input slot handle by slot index.
virtual const IOutputSlot & GetOutputSlot(unsigned int index) const =0
Get the const output slot handle by slot index.
std::unique_ptr< INetwork, void(*)(INetwork *network)> INetworkPtr
Definition: INetwork.hpp:173
virtual int Connect(IInputSlot &destination)=0
A BatchNormalizationDescriptor for the BatchNormalizationLayer.

◆ FuseBatchNormIntoConvTest()

void FuseBatchNormIntoConvTest ( bool  depthwise,
float  tolerance,
armnn::Compute  backendId 
)

Definition at line 177 of file FuseBatchNormTests.cpp.

178 {
179  // FIRST NETWORK: Fused
180  // Construct ArmNN network in which Conv2d + BatchNorm can be fused into a single convolution
181  INetworkPtr networkFused = CreatNetwork<Conv2dTest, ArmnnType>(depthwise, false);
182 
183  // Create ArmNN runtime
184  IRuntimePtr run = IRuntime::Create(IRuntime::CreationOptions()); // default options
185 
186  // Optimise ArmNN network
187  IOptimizedNetworkPtr optNetFused = Optimize(*networkFused, {backendId}, run->GetDeviceSpec());
188 
189  Graph& graphFused = GetGraphForTesting(optNetFused.get());
190 
191  auto checkFusedConv2d = [ ](const armnn::Layer* const layer) -> bool // the fused layer carries this combined name
192  {
193  return IsLayerOfType<ConvLayerType>(layer) &&
194  (layer->GetNameStr() == "fused-batchNorm-into-convolution");
195  };
196 
197  BOOST_CHECK(3 == graphFused.GetNumLayers()); // input + fused conv + output
198  BOOST_TEST(CheckSequence(graphFused.cbegin(),
199  graphFused.cend(),
200  &IsLayerOfType<InputLayer>,
201  checkFusedConv2d,
202  &IsLayerOfType<OutputLayer>));
203 
204  // Load network into runtime
205  NetworkId networkIdentifier;
206  BOOST_TEST(run->LoadNetwork(networkIdentifier, std::move(optNetFused)) == Status::Success);
207 
208  // Create structures for inputs and outputs.
209  std::vector<T> inputDataFused = GetVector<T>(48, 1.0f, 0.1f); // 1*4*4*3 input elements
210 
211  std::vector<T> outputDataFused(36); // 1*3*3*4 output elements
212 
213  if (depthwise)
214  {
215  outputDataFused.resize(108); // 1*3*3*12 elements in the depthwise case
216  }
217 
218  InputTensors inputTensorsFused {
219  {0, ConstTensor(run->GetInputTensorInfo (networkIdentifier, 0), inputDataFused.data())}};
220  OutputTensors outputTensorsFused{
221  {0, Tensor(run->GetOutputTensorInfo(networkIdentifier, 0), outputDataFused.data())}};
222 
223  // Execute network
224  run->EnqueueWorkload(networkIdentifier, inputTensorsFused, outputTensorsFused);
225 
226  // SECOND NETWORK: NotFused
227  // Construct the same ArmNN network, with fusing prevented by a second consumer of the conv output
228  INetworkPtr networkNotFused = CreatNetwork<Conv2dTest, ArmnnType>(depthwise, true);
229 
230  // Create ArmNN runtime
231  IRuntimePtr runNotFused = IRuntime::Create(IRuntime::CreationOptions()); // default options
232 
233  // Optimise ArmNN network
234  IOptimizedNetworkPtr optNetNotFused = Optimize(*networkNotFused, {backendId}, runNotFused->GetDeviceSpec());
235 
236  Graph& graphNotFused = GetGraphForTesting(optNetNotFused.get());
237 
238  BOOST_CHECK(5 == graphNotFused.GetNumLayers()); // input + conv + batchNorm + two outputs
239  BOOST_TEST(CheckSequence(graphNotFused.cbegin(),
240  graphNotFused.cend(),
241  &IsLayerOfType<armnn::InputLayer>,
242  &IsLayerOfType<ConvLayerType>,
243  &IsLayerOfType<armnn::BatchNormalizationLayer>,
244  &IsLayerOfType<armnn::OutputLayer>,
245  &IsLayerOfType<armnn::OutputLayer>));
246 
247  // Load network into runtime
248  NetworkId networkIdentifierNotFused;
249  BOOST_TEST(runNotFused->LoadNetwork(networkIdentifierNotFused, std::move(optNetNotFused)) == Status::Success);
250 
251  // Create structures for inputs and outputs.
252  std::vector<T> inputDataNotFused = GetVector<T>(48, 1.0f, 0.1f); // identical input to the fused run
253 
254  std::vector<T> outputDataNotFused(36);
255  std::vector<T> outputData2NotFused(36);
256 
257  if (depthwise)
258  {
259  outputDataNotFused.resize(108);
260  outputData2NotFused.resize(108);
261  }
262  InputTensors inputTensorsNotFused{
263  {0, ConstTensor(runNotFused->GetInputTensorInfo(networkIdentifierNotFused, 0), inputDataNotFused.data())}};
264  OutputTensors outputTensorsNotFused{
265  {0, Tensor(runNotFused->GetOutputTensorInfo(networkIdentifierNotFused, 0), outputDataNotFused.data())},
266  {1, Tensor(runNotFused->GetOutputTensorInfo(networkIdentifierNotFused, 1), outputData2NotFused.data())}};
267 
268  // Execute network
269  runNotFused->EnqueueWorkload(networkIdentifierNotFused, inputTensorsNotFused, outputTensorsNotFused);
270 
271  // Check that the output of the fused convolution matches the output of the batchNorm in the "NotFused" network
272  for (unsigned int n = 0; n < outputDataFused.size(); ++n)
273  {
274  BOOST_CHECK_CLOSE(outputDataFused[n], outputDataNotFused[n], T(tolerance)); // NOTE(review): BOOST_CHECK_CLOSE treats tolerance as a percentage - confirm call sites pass it as such
275  }
276 }
std::unique_ptr< IRuntime, void(*)(IRuntime *runtime)> IRuntimePtr
Definition: IRuntime.hpp:26
std::vector< std::pair< LayerBindingId, class ConstTensor > > InputTensors
Definition: Tensor.hpp:340
int NetworkId
Definition: IRuntime.hpp:20
A tensor defined by a TensorInfo (shape and data type) and a mutable backing store.
Definition: Tensor.hpp:306
IOptimizedNetworkPtr Optimize(const INetwork &network, const std::vector< BackendId > &backendPreferences, const IDeviceSpec &deviceSpec, const OptimizerOptions &options=OptimizerOptions(), Optional< std::vector< std::string > &> messages=EmptyOptional())
Create an optimized version of the network.
Definition: Network.cpp:1502
A tensor defined by a TensorInfo (shape and data type) and an immutable backing store.
Definition: Tensor.hpp:314
std::vector< std::pair< LayerBindingId, class Tensor > > OutputTensors
Definition: Tensor.hpp:341
std::unique_ptr< IOptimizedNetwork, void(*)(IOptimizedNetwork *network)> IOptimizedNetworkPtr
Definition: INetwork.hpp:174
Graph & GetGraphForTesting(IOptimizedNetwork *optNet)
Definition: TestUtils.cpp:25
bool CheckSequence(const armnn::Graph::ConstIterator first, const armnn::Graph::ConstIterator last)
Definition: TestUtils.hpp:21
std::unique_ptr< INetwork, void(*)(INetwork *network)> INetworkPtr
Definition: INetwork.hpp:173