ArmNN
 21.02
OptimizerTests.cpp
Go to the documentation of this file.
1 //
2 // Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
6 #include "TestUtils.hpp"
7 
8 #include <BackendSettings.hpp>
9 #include <Graph.hpp>
10 #include <Network.hpp>
11 #include <Optimizer.hpp>
12 
14 #include <armnn/INetwork.hpp>
16 
19 
23 
24 #include <boost/test/unit_test.hpp>
25 
26 using namespace armnn;
27 
28 namespace
29 {
30 
31 void CreateLSTMLayerHelper(Graph &graph, bool CifgEnabled)
32 {
33  LstmDescriptor layerDesc;
34  layerDesc.m_ActivationFunc = 4;
35  layerDesc.m_ClippingThresCell = 0.2f;
36  layerDesc.m_ClippingThresProj = 0.4f;
37  layerDesc.m_CifgEnabled = CifgEnabled;
38  layerDesc.m_PeepholeEnabled = false;
39  layerDesc.m_ProjectionEnabled = false;
40 
41  LstmLayer* const layer = graph.AddLayer<LstmLayer>(layerDesc, "layer");
42  unsigned int batchSize = 3;
43  unsigned int inputSize = 2;
44  unsigned int numUnits = 4;
45  unsigned int outputSize = 4;
46 
47  layer->m_BasicParameters.m_InputToForgetWeights = std::make_unique<ScopedCpuTensorHandle>
48  (TensorInfo({ numUnits, inputSize }, DataType::Float32));
49  layer->m_BasicParameters.m_InputToCellWeights = std::make_unique<ScopedCpuTensorHandle>
50  (TensorInfo({ numUnits, inputSize }, DataType::Float32));
51  layer->m_BasicParameters.m_InputToOutputWeights = std::make_unique<ScopedCpuTensorHandle>
52  (TensorInfo({ numUnits, inputSize }, DataType::Float32));
53  layer->m_BasicParameters.m_RecurrentToForgetWeights = std::make_unique<ScopedCpuTensorHandle>
54  (TensorInfo({ numUnits, outputSize }, DataType::Float32));
55  layer->m_BasicParameters.m_RecurrentToCellWeights = std::make_unique<ScopedCpuTensorHandle>
56  (TensorInfo({ numUnits, outputSize }, DataType::Float32));
57  layer->m_BasicParameters.m_RecurrentToOutputWeights = std::make_unique<ScopedCpuTensorHandle>
58  (TensorInfo({ numUnits, outputSize }, DataType::Float32));
59  layer->m_BasicParameters.m_ForgetGateBias = std::make_unique<ScopedCpuTensorHandle>
60  (TensorInfo({ numUnits }, DataType::Float32));
61  layer->m_BasicParameters.m_CellBias = std::make_unique<ScopedCpuTensorHandle>
62  (TensorInfo({ numUnits }, DataType::Float32));
63  layer->m_BasicParameters.m_OutputGateBias = std::make_unique<ScopedCpuTensorHandle>
64  (TensorInfo({ numUnits }, DataType::Float32));
65 
66  layer->m_BasicParameters.m_InputToForgetWeights->Allocate();
67  layer->m_BasicParameters.m_InputToCellWeights->Allocate();
68  layer->m_BasicParameters.m_InputToOutputWeights->Allocate();
72  layer->m_BasicParameters.m_ForgetGateBias->Allocate();
73  layer->m_BasicParameters.m_CellBias->Allocate();
74  layer->m_BasicParameters.m_OutputGateBias->Allocate();
75 
76  if (!layerDesc.m_CifgEnabled)
77  {
78  layer->m_CifgParameters.m_InputToInputWeights = std::make_unique<ScopedCpuTensorHandle>
79  (TensorInfo({ numUnits, inputSize }, DataType::Float32));
80  layer->m_CifgParameters.m_RecurrentToInputWeights = std::make_unique<ScopedCpuTensorHandle>
81  (TensorInfo({ numUnits, outputSize }, DataType::Float32));
82  layer->m_CifgParameters.m_InputGateBias = std::make_unique<ScopedCpuTensorHandle>
83  (TensorInfo({ numUnits }, DataType::Float32));
84  layer->m_CifgParameters.m_InputToInputWeights->Allocate();
86  layer->m_CifgParameters.m_InputGateBias->Allocate();
87  }
88 
89  if (layerDesc.m_ProjectionEnabled)
90  {
91  layer->m_ProjectionParameters.m_ProjectionWeights = std::make_unique<ScopedCpuTensorHandle>
92  (TensorInfo({ outputSize, numUnits }, DataType::Float32));
93  layer->m_ProjectionParameters.m_ProjectionBias = std::make_unique<ScopedCpuTensorHandle>
94  (TensorInfo({ outputSize }, DataType::Float32));
96  layer->m_ProjectionParameters.m_ProjectionBias->Allocate();
97  }
98 
99  if (layerDesc.m_PeepholeEnabled)
100  {
101  if (!layerDesc.m_CifgEnabled)
102  {
103  layer->m_PeepholeParameters.m_CellToInputWeights = std::make_unique<ScopedCpuTensorHandle>
104  (TensorInfo({ numUnits }, DataType::Float32));
105  layer->m_PeepholeParameters.m_CellToInputWeights->Allocate();
106  }
107  layer->m_PeepholeParameters.m_CellToForgetWeights = std::make_unique<ScopedCpuTensorHandle>
108  (TensorInfo({ numUnits }, DataType::Float32));
109  layer->m_PeepholeParameters.m_CellToOutputWeights = std::make_unique<ScopedCpuTensorHandle>
110  (TensorInfo({ numUnits }, DataType::Float32));
111  layer->m_PeepholeParameters.m_CellToForgetWeights->Allocate();
112  layer->m_PeepholeParameters.m_CellToOutputWeights->Allocate();
113  }
114 
115  // create input and output layers
116  Layer* const input = graph.AddLayer<InputLayer>(0, "input");
117  Layer* const outputStateIn = graph.AddLayer<InputLayer>(1, "outputStateIn");
118  Layer* const cellStateIn = graph.AddLayer<InputLayer>(2, "cellStateIn");
119  Layer* const scratchBuffer = graph.AddLayer<OutputLayer>(0, "scratchBuffer");
120  Layer* const outputStateOut = graph.AddLayer<OutputLayer>(1, "outputStateOut");
121  Layer* const cellStateOut = graph.AddLayer<OutputLayer>(2, "cellStateOut");
122  Layer* const output = graph.AddLayer<OutputLayer>(3, "output");
123 
124  // connect up
125  armnn::TensorInfo lstmTensorInfo1({ batchSize, inputSize }, DataType::Float32);
126  armnn::TensorInfo lstmTensorInfo2({ batchSize, numUnits}, DataType::Float32);
127  armnn::TensorInfo lstmTensorInfo3({ batchSize, outputSize }, DataType::Float32);
128  armnn::TensorInfo lstmTensorInfoScratchBuff({ batchSize, numUnits * (layerDesc.m_CifgEnabled ? 3 : 4) },
130 
131  Connect(input, layer, lstmTensorInfo1, 0, 0);
132  Connect(cellStateIn, layer, lstmTensorInfo2, 0, 1);
133  Connect(outputStateIn, layer, lstmTensorInfo3, 0, 2);
134  Connect(layer, scratchBuffer, lstmTensorInfoScratchBuff, 0, 0);
135  Connect(layer, outputStateOut, lstmTensorInfo3, 1, 0);
136  Connect(layer, cellStateOut, lstmTensorInfo2, 2, 0);
137  Connect(layer, output, lstmTensorInfo3, 3, 0);
138 }
139 
140 }
141 
143 using namespace armnn::optimizations;
144 
145 BOOST_AUTO_TEST_CASE(LSTMValidateTensorShapesFromInputsCIFGDisabledTest)
146 {
147  Graph graph;
148 
149  //Helper function creates graph containing LSTM layer with required input and output layers
150  CreateLSTMLayerHelper(graph, false);
151 
152  //This function used to call ValidateShapesFromInputs();
153  BOOST_CHECK_NO_THROW(graph.InferTensorInfos());
154 }
155 
156 BOOST_AUTO_TEST_CASE(LSTMValidateTensorShapesFromInputsCIFGEnabledTest)
157 {
158  Graph graph;
159 
160  //Helper function creates graph containing LSTM layer with required input and output layers
161  CreateLSTMLayerHelper(graph, true);
162 
163  //This function used to call ValidateShapesFromInputs();
164  BOOST_CHECK_NO_THROW(graph.InferTensorInfos());
165 }
166 
167 BOOST_AUTO_TEST_CASE(InsertConvertersTest)
168 {
169  const armnn::TensorInfo info({ 1, 5, 2, 3 }, armnn::DataType::Float16);
170 
171  armnn::Graph graph;
172 
173  armnn::LayerBindingId inputId = 0;
174 
175  armnn::Layer* head = graph.AddLayer<armnn::OutputLayer>(0, "output");
176 
177  head = graph.InsertNewLayer<armnn::AdditionLayer>(head->GetInputSlot(0), "");
179 
180  graph.InsertNewLayer<armnn::InputLayer>(head->GetInputSlot(1), inputId++, "")
181  ->GetOutputHandler().SetTensorInfo(info);
182 
183  head = graph.InsertNewLayer<armnn::FloorLayer>(head->GetInputSlot(0), "");
185 
186  head = graph.InsertNewLayer<armnn::MemCopyLayer>(head->GetInputSlot(0), "");
188 
189  graph.InsertNewLayer<armnn::InputLayer>(head->GetInputSlot(0), inputId++, "")
190  ->GetOutputHandler().SetTensorInfo(info);
191 
192  // Check graph layer sequence before inserting convert layers
193  BOOST_TEST(CheckSequence(graph.cbegin(),
194  graph.cend(),
195  &IsLayerOfType<armnn::InputLayer>,
196  &IsLayerOfType<armnn::InputLayer>,
197  &IsLayerOfType<armnn::MemCopyLayer>,
198  &IsLayerOfType<armnn::FloorLayer>,
199  &IsLayerOfType<armnn::AdditionLayer>,
200  &IsLayerOfType<armnn::OutputLayer>));
201 
202  // Check layers have Float16 DataType
203  for (auto& layer : graph)
204  {
205  if(layer->GetType()==LayerType::Floor || layer->GetType() == LayerType::Addition)
206  {
209  }
210  }
211 
212  // Insert convert layers either side of unsupported layer
213  for (auto& layer : graph)
214  {
215  if(layer->GetType()==LayerType::Floor || layer->GetType() == LayerType::Addition)
216  {
218  InsertConvertFp32ToFp16LayersAfter(graph, *layer);
219  }
220  }
221 
222  // Check layers have correct DataType after inserting convert layers
223  for (auto& layer : graph)
224  {
225  if (layer->GetType()==LayerType::Floor || layer->GetType() == LayerType::Addition)
226  {
229  }
230  else if (layer->GetType() == LayerType::ConvertFp16ToFp32)
231  {
234  }
235  else if (layer->GetType() == LayerType::ConvertFp32ToFp16)
236  {
239  }
240  }
241 
242  // Check sequence of layers after inserting convert layers
243  BOOST_TEST(CheckSequence(graph.cbegin(),
244  graph.cend(),
245  &IsLayerOfType<armnn::InputLayer>,
246  &IsLayerOfType<armnn::InputLayer>,
247  &IsLayerOfType<armnn::ConvertFp16ToFp32Layer>,
248  &IsLayerOfType<armnn::MemCopyLayer>,
249  &IsLayerOfType<armnn::ConvertFp16ToFp32Layer>,
250  &IsLayerOfType<armnn::FloorLayer>,
251  &IsLayerOfType<armnn::ConvertFp32ToFp16Layer>,
252  &IsLayerOfType<armnn::ConvertFp16ToFp32Layer>,
253  &IsLayerOfType<armnn::AdditionLayer>,
254  &IsLayerOfType<armnn::ConvertFp32ToFp16Layer>,
255  &IsLayerOfType<armnn::OutputLayer>));
256 }
257 
258 void CreateConvolution2dGraph(Graph &graph, const unsigned int* inputShape,
259  const unsigned int* weightsShape, const unsigned int* outputShape,
260  DataLayout dataLayout = DataLayout::NCHW)
261 {
262  armnn::TensorInfo inputInfo(4, inputShape, DataType::Float32);
263  armnn::TensorInfo outputInfo(4, outputShape, DataType::Float32);
264 
265  std::vector<float> weightsVector(90);
266  armnn::ConstTensor weights(armnn::TensorInfo(4, weightsShape, armnn::DataType::Float32), weightsVector);
267 
269  desc.m_BiasEnabled = false;
270  desc.m_StrideX = 1;
271  desc.m_StrideY = 1;
272  desc.m_DataLayout = dataLayout;
273 
274  Layer* input = graph.AddLayer<InputLayer>(0, "input");
275  input->GetOutputSlot().SetTensorInfo(inputInfo);
276 
277  Convolution2dLayer* layer = graph.AddLayer<Convolution2dLayer>(desc, "conv2d");
278  layer->m_Weight = std::make_unique<armnn::ScopedCpuTensorHandle>(weights);
279  layer->GetOutputSlot().SetTensorInfo(outputInfo);
280 
281  Layer* output = graph.AddLayer<OutputLayer>(0, "output");
282  input->GetOutputSlot().Connect(layer->GetInputSlot(0));
283  layer->GetOutputSlot().Connect(output->GetInputSlot(0));
284 }
285 
286 BOOST_AUTO_TEST_CASE(Conv2dValidateTensorShapesFromInputs)
287 {
288  Graph graph;
289  const unsigned int inputShape[] = { 1, 3, 8, 16 };
290  const unsigned int weightsShape[] = { 2, 3, 5, 3 };
291  const unsigned int outputShape[] = { 1, 2, 4, 14 };
292  CreateConvolution2dGraph(graph, inputShape, weightsShape, outputShape);
293 
294  BOOST_CHECK_NO_THROW(graph.InferTensorInfos());
295 }
296 
297 BOOST_AUTO_TEST_CASE(Conv2dValidateTensorShapesFromInputsNhwc)
298 {
299  Graph graph;
300  const unsigned int inputShape[] = { 1, 8, 16, 3 };
301  const unsigned int weightsShape[] = { 2, 5, 3, 3 };
302  const unsigned int outputShape[] = { 1, 4, 14, 2 };
303  CreateConvolution2dGraph(graph, inputShape, weightsShape, outputShape, DataLayout::NHWC);
304 
305  BOOST_CHECK_NO_THROW(graph.InferTensorInfos());
306 }
307 
308 void CreateDepthwiseConvolution2dGraph(Graph &graph, const unsigned int* inputShape,
309  const unsigned int* weightsShape, const unsigned int* outputShape,
310  DataLayout dataLayout = DataLayout::NCHW)
311 {
312  armnn::TensorInfo inputInfo(4, inputShape, DataType::Float32);
313  armnn::TensorInfo outputInfo(4, outputShape, DataType::Float32);
314 
315  std::vector<float> weightsVector(18);
316  armnn::ConstTensor weights(armnn::TensorInfo(4, weightsShape, armnn::DataType::Float32), weightsVector);
317 
319  desc.m_BiasEnabled = false;
320  desc.m_StrideX = 1;
321  desc.m_StrideY = 1;
322  desc.m_DataLayout = dataLayout;
323 
324  Layer* input = graph.AddLayer<InputLayer>(0, "input");
325  input->GetOutputSlot().SetTensorInfo(inputInfo);
326 
327  DepthwiseConvolution2dLayer* layer = graph.AddLayer<DepthwiseConvolution2dLayer>(desc, "depthwiseConv2d");
328  layer->m_Weight = std::make_unique<armnn::ScopedCpuTensorHandle>(weights);
329  layer->GetOutputSlot().SetTensorInfo(outputInfo);
330 
331  Layer* output = graph.AddLayer<OutputLayer>(0, "output");
332  input->GetOutputSlot().Connect(layer->GetInputSlot(0));
333  layer->GetOutputSlot().Connect(output->GetInputSlot(0));
334 }
335 
336 BOOST_AUTO_TEST_CASE(DepthwiseConv2dValidateTensorShapesFromInputs)
337 {
338  Graph graph;
339  const unsigned int inputShape[] = { 1, 2, 3, 3 };
340  const unsigned int weightsShape[] = { 1, 2, 3, 3 };
341  const unsigned int outputShape[] = { 1, 2, 1, 1 };
342  CreateDepthwiseConvolution2dGraph(graph, inputShape, weightsShape, outputShape);
343 
344  BOOST_CHECK_NO_THROW(graph.InferTensorInfos());
345 }
346 
347 BOOST_AUTO_TEST_CASE(DepthwiseConv2dValidateTensorShapesFromInputsNhwc)
348 {
349  Graph graph;
350  const unsigned int inputShape[] = { 1, 3, 3, 2 };
351  const unsigned int weightsShape[] = { 1, 2, 3, 3 };
352  const unsigned int outputShape[] = { 1, 1, 1, 2 };
353  CreateDepthwiseConvolution2dGraph(graph, inputShape, weightsShape, outputShape, DataLayout::NHWC);
354 
355  BOOST_CHECK_NO_THROW(graph.InferTensorInfos());
356 }
357 
358 void CreatePooling2dGraph(Graph& graph, const unsigned int* inputShape, const unsigned int* outputShape,
359  DataLayout dataLayout = DataLayout::NCHW)
360 {
361  armnn::TensorInfo inputInfo(4, inputShape, DataType::Float32);
362  armnn::TensorInfo outputInfo(4, outputShape, DataType::Float32);
363 
364  Pooling2dDescriptor desc;
366  desc.m_PoolWidth = desc.m_PoolHeight = 100;
367  desc.m_StrideX = desc.m_StrideY = 5;
368  desc.m_PadLeft = 50;
369  desc.m_PadRight = 50;
370  desc.m_PadTop = 50;
371  desc.m_PadBottom = 50;
373  desc.m_DataLayout = dataLayout;
374 
375  Layer* input = graph.AddLayer<InputLayer>(0, "input");
376  input->GetOutputSlot().SetTensorInfo(inputInfo);
377 
378  Pooling2dLayer* layer = graph.AddLayer<Pooling2dLayer>(desc, "pooling2d");
379  layer->GetOutputSlot().SetTensorInfo(outputInfo);
380 
381  Layer* output = graph.AddLayer<OutputLayer>(0, "output");
382  input->GetOutputSlot().Connect(layer->GetInputSlot(0));
383  layer->GetOutputSlot().Connect(output->GetInputSlot(0));
384 }
385 
386 BOOST_AUTO_TEST_CASE(Pooling2dValidateTensorShapesFromInputs)
387 {
388  Graph graph;
389  const unsigned int inputShape[] = { 5, 3, 52, 60 };
390  const unsigned int outputShape[] = { 5, 3, 11, 13 };
391  CreatePooling2dGraph(graph, inputShape, outputShape, DataLayout::NCHW);
392 
393  BOOST_CHECK_NO_THROW(graph.InferTensorInfos());
394 }
395 
396 BOOST_AUTO_TEST_CASE(Pooling2dValidateTensorShapesFromInputsNhwc)
397 {
398  Graph graph;
399  const unsigned int inputShape[] = { 5, 52, 60, 3 };
400  const unsigned int outputShape[] = { 5, 11, 13, 3 };
401  CreatePooling2dGraph(graph, inputShape, outputShape, DataLayout::NHWC);
402 
403  BOOST_CHECK_NO_THROW(graph.InferTensorInfos());
404 }
405 
406 void CreateResizeBilinearGraph(Graph& graph, const unsigned int* inputShape, const unsigned int* outputShape,
407  DataLayout dataLayout = DataLayout::NCHW)
408 {
409  TensorInfo inputInfo(4, inputShape, DataType::Float32);
410  TensorInfo outputInfo(4, outputShape, DataType::Float32);
411 
412  ResizeDescriptor desc;
414  desc.m_TargetHeight = 3;
415  desc.m_TargetWidth = 4;
416  desc.m_DataLayout = dataLayout;
417 
418  Layer* input = graph.AddLayer<InputLayer>(0, "input");
419  input->GetOutputSlot().SetTensorInfo(inputInfo);
420 
421  ResizeLayer* layer = graph.AddLayer<ResizeLayer>(desc, "resizeBilinear");
422  layer->GetOutputSlot().SetTensorInfo(outputInfo);
423 
424  Layer* output = graph.AddLayer<OutputLayer>(0, "output");
425  input->GetOutputSlot().Connect(layer->GetInputSlot(0));
426  layer->GetOutputSlot().Connect(output->GetInputSlot(0));
427 }
428 
429 BOOST_AUTO_TEST_CASE(ResizeBilinearValidateTensorShapesFromInputs)
430 {
431  Graph graph;
432  const unsigned int inputShape[] = { 1, 2, 4, 5 };
433  const unsigned int outputShape[] = { 1, 2, 3, 4 };
434  CreateResizeBilinearGraph(graph, inputShape, outputShape);
435 
436  BOOST_CHECK_NO_THROW(graph.InferTensorInfos());
437 }
438 
439 BOOST_AUTO_TEST_CASE(ResizeBilinearValidateTensorShapesFromInputsNhwc)
440 {
441  Graph graph;
442  const unsigned int inputShape[] = { 1, 4, 5, 2 };
443  const unsigned int outputShape[] = { 1, 3, 4, 2 };
444  CreateResizeBilinearGraph(graph, inputShape, outputShape, DataLayout::NHWC);
445 
446  BOOST_CHECK_NO_THROW(graph.InferTensorInfos());
447 }
448 
449 void CreateGatherGraph(Graph& graph, const armnn::TensorInfo& paramsInfo, const armnn::TensorInfo& indicesInfo,
450  const armnn::TensorInfo& outputInfo)
451 {
452  Layer* input0 = graph.AddLayer<InputLayer>(0, "params");
453  input0->GetOutputSlot().SetTensorInfo(paramsInfo);
454 
455  Layer* input1 = graph.AddLayer<InputLayer>(1, "indices");
456  input1->GetOutputSlot().SetTensorInfo(indicesInfo);
457 
458  GatherDescriptor descriptor;
459  GatherLayer* layer = graph.AddLayer<GatherLayer>(descriptor, "gather");
460  layer->GetOutputSlot().SetTensorInfo(outputInfo);
461 
462  Layer* output = graph.AddLayer<OutputLayer>(0, "output");
463  input0->GetOutputSlot().Connect(layer->GetInputSlot(0));
464  input1->GetOutputSlot().Connect(layer->GetInputSlot(1));
465  layer->GetOutputSlot().Connect(output->GetInputSlot(0));
466 }
467 
468 BOOST_AUTO_TEST_CASE(GatherValidateTensorShapesFromInputs)
469 {
470  Graph graph;
471  armnn::TensorInfo paramsInfo({10, 5}, DataType::Float32);
472  armnn::TensorInfo indicesInfo({3}, DataType::Signed32);
473  armnn::TensorInfo outputInfo({3, 5}, DataType::Float32);
474 
475  CreateGatherGraph(graph, paramsInfo, indicesInfo, outputInfo);
476 
477  BOOST_CHECK_NO_THROW(graph.InferTensorInfos());
478 }
479 
480 BOOST_AUTO_TEST_CASE(GatherValidateTensorShapesFromInputs1DParams)
481 {
482  Graph graph;
483  armnn::TensorInfo paramsInfo({8}, DataType::Float32);
484  armnn::TensorInfo indicesInfo({5}, DataType::Signed32);
485  armnn::TensorInfo outputInfo( {5}, DataType::Float32);
486 
487  CreateGatherGraph(graph, paramsInfo, indicesInfo, outputInfo);
488 
489  BOOST_CHECK_NO_THROW(graph.InferTensorInfos());
490 }
491 
492 BOOST_AUTO_TEST_CASE(GatherValidateTensorShapesFromInputsMultiDimIndices)
493 {
494  Graph graph;
495  armnn::TensorInfo paramsInfo({3, 2, 5}, DataType::Float32);
496  armnn::TensorInfo indicesInfo({2, 2}, DataType::Signed32);
497  armnn::TensorInfo outputInfo({2, 2, 2, 5}, DataType::Float32);
498 
499  CreateGatherGraph(graph, paramsInfo, indicesInfo, outputInfo);
500 
501  BOOST_CHECK_NO_THROW(graph.InferTensorInfos());
502 }
503 
504 BOOST_AUTO_TEST_CASE(DetectionPostProcessValidateTensorShapes)
505 {
506  Graph graph;
507  armnn::TensorInfo boxEncodingsInfo({1, 10, 4}, DataType::QAsymmU8);
509  std::vector<uint8_t> anchorsVector(40);
511 
512  armnn::TensorInfo detectionBoxesInfo({1, 3, 4}, DataType::QAsymmU8);
513  armnn::TensorInfo detectionScoresInfo({1, 3}, DataType::QAsymmU8);
514  armnn::TensorInfo detectionClassesInfo({1, 3}, DataType::QAsymmU8);
515  armnn::TensorInfo numDetectionInfo({1}, DataType::QAsymmU8);
516 
517  Layer* input0 = graph.AddLayer<InputLayer>(0, "boxEncodings");
518  input0->GetOutputSlot().SetTensorInfo(boxEncodingsInfo);
519 
520  Layer* input1 = graph.AddLayer<InputLayer>(1, "score");
522 
524  descriptor.m_MaxDetections = 3;
525 
526  DetectionPostProcessLayer* layer = graph.AddLayer<DetectionPostProcessLayer>(descriptor, "detectionPostProcess");
527  layer->m_Anchors = std::make_unique<armnn::ScopedCpuTensorHandle>(anchors);
528  layer->GetOutputSlot(0).SetTensorInfo(detectionBoxesInfo);
529  layer->GetOutputSlot(1).SetTensorInfo(detectionScoresInfo);
530  layer->GetOutputSlot(2).SetTensorInfo(detectionClassesInfo);
531  layer->GetOutputSlot(3).SetTensorInfo(numDetectionInfo);
532 
533  input0->GetOutputSlot().Connect(layer->GetInputSlot(0));
534  input1->GetOutputSlot().Connect(layer->GetInputSlot(1));
535 
536  BOOST_CHECK_NO_THROW(graph.InferTensorInfos());
537 }
538 
539 BOOST_AUTO_TEST_CASE(FoldPadLayerIntoConvolution2dLayer)
540 {
541  Graph graph;
542  const unsigned int inputShape[] = { 1, 2, 2, 3 };
543  const unsigned int paddedShape[] = { 1, 6, 6, 3 };
544  const unsigned int weightsShape[] = { 1, 2, 3, 3 };
545  const unsigned int outputShape[] = { 1, 2, 1, 1 };
546 
547  armnn::TensorInfo inputInfo(4, inputShape, DataType::Float32);
548  armnn::TensorInfo paddedInfo(4, paddedShape, DataType::Float32);
549  armnn::TensorInfo outputInfo(4, outputShape, DataType::Float32);
550 
551  Layer* input = graph.AddLayer<InputLayer>(0, "input");
552  input->GetOutputSlot().SetTensorInfo(inputInfo);
553 
554  PadDescriptor padDescriptor({{ 0, 0 }, { 2, 2 }, { 2, 2 }, { 0, 0 }});
555 
556  PadLayer* padLayer = graph.AddLayer<PadLayer>(padDescriptor, "pad");
557  padLayer->GetOutputSlot().SetTensorInfo(paddedInfo);
558 
559  Convolution2dDescriptor convolution2dDescriptor;
560  convolution2dDescriptor.m_BiasEnabled = false;
561  convolution2dDescriptor.m_StrideX = 1;
562  convolution2dDescriptor.m_StrideY = 1;
563  convolution2dDescriptor.m_DataLayout = DataLayout::NHWC;
564 
565  std::vector<float> weightsVector(18);
566  armnn::ConstTensor weights(armnn::TensorInfo(4, weightsShape, armnn::DataType::Float32), weightsVector);
567 
568  Convolution2dLayer* conv2dLayer = graph.AddLayer<Convolution2dLayer>(convolution2dDescriptor,"conv2d");
569  conv2dLayer->m_Weight = std::make_unique<armnn::ScopedCpuTensorHandle>(weights);
570  conv2dLayer->GetOutputSlot().SetTensorInfo(outputInfo);
571 
572  Layer* output = graph.AddLayer<OutputLayer>(0, "output");
573 
574  // Connect up layers - input -> pad -> conv2d -> output
575  input->GetOutputSlot().Connect(padLayer->GetInputSlot(0));
576  padLayer->GetOutputSlot().Connect(conv2dLayer->GetInputSlot(0));
577  conv2dLayer->GetOutputSlot().Connect(output->GetInputSlot(0));
578 
579  auto checkSimpleConv2d = [ ](const armnn::Layer* const layer) -> bool
580  {
581  const auto conv2dLayer = static_cast<const armnn::Convolution2dLayer*>(layer);
582  const auto conv2dLayerParams = conv2dLayer->GetParameters();
583  return IsLayerOfType<armnn::Convolution2dLayer>(layer) &&
584  (layer->GetNameStr() == "conv2d") &&
585  (conv2dLayerParams.m_PadLeft == 0) &&
586  (conv2dLayerParams.m_PadRight == 0) &&
587  (conv2dLayerParams.m_PadTop == 0) &&
588  (conv2dLayerParams.m_PadBottom == 0) &&
589  (conv2dLayerParams.m_BiasEnabled == false) &&
590  (conv2dLayerParams.m_StrideX == 1) &&
591  (conv2dLayerParams.m_StrideY == 1) &&
592  (conv2dLayerParams.m_DataLayout == DataLayout::NHWC);
593  };
594 
595  BOOST_TEST(CheckSequence(graph.cbegin(),
596  graph.cend(),
597  &IsLayerOfType<armnn::InputLayer>,
598  &IsLayerOfType<armnn::PadLayer>,
599  checkSimpleConv2d,
600  &IsLayerOfType<armnn::OutputLayer>));
601 
603 
604  auto checkPadFoldedIntoConv2d = [ ](const armnn::Layer* const layer) -> bool
605  {
606  const auto conv2dLayer = static_cast<const armnn::Convolution2dLayer*>(layer);
607  const auto conv2dLayerParams = conv2dLayer->GetParameters();
608  return IsLayerOfType<armnn::Convolution2dLayer>(layer) &&
609  (layer->GetNameStr() == "folded-pad-into-conv2d") &&
610  (conv2dLayerParams.m_PadLeft == 2) &&
611  (conv2dLayerParams.m_PadRight == 2) &&
612  (conv2dLayerParams.m_PadTop == 2) &&
613  (conv2dLayerParams.m_PadBottom == 2) &&
614  (conv2dLayerParams.m_BiasEnabled == false) &&
615  (conv2dLayerParams.m_StrideX == 1) &&
616  (conv2dLayerParams.m_StrideY == 1) &&
617  (conv2dLayerParams.m_DataLayout == DataLayout::NHWC);
618  };
619 
620  BOOST_TEST(CheckSequence(graph.cbegin(),
621  graph.cend(),
622  &IsLayerOfType<armnn::InputLayer>,
623  checkPadFoldedIntoConv2d,
624  &IsLayerOfType<armnn::OutputLayer>));
625 }
626 
627 class MockLayerSupport : public LayerSupportBase {
628 public:
629  bool IsInputSupported(const TensorInfo& /*input*/,
630  Optional<std::string&> /*reasonIfUnsupported = EmptyOptional()*/) const override
631  {
632  return true;
633  }
634 
635  bool IsOutputSupported(const TensorInfo& /*input*/,
636  Optional<std::string&> /*reasonIfUnsupported = EmptyOptional()*/) const override
637  {
638  return true;
639  }
640 
641  bool IsActivationSupported(const TensorInfo& /*input0*/,
642  const TensorInfo& /*output*/,
643  const ActivationDescriptor& /*descriptor*/,
644  Optional<std::string&> /*reasonIfUnsupported = EmptyOptional()*/) const override
645  {
646  return true;
647  }
648 };
649 
650 template<typename NamePolicy>
651 class MockBackend : public IBackendInternal
652 {
653 public:
654  MockBackend() = default;
655  ~MockBackend() = default;
656 
657  static const BackendId& GetIdStatic() { return NamePolicy::GetIdStatic(); }
658  const BackendId& GetId() const override { return GetIdStatic(); }
659 
660  IBackendInternal::IMemoryManagerUniquePtr CreateMemoryManager() const override { return nullptr; };
661 
662  IBackendInternal::IWorkloadFactoryPtr CreateWorkloadFactory(
663  const IBackendInternal::IMemoryManagerSharedPtr&) const override { return nullptr; }
664 
665  IBackendInternal::IBackendContextPtr CreateBackendContext(const IRuntime::CreationOptions&) const override
666  {
667  return nullptr;
668  }
669 
670  IBackendInternal::Optimizations GetOptimizations() const override { return {}; }
671  IBackendInternal::ILayerSupportSharedPtr GetLayerSupport() const override
672  {
673  return std::make_shared<MockLayerSupport>();
674  }
675 
676  OptimizationViews OptimizeSubgraphView(const SubgraphView&) const override
677  {
678  return {};
679  };
680 };
681 
682 BOOST_AUTO_TEST_CASE(BackendHintTest)
683 {
684  class TestBackendAssignment : public LayerVisitorBase<VisitorNoThrowPolicy>
685  {
686  public:
687  void VisitInputLayer(const IConnectableLayer* layer,
688  LayerBindingId id,
689  const char* name = nullptr) override
690  {
691  IgnoreUnused(id, name);
692  auto inputLayer = PolymorphicDowncast<const InputLayer*>(layer);
693  BOOST_TEST((inputLayer->GetBackendId() == "MockBackend"));
694  }
695 
696  void VisitOutputLayer(const IConnectableLayer* layer,
697  LayerBindingId id,
698  const char* name = nullptr) override
699  {
700  IgnoreUnused(id, name);
701  auto outputLayer = PolymorphicDowncast<const OutputLayer*>(layer);
702  BOOST_TEST((outputLayer->GetBackendId() == "MockBackend"));
703  }
704 
705  void VisitActivationLayer(const IConnectableLayer* layer,
706  const ActivationDescriptor& activationDescriptor,
707  const char* name = nullptr) override
708  {
709  IgnoreUnused(activationDescriptor, name);
710  auto activation = PolymorphicDowncast<const ActivationLayer*>(layer);
711  BOOST_TEST((activation->GetBackendId() == "CustomBackend"));
712  }
713  };
714 
715  struct CustomPolicy
716  {
717  static const BackendId& GetIdStatic()
718  {
719  static BackendId id="CustomBackend";
720  return id;
721  }
722  };
723 
724  struct MockPolicy
725  {
726  static const BackendId& GetIdStatic()
727  {
728  static BackendId id="MockBackend";
729  return id;
730  }
731  };
732 
733  auto& backendRegistry = BackendRegistryInstance();
734 
735  backendRegistry.Register("MockBackend", [](){
736  return std::make_unique<MockBackend<MockPolicy>>();
737  });
738 
739  backendRegistry.Register("CustomBackend", [](){
740  return std::make_unique<MockBackend<CustomPolicy>>();
741  });
742 
743  // Define the network
744  auto network = INetwork::Create();
747 
748  std::unique_ptr<Graph> graph = std::make_unique<Graph>();
749  auto input = graph->AddLayer<InputLayer>(0, "input");
750  auto act = graph->AddLayer<ActivationLayer>(desc, "activation");
751  auto output = graph->AddLayer<OutputLayer>(0, "output");
752 
753  BackendId customBackendId("CustomBackend");
754  act->BackendSelectionHint(customBackendId);
755 
756  input->GetOutputSlot(0).Connect(act->GetInputSlot(0));
757  act->GetOutputSlot(0).Connect(output->GetInputSlot(0));
758 
759  OptimizedNetworkImpl optNet(std::move(graph));
760 
761  // Get the optimized graph
762  Graph& optGraph = optNet.GetGraph();
763 
764  std::vector<BackendId> prefs{"MockBackend", "CustomBackend"};
765 
766  BackendIdSet availableBackends = {"CustomBackend", "MockBackend"};
767  DeviceSpec spec(availableBackends);
768 
769  BackendSettings backendSettings(prefs, spec);
770 
771  // Assign an available backend to each layer
772  Graph::Iterator firstLayer = optGraph.begin();
773  Graph::Iterator lastLayer = optGraph.end();
774 
775  OptimizedNetworkImpl* optNetObjPtr = &optNet;
776  OptimizationResult res = AssignBackends(optNetObjPtr,
777  backendSettings,
778  firstLayer,
779  lastLayer,
780  EmptyOptional());
781 
782  BOOST_TEST(res.IsOk());
783 
784  TestBackendAssignment visitor;
785  for (auto it =firstLayer; it != lastLayer; ++it)
786  {
787  (*it)->Accept(visitor);
788  }
789 }
790 
791 // Tests that OptimizeForExclusiveConnections works, fusing when needed, using BatchNorm fusing as example
792 BOOST_AUTO_TEST_CASE(OptimizeForExclusiveConnectionsFuseTest)
793 {
794  using namespace armnn;
795  // Define layers information
796  Convolution2dDescriptor convolution2dDescriptor;
797  convolution2dDescriptor.m_BiasEnabled = false;
798  convolution2dDescriptor.m_DataLayout = DataLayout::NHWC;
799  BatchNormalizationDescriptor batchNormDescriptor;
800  batchNormDescriptor.m_DataLayout = DataLayout::NHWC;
801 
802  const unsigned int inputDimensionSizes[] = {1, 4, 4, 3}; // NHWCin
803  const unsigned int weightsDimensionSizes[] = {1, 2, 2, 3}; // CoutHWCin
804  const unsigned int outputDimensionSizes[] = {1, 3, 3, 1}; // NHWCout
805  const unsigned int outputChannelSize[] = {outputDimensionSizes[3]}; // Cout
806 
807  TensorInfo inputInfo(4, inputDimensionSizes, DataType::Float32);
808  TensorInfo outputInfo(4, outputDimensionSizes, DataType::Float32);
809 
810  std::vector<float> weightsVector = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12};
811  ConstTensor weights(TensorInfo(4, weightsDimensionSizes, DataType::Float32), weightsVector);
812 
813  std::vector<float> betaVector = { 0.1f };
814  std::vector<float> gammaVector = { 0.5f };
815  std::vector<float> meanVector = { 0 };
816  std::vector<float> varianceVector = { 1 };
817  ConstTensor beta(TensorInfo(1, outputChannelSize, DataType::Float32), betaVector);
818  ConstTensor gamma(TensorInfo(1, outputChannelSize, DataType::Float32), gammaVector);
819  ConstTensor mean(TensorInfo(1, outputChannelSize, DataType::Float32), meanVector);
820  ConstTensor variance(TensorInfo(1, outputChannelSize, DataType::Float32), varianceVector);
821 
822  // Define the network
823  Graph graph;
824  auto input = graph.AddLayer<InputLayer>(0, "input");
825  auto conv = graph.AddLayer<Convolution2dLayer>(convolution2dDescriptor, "convolution");
826  auto batchNorm = graph.AddLayer<BatchNormalizationLayer>(batchNormDescriptor, "batchNorm");
827  auto output = graph.AddLayer<OutputLayer>(0, "output");
828 
829  // Set layer information
830  input->GetOutputSlot().SetTensorInfo(inputInfo);
831  conv->GetOutputSlot().SetTensorInfo(outputInfo);
832  batchNorm->GetOutputSlot().SetTensorInfo(outputInfo);
833  conv->m_Weight = std::make_unique<ScopedCpuTensorHandle>(weights);
834  batchNorm->m_Beta = std::make_unique<ScopedCpuTensorHandle>(beta);
835  batchNorm->m_Gamma = std::make_unique<ScopedCpuTensorHandle>(gamma);
836  batchNorm->m_Mean = std::make_unique<ScopedCpuTensorHandle>(mean);
837  batchNorm->m_Variance = std::make_unique<ScopedCpuTensorHandle>(variance);
838  if (convolution2dDescriptor.m_BiasEnabled)
839  {
840  std::vector<float> biasVector = {11};
841  ConstTensor bias(TensorInfo(1, outputChannelSize, DataType::Float32), biasVector);
842  conv->m_Bias = std::make_unique<ScopedCpuTensorHandle>(bias);
843  }
844 
845  // Connect layers
846  input->GetOutputSlot(0).Connect(conv->GetInputSlot(0));
847  conv->GetOutputSlot(0).Connect(batchNorm->GetInputSlot(0));
848  batchNorm->GetOutputSlot(0).Connect(output->GetInputSlot(0));
849 
850  BOOST_CHECK(4 == graph.GetNumLayers());
851  BOOST_TEST(CheckSequence(graph.cbegin(),
852  graph.cend(),
853  &IsLayerOfType<InputLayer>,
854  &IsLayerOfType<Convolution2dLayer>,
855  &IsLayerOfType<BatchNormalizationLayer>,
856  &IsLayerOfType<OutputLayer>));
857 
858  // Optimize graph
860 
861  auto checkFusedConv2d = [](const armnn::Layer* const layer)->bool
862  {
863  return IsLayerOfType<armnn::Convolution2dLayer>(layer) &&
864  (layer->GetNameStr() == "fused-batchNorm-into-convolution");
865  };
866 
867  BOOST_CHECK(3 == graph.GetNumLayers());
868  BOOST_TEST(CheckSequence(graph.cbegin(),
869  graph.cend(),
870  &IsLayerOfType<InputLayer>,
871  checkFusedConv2d,
872  &IsLayerOfType<OutputLayer>));
873 }
874 
875 // Tests that OptimizeForExclusiveConnections works, not fusing when not needed, using BatchNorm fusing as example
876 BOOST_AUTO_TEST_CASE(OptimizeForExclusiveConnectionsWithoutFuseTest)
877 {
878  // Define the network
879  Graph graph;
880  Convolution2dDescriptor convolution2dDescriptor;
881  BatchNormalizationDescriptor batchNormDescriptor;
882 
883  auto input = graph.AddLayer<InputLayer>(0, "input");
884  auto conv = graph.AddLayer<Convolution2dLayer>(convolution2dDescriptor, "convolution");
885  auto batchNorm = graph.AddLayer<BatchNormalizationLayer>(batchNormDescriptor, "batchNorm");
886  auto output = graph.AddLayer<OutputLayer>(0, "output");
887  auto output2 = graph.AddLayer<OutputLayer>(1, "output2");
888 
889  // Connect layers
890  input->GetOutputSlot(0).Connect(conv->GetInputSlot(0));
891  conv->GetOutputSlot(0).Connect(batchNorm->GetInputSlot(0));
892  batchNorm->GetOutputSlot(0).Connect(output->GetInputSlot(0));
893  conv->GetOutputSlot(0).Connect(output2->GetInputSlot(0));
894 
895  BOOST_CHECK(5 == graph.GetNumLayers());
896  BOOST_TEST(CheckSequence(graph.cbegin(),
897  graph.cend(),
898  &IsLayerOfType<armnn::InputLayer>,
899  &IsLayerOfType<armnn::Convolution2dLayer>,
900  &IsLayerOfType<armnn::BatchNormalizationLayer>,
901  &IsLayerOfType<armnn::OutputLayer>,
902  &IsLayerOfType<armnn::OutputLayer>));
903  // Optimize graph
905 
906  BOOST_CHECK(5 == graph.GetNumLayers());
907  BOOST_TEST(CheckSequence(graph.cbegin(),
908  graph.cend(),
909  &IsLayerOfType<armnn::InputLayer>,
910  &IsLayerOfType<armnn::Convolution2dLayer>,
911  &IsLayerOfType<armnn::BatchNormalizationLayer>,
912  &IsLayerOfType<armnn::OutputLayer>,
913  &IsLayerOfType<armnn::OutputLayer>));
914 }
BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
Iterator begin()
Returns iterator pointing to the beginning of the list. Lowercase for range-based for loops...
Definition: Graph.hpp:162
std::unique_ptr< ScopedCpuTensorHandle > m_ForgetGateBias
A unique pointer to represent 1D weights tensor with dimensions [num_units].
Definition: LstmLayer.hpp:69
bool m_BiasEnabled
Enable/disable bias.
std::unique_ptr< ScopedCpuTensorHandle > m_InputToOutputWeights
A unique pointer to represent 2D weights tensor with dimensions [input_size, num_units].
Definition: LstmLayer.hpp:61
std::unique_ptr< ScopedCpuTensorHandle > m_RecurrentToCellWeights
A unique pointer to represent 2D weights tensor with dimensions [output_size, num_units].
Definition: LstmLayer.hpp:65
bool m_ProjectionEnabled
Enable/disable the projection layer.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
OptimizationResult AssignBackends(OptimizedNetworkImpl *optNetObjPtr, BackendSettings &backendSettings, Graph::Iterator &firstLayer, Graph::Iterator &lastLayer, Optional< std::vector< std::string > &> errMessages)
Definition: Network.cpp:869
LstmBasicParameters m_BasicParameters
Definition: LstmLayer.hpp:81
This layer represents a batch normalization operation.
std::unique_ptr< IWorkloadFactory > IWorkloadFactoryPtr
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
Definition: INetwork.hpp:62
uint32_t m_PadBottom
Padding bottom value in the height dimension.
std::vector< ConvertFp32ToFp16Layer * > InsertConvertFp32ToFp16LayersAfter(Graph &graph, Layer &layer)
bool m_BiasEnabled
Enable/disable bias.
std::vector< OptimizationPtr > Optimizations
const Parameters & GetParameters() const
DataLayout
Definition: Types.hpp:50
std::vector< ConvertFp16ToFp32Layer * > InsertConvertFp16ToFp32LayersBefore(Graph &graph, Layer &layer, bool expectCorrectInputType)
std::unique_ptr< ScopedCpuTensorHandle > m_InputToInputWeights
A unique pointer to represent 2D weights tensor with dimensions [input_size, num_units].
Definition: LstmLayer.hpp:29
uint32_t m_PadLeft
Padding left value in the width dimension.
Optimizer::Optimizations MakeOptimizations(Args &&... args)
Definition: Optimizer.hpp:43
float m_ClippingThresProj
Clipping threshold value for the projection.
std::unordered_set< BackendId > BackendIdSet
Definition: BackendId.hpp:191
void BackendSelectionHint(Optional< BackendId > backend) final
Provide a hint for the optimizer as to which backend to prefer for this layer.
Definition: Layer.hpp:330
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
This layer represents a depthwise convolution 2d operation.
LayerT * AddLayer(Args &&... args)
Adds a new layer, of type LayerType, to the graph constructed with the arguments passed.
Definition: Graph.hpp:402
uint32_t m_PoolWidth
Pooling width value.
ConstIterator cbegin() const
Returns const iterator pointing to the beginning of the list. Lowercase for range-based for loops...
Definition: Graph.hpp:172
A Convolution2dDescriptor for the Convolution2dLayer.
void CreateResizeBilinearGraph(Graph &graph, const unsigned int *inputShape, const unsigned int *outputShape, DataLayout dataLayout=DataLayout::NCHW)
std::unique_ptr< ScopedCpuTensorHandle > m_RecurrentToForgetWeights
A unique pointer to represent 2D weights tensor with dimensions [output_size, num_units].
Definition: LstmLayer.hpp:63
int Connect(InputSlot &destination)
Definition: Layer.cpp:83
ResizeMethod m_Method
The Interpolation method to use (Bilinear, NearestNeighbor).
static void Pass(Graph &graph, const Optimizations &optimizations)
Definition: Optimizer.cpp:16
The padding fields don't count and are ignored.
PaddingMethod m_PaddingMethod
The padding method to be used. (Exclude, IgnoreValue).
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
std::unique_ptr< ScopedCpuTensorHandle > m_OutputGateBias
A unique pointer to represent 1D weights tensor with dimensions [num_units].
Definition: LstmLayer.hpp:73
OptimizeForExclusiveConnection< Convolution2dLayer, BatchNormalizationLayer, FuseBatchNorm< Convolution2dLayer, armnn::DataType::Float32 > > FuseBatchNormIntoConvolution2DFloat32
This layer represents an activation operation with the specified activation function.
BackendRegistry & BackendRegistryInstance()
uint32_t m_PadTop
Padding top value in the height dimension.
This layer represents a detection postprocess operator.
void CreatePooling2dGraph(Graph &graph, const unsigned int *inputShape, const unsigned int *outputShape, DataLayout dataLayout=DataLayout::NCHW)
Copyright (c) 2021 ARM Limited and Contributors.
std::unique_ptr< IMemoryManager > IMemoryManagerUniquePtr
This layer represents a pad operation.
Definition: PadLayer.hpp:14
This layer represents a LSTM operation.
Definition: LstmLayer.hpp:77
void IgnoreUnused(Ts &&...)
OptimizeForConnection< PadLayer, Convolution2dLayer, FoldPadIntoConvolution2dImpl > FoldPadIntoConvolution2d
LayerList::const_iterator Iterator
Definition: Graph.hpp:50
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
std::unique_ptr< ScopedCpuTensorHandle > m_CellBias
A unique pointer to represent 1D weights tensor with dimensions [num_units].
Definition: LstmLayer.hpp:71
int LayerBindingId
Type of identifiers for bindable layers (inputs, outputs).
Definition: Types.hpp:210
A ResizeDescriptor for the ResizeLayer.
bool IsInputSupported(const BackendId &backend, const TensorInfo &input, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
The SubgraphView class represents a subgraph of a Graph.
uint32_t m_PoolHeight
Pooling height value.
uint32_t m_MaxDetections
Maximum numbers of detections.
A PadDescriptor for the PadLayer.
std::unique_ptr< ScopedCpuTensorHandle > m_CellToForgetWeights
A unique pointer to represent 1D weights tensor with dimensions [num_units].
Definition: LstmLayer.hpp:49
const InputSlot & GetInputSlot(unsigned int index) const override
Get a const input slot handle by slot index.
Definition: Layer.hpp:316
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
A layer user-provided data can be bound to (e.g. inputs, outputs).
Definition: OutputLayer.hpp:13
This layer represents a Gather operator.
Definition: GatherLayer.hpp:14
std::unique_ptr< ScopedCpuTensorHandle > m_Anchors
A unique pointer to store Anchor values.
An LstmDescriptor for the LstmLayer.
uint32_t m_PadRight
Padding right value in the width dimension.
void CreateGatherGraph(Graph &graph, const armnn::TensorInfo &paramsInfo, const armnn::TensorInfo &indicesInfo, const armnn::TensorInfo &outputInfo)
std::shared_ptr< IMemoryManager > IMemoryManagerSharedPtr
bool IsOutputSupported(const BackendId &backend, const TensorInfo &output, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
DataType GetDataType() const
Definition: Tensor.hpp:194
std::unique_ptr< ScopedCpuTensorHandle > m_CellToInputWeights
A unique pointer to represent 1D weights tensor with dimensions [num_units].
Definition: LstmLayer.hpp:47
std::unique_ptr< ScopedCpuTensorHandle > m_Weight
A unique pointer to store Weight values.
A tensor defined by a TensorInfo (shape and data type) and an immutable backing store.
Definition: Tensor.hpp:314
const std::string & GetNameStr() const
Definition: Layer.hpp:220
LayerType GetType() const override
Returns the armnn::LayerType of this layer.
Definition: Layer.hpp:265
uint32_t m_TargetWidth
Target width value.
A GatherDescriptor for the GatherLayer.
bool m_PeepholeEnabled
Enable/disable peephole.
This layer represents a memory copy operation.
#define ARMNN_ASSERT(COND)
Definition: Assert.hpp:14
BOOST_AUTO_TEST_CASE(CheckConvolution2dLayer)
bool IsActivationSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, const ActivationDescriptor &descriptor, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
An ActivationDescriptor for the ActivationLayer.
Definition: Descriptors.hpp:25
This layer represents a floor operation.
Definition: FloorLayer.hpp:13
std::unique_ptr< ScopedCpuTensorHandle > m_RecurrentToInputWeights
A unique pointer to represent 2D weights tensor with dimensions [input_size, num_units].
Definition: LstmLayer.hpp:31
uint32_t m_TargetHeight
Target height value.
uint32_t m_ActivationFunc
The activation function to use.
Visitor base class with empty implementations.
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
This layer represents a pooling 2d operation.
float m_ClippingThresCell
Clipping threshold value for the cell state.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
This layer represents an addition operation.
std::unique_ptr< ScopedCpuTensorHandle > m_RecurrentToOutputWeights
A unique pointer to represent 2D weights tensor with dimensions [output_size, num_units].
Definition: LstmLayer.hpp:67
LstmOptPeepholeParameters m_PeepholeParameters
Definition: LstmLayer.hpp:84
void SetTensorInfo(const TensorInfo &tensorInfo)
Sets the TensorInfo used by this output handler.
std::shared_ptr< ILayerSupport > ILayerSupportSharedPtr
LstmOptProjectionParameters m_ProjectionParameters
Definition: LstmLayer.hpp:83
std::unique_ptr< ScopedCpuTensorHandle > m_ProjectionBias
A unique pointer to represent 1D weights tensor with dimensions [output_size].
Definition: LstmLayer.hpp:41
BOOST_AUTO_TEST_SUITE_END()
bool m_CifgEnabled
Enable/disable cifg (coupled input & forget gate).
std::unique_ptr< ScopedCpuTensorHandle > m_InputToCellWeights
A unique pointer to represent 2D weights tensor with dimensions [input_size, num_units].
Definition: LstmLayer.hpp:59
EmptyOptional is used to initialize the Optional class in case we want to have default value for an O...
Definition: Optional.hpp:32
PoolingAlgorithm m_PoolType
The pooling algorithm to use (Max, Average, L2).
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
void InferTensorInfos()
Definition: Graph.cpp:529
const OutputHandler & GetOutputHandler(unsigned int i=0) const
Definition: Layer.hpp:225
A layer user-provided data can be bound to (e.g. inputs, outputs).
Definition: InputLayer.hpp:13
Iterator end()
Returns iterator pointing to the end of the list. Lowercase for range-based for loops.
Definition: Graph.hpp:164
bool CheckSequence(const armnn::Graph::ConstIterator first, const armnn::Graph::ConstIterator last)
Definition: TestUtils.hpp:21
void CreateConvolution2dGraph(Graph &graph, const unsigned int *inputShape, const unsigned int *weightsShape, const unsigned int *outputShape, DataLayout dataLayout=DataLayout::NCHW)
void SetTensorInfo(const TensorInfo &tensorInfo) override
Definition: Layer.cpp:58
armnn::TensorInfo scoresInfo({ 1, 6, 3 }, armnn::DataType::Float32)
std::unique_ptr< ScopedCpuTensorHandle > m_CellToOutputWeights
A unique pointer to represent 1D weights tensor with dimensions [num_units].
Definition: LstmLayer.hpp:51
DataType GetDataType() const
Definition: Layer.cpp:283
const OutputSlot & GetOutputSlot(unsigned int index=0) const override
Get the const output slot handle by slot index.
Definition: Layer.hpp:318
std::unique_ptr< ScopedCpuTensorHandle > m_ProjectionWeights
A unique pointer to represent 2D weights tensor with dimensions [output_size, num_units].
Definition: LstmLayer.hpp:39
LstmOptCifgParameters m_CifgParameters
Definition: LstmLayer.hpp:82
ConstIterator cend() const
Returns const iterator pointing to the end of the list. Lowercase for range-based for loops...
Definition: Graph.hpp:174
This layer represents a convolution 2d operation.
void Connect(armnn::IConnectableLayer *from, armnn::IConnectableLayer *to, const armnn::TensorInfo &tensorInfo, unsigned int fromIndex, unsigned int toIndex)
Definition: TestUtils.cpp:12
std::unique_ptr< ScopedCpuTensorHandle > m_Weight
A unique pointer to store Weight values.
A Pooling2dDescriptor for the Pooling2dLayer.
size_t GetNumLayers() const
Definition: Graph.hpp:191
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
void CreateDepthwiseConvolution2dGraph(Graph &graph, const unsigned int *inputShape, const unsigned int *weightsShape, const unsigned int *outputShape, DataLayout dataLayout=DataLayout::NCHW)
const TensorInfo & GetTensorInfo() const override
Definition: Layer.cpp:63
static INetworkPtr Create(NetworkOptions networkOptions={})
Definition: Network.cpp:510
std::unique_ptr< ScopedCpuTensorHandle > m_InputToForgetWeights
A unique pointer to represent 2D weights tensor with dimensions [input_size, num_units].
Definition: LstmLayer.hpp:57
ActivationFunction m_Function
The activation function to use (Sigmoid, TanH, Linear, ReLu, BoundedReLu, SoftReLu, LeakyReLu, Abs, Sqrt, Square, Elu).
Definition: Descriptors.hpp:48
std::unique_ptr< ScopedCpuTensorHandle > m_InputGateBias
A unique pointer to represent 1D weights tensor with dimensions [num_units].
Definition: LstmLayer.hpp:33
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
A DepthwiseConvolution2dDescriptor for the DepthwiseConvolution2dLayer.
A BatchNormalizationDescriptor for the BatchNormalizationLayer.
This layer represents a resize operation.
Definition: ResizeLayer.hpp:13
std::vector< float > anchors({ 0.5f, 0.5f, 1.0f, 1.0f, 0.5f, 0.5f, 1.0f, 1.0f, 0.5f, 0.5f, 1.0f, 1.0f, 0.5f, 10.5f, 1.0f, 1.0f, 0.5f, 10.5f, 1.0f, 1.0f, 0.5f, 100.5f, 1.0f, 1.0f })
std::unique_ptr< IBackendContext > IBackendContextPtr