ArmNN 20.02 — OptimizerTests.cpp (documentation listing of this source file).
Note: this is an extracted listing; some source lines were dropped by the
documentation generator (visible as gaps in the embedded line numbering).
1 //
2 // Copyright © 2017 Arm Ltd. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
6 #include "TestUtils.hpp"
7 
8 #include <BackendSettings.hpp>
9 #include <Graph.hpp>
10 #include <Network.hpp>
11 #include <Optimizer.hpp>
12 
14 #include <armnn/INetwork.hpp>
16 
18 
22 
23 #include <boost/test/unit_test.hpp>
24 
25 using namespace armnn;
26 
27 namespace
28 {
29 
30 void CreateLSTMLayerHelper(Graph &graph, bool CifgEnabled)
31 {
32  LstmDescriptor layerDesc;
33  layerDesc.m_ActivationFunc = 4;
34  layerDesc.m_ClippingThresCell = 0.2f;
35  layerDesc.m_ClippingThresProj = 0.4f;
36  layerDesc.m_CifgEnabled = CifgEnabled;
37  layerDesc.m_PeepholeEnabled = false;
38  layerDesc.m_ProjectionEnabled = false;
39 
40  LstmLayer* const layer = graph.AddLayer<LstmLayer>(layerDesc, "layer");
41  unsigned int batchSize = 3;
42  unsigned int inputSize = 2;
43  unsigned int numUnits = 4;
44  unsigned int outputSize = 4;
45 
46  layer->m_BasicParameters.m_InputToForgetWeights = std::make_unique<ScopedCpuTensorHandle>
47  (TensorInfo({ numUnits, inputSize }, DataType::Float32));
48  layer->m_BasicParameters.m_InputToCellWeights = std::make_unique<ScopedCpuTensorHandle>
49  (TensorInfo({ numUnits, inputSize }, DataType::Float32));
50  layer->m_BasicParameters.m_InputToOutputWeights = std::make_unique<ScopedCpuTensorHandle>
51  (TensorInfo({ numUnits, inputSize }, DataType::Float32));
52  layer->m_BasicParameters.m_RecurrentToForgetWeights = std::make_unique<ScopedCpuTensorHandle>
53  (TensorInfo({ numUnits, outputSize }, DataType::Float32));
54  layer->m_BasicParameters.m_RecurrentToCellWeights = std::make_unique<ScopedCpuTensorHandle>
55  (TensorInfo({ numUnits, outputSize }, DataType::Float32));
56  layer->m_BasicParameters.m_RecurrentToOutputWeights = std::make_unique<ScopedCpuTensorHandle>
57  (TensorInfo({ numUnits, outputSize }, DataType::Float32));
58  layer->m_BasicParameters.m_ForgetGateBias = std::make_unique<ScopedCpuTensorHandle>
59  (TensorInfo({ numUnits }, DataType::Float32));
60  layer->m_BasicParameters.m_CellBias = std::make_unique<ScopedCpuTensorHandle>
61  (TensorInfo({ numUnits }, DataType::Float32));
62  layer->m_BasicParameters.m_OutputGateBias = std::make_unique<ScopedCpuTensorHandle>
63  (TensorInfo({ numUnits }, DataType::Float32));
64 
65  layer->m_BasicParameters.m_InputToForgetWeights->Allocate();
66  layer->m_BasicParameters.m_InputToCellWeights->Allocate();
67  layer->m_BasicParameters.m_InputToOutputWeights->Allocate();
71  layer->m_BasicParameters.m_ForgetGateBias->Allocate();
72  layer->m_BasicParameters.m_CellBias->Allocate();
73  layer->m_BasicParameters.m_OutputGateBias->Allocate();
74 
75  if (!layerDesc.m_CifgEnabled)
76  {
77  layer->m_CifgParameters.m_InputToInputWeights = std::make_unique<ScopedCpuTensorHandle>
78  (TensorInfo({ numUnits, inputSize }, DataType::Float32));
79  layer->m_CifgParameters.m_RecurrentToInputWeights = std::make_unique<ScopedCpuTensorHandle>
80  (TensorInfo({ numUnits, outputSize }, DataType::Float32));
81  layer->m_CifgParameters.m_CellToInputWeights = std::make_unique<ScopedCpuTensorHandle>
82  (TensorInfo({ numUnits }, DataType::Float32));
83  layer->m_CifgParameters.m_InputGateBias = std::make_unique<ScopedCpuTensorHandle>
84  (TensorInfo({ numUnits }, DataType::Float32));
85  layer->m_CifgParameters.m_InputToInputWeights->Allocate();
87  layer->m_CifgParameters.m_CellToInputWeights->Allocate();
88  layer->m_CifgParameters.m_InputGateBias->Allocate();
89  }
90 
91  if (layerDesc.m_ProjectionEnabled)
92  {
93  layer->m_ProjectionParameters.m_ProjectionWeights = std::make_unique<ScopedCpuTensorHandle>
94  (TensorInfo({ outputSize, numUnits }, DataType::Float32));
95  layer->m_ProjectionParameters.m_ProjectionBias = std::make_unique<ScopedCpuTensorHandle>
96  (TensorInfo({ outputSize }, DataType::Float32));
98  layer->m_ProjectionParameters.m_ProjectionBias->Allocate();
99  }
100 
101  if (layerDesc.m_PeepholeEnabled)
102  {
103  layer->m_PeepholeParameters.m_CellToForgetWeights = std::make_unique<ScopedCpuTensorHandle>
104  (TensorInfo({ numUnits }, DataType::Float32));
105  layer->m_PeepholeParameters.m_CellToOutputWeights = std::make_unique<ScopedCpuTensorHandle>
106  (TensorInfo({ numUnits }, DataType::Float32));
107  layer->m_PeepholeParameters.m_CellToForgetWeights->Allocate();
108  layer->m_PeepholeParameters.m_CellToOutputWeights->Allocate();
109  }
110 
111  // create input and output layers
112  Layer* const input = graph.AddLayer<InputLayer>(0, "input");
113  Layer* const outputStateIn = graph.AddLayer<InputLayer>(1, "outputStateIn");
114  Layer* const cellStateIn = graph.AddLayer<InputLayer>(2, "cellStateIn");
115  Layer* const scratchBuffer = graph.AddLayer<OutputLayer>(0, "scratchBuffer");
116  Layer* const outputStateOut = graph.AddLayer<OutputLayer>(1, "outputStateOut");
117  Layer* const cellStateOut = graph.AddLayer<OutputLayer>(2, "cellStateOut");
118  Layer* const output = graph.AddLayer<OutputLayer>(3, "output");
119 
120  // connect up
121  armnn::TensorInfo lstmTensorInfo1({ batchSize, inputSize }, DataType::Float32);
122  armnn::TensorInfo lstmTensorInfo2({ batchSize, numUnits}, DataType::Float32);
123  armnn::TensorInfo lstmTensorInfo3({ batchSize, outputSize }, DataType::Float32);
124  armnn::TensorInfo lstmTensorInfoScratchBuff({ batchSize, numUnits * (layerDesc.m_CifgEnabled ? 3 : 4) },
126 
127  Connect(input, layer, lstmTensorInfo1, 0, 0);
128  Connect(cellStateIn, layer, lstmTensorInfo2, 0, 1);
129  Connect(outputStateIn, layer, lstmTensorInfo3, 0, 2);
130  Connect(layer, scratchBuffer, lstmTensorInfoScratchBuff, 0, 0);
131  Connect(layer, outputStateOut, lstmTensorInfo3, 1, 0);
132  Connect(layer, cellStateOut, lstmTensorInfo2, 2, 0);
133  Connect(layer, output, lstmTensorInfo3, 3, 0);
134 }
135 
136 }
137 
139 using namespace armnn::optimizations;
140 
141 BOOST_AUTO_TEST_CASE(LSTMValidateTensorShapesFromInputsCIFGDisabledTest)
142 {
143  Graph graph;
144 
145  //Helper function creates graph containing LSTM layer with required input and output layers
146  CreateLSTMLayerHelper(graph, false);
147 
148  //This function used to call ValidateShapesFromInputs();
149  BOOST_CHECK_NO_THROW(graph.InferTensorInfos());
150 }
151 
152 BOOST_AUTO_TEST_CASE(LSTMValidateTensorShapesFromInputsCIFGEnabledTest)
153 {
154  Graph graph;
155 
156  //Helper function creates graph containing LSTM layer with required input and output layers
157  CreateLSTMLayerHelper(graph, true);
158 
159  //This function used to call ValidateShapesFromInputs();
160  BOOST_CHECK_NO_THROW(graph.InferTensorInfos());
161 }
162 
163 BOOST_AUTO_TEST_CASE(InsertConvertersTest)
164 {
165  const armnn::TensorInfo info({ 1, 5, 2, 3 }, armnn::DataType::Float16);
166 
167  armnn::Graph graph;
168 
169  armnn::LayerBindingId inputId = 0;
170 
171  armnn::Layer* head = graph.AddLayer<armnn::OutputLayer>(0, "output");
172 
173  head = graph.InsertNewLayer<armnn::AdditionLayer>(head->GetInputSlot(0), "");
175 
176  graph.InsertNewLayer<armnn::InputLayer>(head->GetInputSlot(1), inputId++, "")
177  ->GetOutputHandler().SetTensorInfo(info);
178 
179  head = graph.InsertNewLayer<armnn::FloorLayer>(head->GetInputSlot(0), "");
181 
182  head = graph.InsertNewLayer<armnn::MemCopyLayer>(head->GetInputSlot(0), "");
184 
185  graph.InsertNewLayer<armnn::InputLayer>(head->GetInputSlot(0), inputId++, "")
186  ->GetOutputHandler().SetTensorInfo(info);
187 
188  // Check graph layer sequence before inserting convert layers
189  BOOST_TEST(CheckSequence(graph.cbegin(),
190  graph.cend(),
191  &IsLayerOfType<armnn::InputLayer>,
192  &IsLayerOfType<armnn::InputLayer>,
193  &IsLayerOfType<armnn::MemCopyLayer>,
194  &IsLayerOfType<armnn::FloorLayer>,
195  &IsLayerOfType<armnn::AdditionLayer>,
196  &IsLayerOfType<armnn::OutputLayer>));
197 
198  // Check layers have Float16 DataType
199  for (auto& layer : graph)
200  {
201  if(layer->GetType()==LayerType::Floor || layer->GetType() == LayerType::Addition)
202  {
203  BOOST_ASSERT(layer->GetOutputSlot(0).GetTensorInfo().GetDataType() == DataType::Float16);
204  BOOST_ASSERT(layer->GetDataType() == DataType::Float16);
205  }
206  }
207 
208  // Insert convert layers either side of unsupported layer
209  for (auto& layer : graph)
210  {
211  if(layer->GetType()==LayerType::Floor || layer->GetType() == LayerType::Addition)
212  {
214  InsertConvertFp32ToFp16LayersAfter(graph, *layer);
215  }
216  }
217 
218  // Check layers have correct DataType after inserting convert layers
219  for (auto& layer : graph)
220  {
221  if (layer->GetType()==LayerType::Floor || layer->GetType() == LayerType::Addition)
222  {
223  BOOST_ASSERT(layer->GetOutputSlot(0).GetTensorInfo().GetDataType() == DataType::Float32);
224  BOOST_ASSERT(layer->GetDataType() == DataType::Float32);
225  }
226  else if (layer->GetType() == LayerType::ConvertFp16ToFp32)
227  {
228  BOOST_ASSERT(layer->GetOutputSlot(0).GetTensorInfo().GetDataType() == DataType::Float32);
229  BOOST_ASSERT(layer->GetDataType() == DataType::Float16);
230  }
231  else if (layer->GetType() == LayerType::ConvertFp32ToFp16)
232  {
233  BOOST_ASSERT(layer->GetOutputSlot(0).GetTensorInfo().GetDataType() == DataType::Float16);
234  BOOST_ASSERT(layer->GetDataType() == DataType::Float32);
235  }
236  }
237 
238  // Check sequence of layers after inserting convert layers
239  BOOST_TEST(CheckSequence(graph.cbegin(),
240  graph.cend(),
241  &IsLayerOfType<armnn::InputLayer>,
242  &IsLayerOfType<armnn::InputLayer>,
243  &IsLayerOfType<armnn::ConvertFp16ToFp32Layer>,
244  &IsLayerOfType<armnn::MemCopyLayer>,
245  &IsLayerOfType<armnn::ConvertFp16ToFp32Layer>,
246  &IsLayerOfType<armnn::FloorLayer>,
247  &IsLayerOfType<armnn::ConvertFp32ToFp16Layer>,
248  &IsLayerOfType<armnn::ConvertFp16ToFp32Layer>,
249  &IsLayerOfType<armnn::AdditionLayer>,
250  &IsLayerOfType<armnn::ConvertFp32ToFp16Layer>,
251  &IsLayerOfType<armnn::OutputLayer>));
252 }
253 
254 
255 
256 void CreateConvolution2dGraph(Graph &graph, const unsigned int* inputShape,
257  const unsigned int* weightsShape, const unsigned int* outputShape,
258  DataLayout dataLayout = DataLayout::NCHW)
259 {
260  armnn::TensorInfo inputInfo(4, inputShape, DataType::Float32);
261  armnn::TensorInfo outputInfo(4, outputShape, DataType::Float32);
262 
263  std::vector<float> weightsVector(90);
264  armnn::ConstTensor weights(armnn::TensorInfo(4, weightsShape, armnn::DataType::Float32), weightsVector);
265 
267  desc.m_BiasEnabled = false;
268  desc.m_StrideX = 1;
269  desc.m_StrideY = 1;
270  desc.m_DataLayout = dataLayout;
271 
272  Layer* input = graph.AddLayer<InputLayer>(0, "input");
273  input->GetOutputSlot().SetTensorInfo(inputInfo);
274 
275  Convolution2dLayer* layer = graph.AddLayer<Convolution2dLayer>(desc, "conv2d");
276  layer->m_Weight = std::make_unique<armnn::ScopedCpuTensorHandle>(weights);
277  layer->GetOutputSlot().SetTensorInfo(outputInfo);
278 
279  Layer* output = graph.AddLayer<OutputLayer>(0, "output");
280  input->GetOutputSlot().Connect(layer->GetInputSlot(0));
281  layer->GetOutputSlot().Connect(output->GetInputSlot(0));
282 }
283 
284 BOOST_AUTO_TEST_CASE(Conv2dValidateTensorShapesFromInputs)
285 {
286  Graph graph;
287  const unsigned int inputShape[] = { 1, 3, 8, 16 };
288  const unsigned int weightsShape[] = { 2, 3, 5, 3 };
289  const unsigned int outputShape[] = { 1, 2, 4, 14 };
290  CreateConvolution2dGraph(graph, inputShape, weightsShape, outputShape);
291 
292  BOOST_CHECK_NO_THROW(graph.InferTensorInfos());
293 }
294 
295 BOOST_AUTO_TEST_CASE(Conv2dValidateTensorShapesFromInputsNhwc)
296 {
297  Graph graph;
298  const unsigned int inputShape[] = { 1, 8, 16, 3 };
299  const unsigned int weightsShape[] = { 2, 5, 3, 3 };
300  const unsigned int outputShape[] = { 1, 4, 14, 2 };
301  CreateConvolution2dGraph(graph, inputShape, weightsShape, outputShape, DataLayout::NHWC);
302 
303  BOOST_CHECK_NO_THROW(graph.InferTensorInfos());
304 }
305 
306 void CreateDepthwiseConvolution2dGraph(Graph &graph, const unsigned int* inputShape,
307  const unsigned int* weightsShape, const unsigned int* outputShape,
308  DataLayout dataLayout = DataLayout::NCHW)
309 {
310  armnn::TensorInfo inputInfo(4, inputShape, DataType::Float32);
311  armnn::TensorInfo outputInfo(4, outputShape, DataType::Float32);
312 
313  std::vector<float> weightsVector(18);
314  armnn::ConstTensor weights(armnn::TensorInfo(4, weightsShape, armnn::DataType::Float32), weightsVector);
315 
317  desc.m_BiasEnabled = false;
318  desc.m_StrideX = 1;
319  desc.m_StrideY = 1;
320  desc.m_DataLayout = dataLayout;
321 
322  Layer* input = graph.AddLayer<InputLayer>(0, "input");
323  input->GetOutputSlot().SetTensorInfo(inputInfo);
324 
325  DepthwiseConvolution2dLayer* layer = graph.AddLayer<DepthwiseConvolution2dLayer>(desc, "depthwiseConv2d");
326  layer->m_Weight = std::make_unique<armnn::ScopedCpuTensorHandle>(weights);
327  layer->GetOutputSlot().SetTensorInfo(outputInfo);
328 
329  Layer* output = graph.AddLayer<OutputLayer>(0, "output");
330  input->GetOutputSlot().Connect(layer->GetInputSlot(0));
331  layer->GetOutputSlot().Connect(output->GetInputSlot(0));
332 }
333 
334 BOOST_AUTO_TEST_CASE(DepthwiseConv2dValidateTensorShapesFromInputs)
335 {
336  Graph graph;
337  const unsigned int inputShape[] = { 1, 2, 3, 3 };
338  const unsigned int weightsShape[] = { 1, 2, 3, 3 };
339  const unsigned int outputShape[] = { 1, 2, 1, 1 };
340  CreateDepthwiseConvolution2dGraph(graph, inputShape, weightsShape, outputShape);
341 
342  BOOST_CHECK_NO_THROW(graph.InferTensorInfos());
343 }
344 
345 BOOST_AUTO_TEST_CASE(DepthwiseConv2dValidateTensorShapesFromInputsNhwc)
346 {
347  Graph graph;
348  const unsigned int inputShape[] = { 1, 3, 3, 2 };
349  const unsigned int weightsShape[] = { 1, 2, 3, 3 };
350  const unsigned int outputShape[] = { 1, 1, 1, 2 };
351  CreateDepthwiseConvolution2dGraph(graph, inputShape, weightsShape, outputShape, DataLayout::NHWC);
352 
353  BOOST_CHECK_NO_THROW(graph.InferTensorInfos());
354 }
355 
356 void CreatePooling2dGraph(Graph &graph, const unsigned int* inputShape, const unsigned int* outputShape,
357  DataLayout dataLayout = DataLayout::NCHW)
358 {
359  armnn::TensorInfo inputInfo(4, inputShape, DataType::Float32);
360  armnn::TensorInfo outputInfo(4, outputShape, DataType::Float32);
361 
362  Pooling2dDescriptor desc;
364  desc.m_PoolWidth = desc.m_PoolHeight = 100;
365  desc.m_StrideX = desc.m_StrideY = 5;
366  desc.m_PadLeft = 50;
367  desc.m_PadRight = 50;
368  desc.m_PadTop = 50;
369  desc.m_PadBottom = 50;
371  desc.m_DataLayout = dataLayout;
372 
373  Layer* input = graph.AddLayer<InputLayer>(0, "input");
374  input->GetOutputSlot().SetTensorInfo(inputInfo);
375 
376  Pooling2dLayer* layer = graph.AddLayer<Pooling2dLayer>(desc, "pooling2d");
377  layer->GetOutputSlot().SetTensorInfo(outputInfo);
378 
379  Layer* output = graph.AddLayer<OutputLayer>(0, "output");
380  input->GetOutputSlot().Connect(layer->GetInputSlot(0));
381  layer->GetOutputSlot().Connect(output->GetInputSlot(0));
382 }
383 
384 BOOST_AUTO_TEST_CASE(Pooling2dValidateTensorShapesFromInputs)
385 {
386  Graph graph;
387  const unsigned int inputShape[] = { 5, 3, 52, 60 };
388  const unsigned int outputShape[] = { 5, 3, 11, 13 };
389  CreatePooling2dGraph(graph, inputShape, outputShape, DataLayout::NCHW);
390 
391  BOOST_CHECK_NO_THROW(graph.InferTensorInfos());
392 }
393 
394 BOOST_AUTO_TEST_CASE(Pooling2dValidateTensorShapesFromInputsNhwc)
395 {
396  Graph graph;
397  const unsigned int inputShape[] = { 5, 52, 60, 3 };
398  const unsigned int outputShape[] = { 5, 11, 13, 3 };
399  CreatePooling2dGraph(graph, inputShape, outputShape, DataLayout::NHWC);
400 
401  BOOST_CHECK_NO_THROW(graph.InferTensorInfos());
402 }
403 
404 void CreateResizeBilinearGraph(Graph &graph, const unsigned int* inputShape, const unsigned int* outputShape,
405  DataLayout dataLayout = DataLayout::NCHW)
406 {
407  TensorInfo inputInfo(4, inputShape, DataType::Float32);
408  TensorInfo outputInfo(4, outputShape, DataType::Float32);
409 
410  ResizeDescriptor desc;
412  desc.m_TargetHeight = 3;
413  desc.m_TargetWidth = 4;
414  desc.m_DataLayout = dataLayout;
415 
416  Layer* input = graph.AddLayer<InputLayer>(0, "input");
417  input->GetOutputSlot().SetTensorInfo(inputInfo);
418 
419  ResizeLayer* layer = graph.AddLayer<ResizeLayer>(desc, "resizeBilinear");
420  layer->GetOutputSlot().SetTensorInfo(outputInfo);
421 
422  Layer* output = graph.AddLayer<OutputLayer>(0, "output");
423  input->GetOutputSlot().Connect(layer->GetInputSlot(0));
424  layer->GetOutputSlot().Connect(output->GetInputSlot(0));
425 }
426 
427 BOOST_AUTO_TEST_CASE(ResizeBilinearValidateTensorShapesFromInputs)
428 {
429  Graph graph;
430  const unsigned int inputShape[] = { 1, 2, 4, 5 };
431  const unsigned int outputShape[] = { 1, 2, 3, 4 };
432  CreateResizeBilinearGraph(graph, inputShape, outputShape);
433 
434  BOOST_CHECK_NO_THROW(graph.InferTensorInfos());
435 }
436 
437 BOOST_AUTO_TEST_CASE(ResizeBilinearValidateTensorShapesFromInputsNhwc)
438 {
439  Graph graph;
440  const unsigned int inputShape[] = { 1, 4, 5, 2 };
441  const unsigned int outputShape[] = { 1, 3, 4, 2 };
442  CreateResizeBilinearGraph(graph, inputShape, outputShape, DataLayout::NHWC);
443 
444  BOOST_CHECK_NO_THROW(graph.InferTensorInfos());
445 }
446 
447 
448 void CreateGatherGraph(Graph& graph, const armnn::TensorInfo& paramsInfo, const armnn::TensorInfo& indicesInfo,
449  const armnn::TensorInfo& outputInfo)
450 {
451  Layer* input0 = graph.AddLayer<InputLayer>(0, "params");
452  input0->GetOutputSlot().SetTensorInfo(paramsInfo);
453 
454  Layer* input1 = graph.AddLayer<InputLayer>(1, "indices");
455  input1->GetOutputSlot().SetTensorInfo(indicesInfo);
456 
457  GatherLayer* layer = graph.AddLayer<GatherLayer>("gather");
458  layer->GetOutputSlot().SetTensorInfo(outputInfo);
459 
460  Layer* output = graph.AddLayer<OutputLayer>(0, "output");
461  input0->GetOutputSlot().Connect(layer->GetInputSlot(0));
462  input1->GetOutputSlot().Connect(layer->GetInputSlot(1));
463  layer->GetOutputSlot().Connect(output->GetInputSlot(0));
464 }
465 
466 BOOST_AUTO_TEST_CASE(GatherValidateTensorShapesFromInputs)
467 {
468  Graph graph;
469  armnn::TensorInfo paramsInfo({10, 5}, DataType::Float32);
470  armnn::TensorInfo indicesInfo({3}, DataType::Signed32);
471  armnn::TensorInfo outputInfo({3, 5}, DataType::Float32);
472 
473  CreateGatherGraph(graph, paramsInfo, indicesInfo, outputInfo);
474 
475  BOOST_CHECK_NO_THROW(graph.InferTensorInfos());
476 }
477 
478 BOOST_AUTO_TEST_CASE(GatherValidateTensorShapesFromInputs1DParams)
479 {
480  Graph graph;
481  armnn::TensorInfo paramsInfo({8}, DataType::Float32);
482  armnn::TensorInfo indicesInfo({5}, DataType::Signed32);
483  armnn::TensorInfo outputInfo( {5}, DataType::Float32);
484 
485  CreateGatherGraph(graph, paramsInfo, indicesInfo, outputInfo);
486 
487  BOOST_CHECK_NO_THROW(graph.InferTensorInfos());
488 }
489 
490 BOOST_AUTO_TEST_CASE(GatherValidateTensorShapesFromInputsMultiDimIndices)
491 {
492  Graph graph;
493  armnn::TensorInfo paramsInfo({3, 2, 5}, DataType::Float32);
494  armnn::TensorInfo indicesInfo({2, 2}, DataType::Signed32);
495  armnn::TensorInfo outputInfo({2, 2, 2, 5}, DataType::Float32);
496 
497  CreateGatherGraph(graph, paramsInfo, indicesInfo, outputInfo);
498 
499  BOOST_CHECK_NO_THROW(graph.InferTensorInfos());
500 }
501 
502 BOOST_AUTO_TEST_CASE(DetectionPostProcessValidateTensorShapes)
503 {
504  Graph graph;
505  armnn::TensorInfo boxEncodingsInfo({1, 10, 4}, DataType::QAsymmU8);
507  std::vector<uint8_t> anchorsVector(40);
509 
510  armnn::TensorInfo detectionBoxesInfo({1, 3, 4}, DataType::QAsymmU8);
511  armnn::TensorInfo detectionScoresInfo({1, 3}, DataType::QAsymmU8);
512  armnn::TensorInfo detectionClassesInfo({1, 3}, DataType::QAsymmU8);
513  armnn::TensorInfo numDetectionInfo({1}, DataType::QAsymmU8);
514 
515  Layer* input0 = graph.AddLayer<InputLayer>(0, "boxEncodings");
516  input0->GetOutputSlot().SetTensorInfo(boxEncodingsInfo);
517 
518  Layer* input1 = graph.AddLayer<InputLayer>(1, "score");
520 
522  descriptor.m_MaxDetections = 3;
523 
524  DetectionPostProcessLayer* layer = graph.AddLayer<DetectionPostProcessLayer>(descriptor, "detectionPostProcess");
525  layer->m_Anchors = std::make_unique<armnn::ScopedCpuTensorHandle>(anchors);
526  layer->GetOutputSlot(0).SetTensorInfo(detectionBoxesInfo);
527  layer->GetOutputSlot(1).SetTensorInfo(detectionScoresInfo);
528  layer->GetOutputSlot(2).SetTensorInfo(detectionClassesInfo);
529  layer->GetOutputSlot(3).SetTensorInfo(numDetectionInfo);
530 
531  input0->GetOutputSlot().Connect(layer->GetInputSlot(0));
532  input1->GetOutputSlot().Connect(layer->GetInputSlot(1));
533 
534  BOOST_CHECK_NO_THROW(graph.InferTensorInfos());
535 }
536 
537 BOOST_AUTO_TEST_CASE(FoldPadLayerIntoConvolution2dLayer)
538 {
539  Graph graph;
540  const unsigned int inputShape[] = { 1, 2, 2, 3 };
541  const unsigned int paddedShape[] = { 1, 6, 6, 3 };
542  const unsigned int weightsShape[] = { 1, 2, 3, 3 };
543  const unsigned int outputShape[] = { 1, 2, 1, 1 };
544 
545 
546  armnn::TensorInfo inputInfo(4, inputShape, DataType::Float32);
547  armnn::TensorInfo paddedInfo(4, paddedShape, DataType::Float32);
548  armnn::TensorInfo outputInfo(4, outputShape, DataType::Float32);
549 
550  Layer* input = graph.AddLayer<InputLayer>(0, "input");
551  input->GetOutputSlot().SetTensorInfo(inputInfo);
552 
553  PadDescriptor padDescriptor({{ 0, 0 }, { 2, 2 }, { 2, 2 }, { 0, 0 }});
554 
555  PadLayer* padLayer = graph.AddLayer<PadLayer>(padDescriptor, "pad");
556  padLayer->GetOutputSlot().SetTensorInfo(paddedInfo);
557 
558  Convolution2dDescriptor convolution2dDescriptor;
559  convolution2dDescriptor.m_BiasEnabled = false;
560  convolution2dDescriptor.m_StrideX = 1;
561  convolution2dDescriptor.m_StrideY = 1;
562  convolution2dDescriptor.m_DataLayout = DataLayout::NHWC;
563 
564  std::vector<float> weightsVector(18);
565  armnn::ConstTensor weights(armnn::TensorInfo(4, weightsShape, armnn::DataType::Float32), weightsVector);
566 
567  Convolution2dLayer* conv2dLayer = graph.AddLayer<Convolution2dLayer>(convolution2dDescriptor,"conv2d");
568  conv2dLayer->m_Weight = std::make_unique<armnn::ScopedCpuTensorHandle>(weights);
569  conv2dLayer->GetOutputSlot().SetTensorInfo(outputInfo);
570 
571  Layer* output = graph.AddLayer<OutputLayer>(0, "output");
572 
573  // Connect up layers - input -> pad -> conv2d -> output
574  input->GetOutputSlot().Connect(padLayer->GetInputSlot(0));
575  padLayer->GetOutputSlot().Connect(conv2dLayer->GetInputSlot(0));
576  conv2dLayer->GetOutputSlot().Connect(output->GetInputSlot(0));
577 
578  auto checkSimpleConv2d = [ ](const armnn::Layer* const layer) -> bool
579  {
580  const auto conv2dLayer = static_cast<const armnn::Convolution2dLayer*>(layer);
581  const auto conv2dLayerParams = conv2dLayer->GetParameters();
582  return IsLayerOfType<armnn::Convolution2dLayer>(layer) &&
583  (layer->GetNameStr() == "conv2d") &&
584  (conv2dLayerParams.m_PadLeft == 0) &&
585  (conv2dLayerParams.m_PadRight == 0) &&
586  (conv2dLayerParams.m_PadTop == 0) &&
587  (conv2dLayerParams.m_PadBottom == 0) &&
588  (conv2dLayerParams.m_BiasEnabled == false) &&
589  (conv2dLayerParams.m_StrideX == 1) &&
590  (conv2dLayerParams.m_StrideY == 1) &&
591  (conv2dLayerParams.m_DataLayout == DataLayout::NHWC);
592  };
593 
594  BOOST_TEST(CheckSequence(graph.cbegin(),
595  graph.cend(),
596  &IsLayerOfType<armnn::InputLayer>,
597  &IsLayerOfType<armnn::PadLayer>,
598  checkSimpleConv2d,
599  &IsLayerOfType<armnn::OutputLayer>));
600 
602 
603  auto checkPadFoldedIntoConv2d = [ ](const armnn::Layer* const layer) -> bool
604  {
605  const auto conv2dLayer = static_cast<const armnn::Convolution2dLayer*>(layer);
606  const auto conv2dLayerParams = conv2dLayer->GetParameters();
607  return IsLayerOfType<armnn::Convolution2dLayer>(layer) &&
608  (layer->GetNameStr() == "folded-pad-into-conv2d") &&
609  (conv2dLayerParams.m_PadLeft == 2) &&
610  (conv2dLayerParams.m_PadRight == 2) &&
611  (conv2dLayerParams.m_PadTop == 2) &&
612  (conv2dLayerParams.m_PadBottom == 2) &&
613  (conv2dLayerParams.m_BiasEnabled == false) &&
614  (conv2dLayerParams.m_StrideX == 1) &&
615  (conv2dLayerParams.m_StrideY == 1) &&
616  (conv2dLayerParams.m_DataLayout == DataLayout::NHWC);
617  };
618 
619  BOOST_TEST(CheckSequence(graph.cbegin(),
620  graph.cend(),
621  &IsLayerOfType<armnn::InputLayer>,
622  checkPadFoldedIntoConv2d,
623  &IsLayerOfType<armnn::OutputLayer>));
624 }
625 
626 
627 
628 
629 class MockLayerSupport : public LayerSupportBase {
630 public:
631  bool IsInputSupported(const TensorInfo& /*input*/,
632  Optional<std::string&> /*reasonIfUnsupported = EmptyOptional()*/) const override
633  {
634  return true;
635  }
636 
637  bool IsOutputSupported(const TensorInfo& /*input*/,
638  Optional<std::string&> /*reasonIfUnsupported = EmptyOptional()*/) const override
639  {
640  return true;
641  }
642 
643  bool IsActivationSupported(const TensorInfo& /*input0*/,
644  const TensorInfo& /*output*/,
645  const ActivationDescriptor& /*descriptor*/,
646  Optional<std::string&> /*reasonIfUnsupported = EmptyOptional()*/) const override
647  {
648  return true;
649  }
650 };
651 
652 template<typename NamePolicy>
653 class MockBackend : public IBackendInternal
654 {
655 public:
656  MockBackend() = default;
657  ~MockBackend() = default;
658 
659  static const BackendId& GetIdStatic() { return NamePolicy::GetIdStatic(); }
660  const BackendId& GetId() const override { return GetIdStatic(); }
661 
662  IBackendInternal::IMemoryManagerUniquePtr CreateMemoryManager() const override { return nullptr; };
663 
664  IBackendInternal::IWorkloadFactoryPtr CreateWorkloadFactory(
665  const IBackendInternal::IMemoryManagerSharedPtr&) const override { return nullptr; }
666 
667  IBackendInternal::IBackendContextPtr CreateBackendContext(const IRuntime::CreationOptions&) const override
668  {
669  return nullptr;
670  }
671 
672  IBackendInternal::Optimizations GetOptimizations() const override { return {}; }
673  IBackendInternal::ILayerSupportSharedPtr GetLayerSupport() const override
674  {
675  return std::make_shared<MockLayerSupport>();
676  }
677 
678  OptimizationViews OptimizeSubgraphView(const SubgraphView&) const override
679  {
680  return {};
681  };
682 };
683 
684 
685 BOOST_AUTO_TEST_CASE(BackendHintTest)
686 {
687  class TestBackendAssignment : public LayerVisitorBase<VisitorNoThrowPolicy>
688  {
689  public:
690  void VisitInputLayer(const IConnectableLayer* layer,
691  LayerBindingId id,
692  const char* name = nullptr) override
693  {
694  IgnoreUnused(id, name);
695  auto inputLayer = boost::polymorphic_downcast<const InputLayer*>(layer);
696  BOOST_TEST((inputLayer->GetBackendId() == "MockBackend"));
697  }
698 
699  void VisitOutputLayer(const IConnectableLayer* layer,
700  LayerBindingId id,
701  const char* name = nullptr) override
702  {
703  IgnoreUnused(id, name);
704  auto outputLayer = boost::polymorphic_downcast<const OutputLayer*>(layer);
705  BOOST_TEST((outputLayer->GetBackendId() == "MockBackend"));
706  }
707 
708  void VisitActivationLayer(const IConnectableLayer* layer,
709  const ActivationDescriptor& activationDescriptor,
710  const char* name = nullptr) override
711  {
712  IgnoreUnused(activationDescriptor, name);
713  auto activation = boost::polymorphic_downcast<const ActivationLayer*>(layer);
714  BOOST_TEST((activation->GetBackendId() == "CustomBackend"));
715  }
716  };
717 
718  struct CustomPolicy
719  {
720  static const BackendId& GetIdStatic()
721  {
722  static BackendId id="CustomBackend";
723  return id;
724  }
725  };
726 
727  struct MockPolicy
728  {
729  static const BackendId& GetIdStatic()
730  {
731  static BackendId id="MockBackend";
732  return id;
733  }
734  };
735 
736  auto& backendRegistry = BackendRegistryInstance();
737 
738  backendRegistry.Register("MockBackend", [](){
739  return std::make_unique<MockBackend<MockPolicy>>();
740  });
741 
742  backendRegistry.Register("CustomBackend", [](){
743  return std::make_unique<MockBackend<CustomPolicy>>();
744  });
745 
746  // Define the network
747  auto network = INetwork::Create();
750 
751  std::unique_ptr<Graph> graph = std::make_unique<Graph>();
752  auto input = graph->AddLayer<InputLayer>(0, "input");
753  auto act = graph->AddLayer<ActivationLayer>(desc, "activation");
754  auto output = graph->AddLayer<OutputLayer>(0, "output");
755 
756  BackendId customBackendId("CustomBackend");
757  act->BackendSelectionHint(customBackendId);
758 
759  input->GetOutputSlot(0).Connect(act->GetInputSlot(0));
760  act->GetOutputSlot(0).Connect(output->GetInputSlot(0));
761 
762 
763  auto optNet = IOptimizedNetworkPtr(new OptimizedNetwork(std::move(graph)), &IOptimizedNetwork::Destroy);
764 
765  OptimizedNetwork* optNetObjPtr = boost::polymorphic_downcast<OptimizedNetwork*>(optNet.get());
766 
767  // Get the optimized graph
768  Graph& optGraph = optNetObjPtr->GetGraph();
769 
770 
771  std::vector<BackendId> prefs{"MockBackend", "CustomBackend"};
772 
773  BackendIdSet availableBackends = {"CustomBackend", "MockBackend"};
774  DeviceSpec spec(availableBackends);
775 
776  BackendSettings backendSettings(prefs, spec);
777 
778  // Assign an available backend to each layer
779  Graph::Iterator firstLayer = optGraph.begin();
780  Graph::Iterator lastLayer = optGraph.end();
781  OptimizationResult res = AssignBackends(optNetObjPtr,
782  backendSettings,
783  firstLayer,
784  lastLayer,
785  EmptyOptional());
786 
787  BOOST_TEST(res.IsOk());
788 
789  TestBackendAssignment visitor;
790  for (auto it =firstLayer; it != lastLayer; ++it)
791  {
792  (*it)->Accept(visitor);
793  }
794 }
795 
BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
std::unique_ptr< ScopedCpuTensorHandle > m_ForgetGateBias
A unique pointer to represent 1D weights tensor with dimensions [num_units].
Definition: LstmLayer.hpp:69
bool m_BiasEnabled
Enable/disable bias.
std::unique_ptr< ScopedCpuTensorHandle > m_InputToOutputWeights
A unique pointer to represent 2D weights tensor with dimensions [input_size, num_units].
Definition: LstmLayer.hpp:61
std::unique_ptr< ScopedCpuTensorHandle > m_RecurrentToCellWeights
A unique pointer to represent 2D weights tensor with dimensions [output_size, num_units].
Definition: LstmLayer.hpp:65
bool m_ProjectionEnabled
Enable/disable the projection layer.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
LstmBasicParameters m_BasicParameters
Definition: LstmLayer.hpp:81
std::unique_ptr< IWorkloadFactory > IWorkloadFactoryPtr
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
Definition: INetwork.hpp:61
uint32_t m_PadBottom
Padding bottom value in the height dimension.
std::vector< ConvertFp32ToFp16Layer * > InsertConvertFp32ToFp16LayersAfter(Graph &graph, Layer &layer)
bool m_BiasEnabled
Enable/disable bias.
std::vector< OptimizationPtr > Optimizations
const Parameters & GetParameters() const
DataLayout
Definition: Types.hpp:49
std::vector< ConvertFp16ToFp32Layer * > InsertConvertFp16ToFp32LayersBefore(Graph &graph, Layer &layer, bool expectCorrectInputType)
std::unique_ptr< ScopedCpuTensorHandle > m_InputToInputWeights
A unique pointer to represent 2D weights tensor with dimensions [input_size, num_units].
Definition: LstmLayer.hpp:29
uint32_t m_PadLeft
Padding left value in the width dimension.
Optimizer::Optimizations MakeOptimizations(Args &&... args)
Definition: Optimizer.hpp:43
float m_ClippingThresProj
Clipping threshold value for the projection.
std::unique_ptr< ScopedCpuTensorHandle > m_CellToInputWeights
A unique pointer to represent 1D weights tensor with dimensions [num_units].
Definition: LstmLayer.hpp:33
std::unordered_set< BackendId > BackendIdSet
Definition: BackendId.hpp:191
void BackendSelectionHint(Optional< BackendId > backend) final
Provide a hint for the optimizer as to which backend to prefer for this layer.
Definition: Layer.hpp:324
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
This layer represents a depthwise convolution 2d operation.
LayerT * AddLayer(Args &&... args)
Adds a new layer, of type LayerType, to the graph constructed with the arguments passed.
Definition: Graph.hpp:397
uint32_t m_PoolWidth
Pooling width value.
ConstIterator cbegin() const
Returns const iterator pointing to the beginning of the list. Lowercase for range-based for loops...
Definition: Graph.hpp:168
A Convolution2dDescriptor for the Convolution2dLayer.
void CreateResizeBilinearGraph(Graph &graph, const unsigned int *inputShape, const unsigned int *outputShape, DataLayout dataLayout=DataLayout::NCHW)
std::unique_ptr< ScopedCpuTensorHandle > m_RecurrentToForgetWeights
A unique pointer to represent 2D weights tensor with dimensions [output_size, num_units].
Definition: LstmLayer.hpp:63
int Connect(InputSlot &destination)
Definition: Layer.cpp:79
ResizeMethod m_Method
The Interpolation method to use (Bilinear, NearestNeighbor).
static void Pass(Graph &graph, const Optimizations &optimizations)
Definition: Optimizer.cpp:16
The padding fields don&#39;t count and are ignored.
PaddingMethod m_PaddingMethod
The padding method to be used. (Exclude, IgnoreValue).
std::unique_ptr< ScopedCpuTensorHandle > m_OutputGateBias
A unique pointer to represent 1D weights tensor with dimensions [num_units].
Definition: LstmLayer.hpp:73
This layer represents an activation operation with the specified activation function.
BackendRegistry & BackendRegistryInstance()
uint32_t m_PadTop
Padding top value in the height dimension.
This layer represents a detection postprocess operator.
void CreatePooling2dGraph(Graph &graph, const unsigned int *inputShape, const unsigned int *outputShape, DataLayout dataLayout=DataLayout::NCHW)
Copyright (c) 2020 ARM Limited.
std::unique_ptr< IMemoryManager > IMemoryManagerUniquePtr
This layer represents a pad operation.
Definition: PadLayer.hpp:14
This layer represents a LSTM operation.
Definition: LstmLayer.hpp:77
void IgnoreUnused(Ts &&...)
OptimizeForConnection< PadLayer, Convolution2dLayer, FoldPadIntoConvolution2dImpl > FoldPadIntoConvolution2d
LayerList::const_iterator Iterator
Definition: Graph.hpp:50
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
std::unique_ptr< ScopedCpuTensorHandle > m_CellBias
A unique pointer to represent 1D weights tensor with dimensions [num_units].
Definition: LstmLayer.hpp:71
int LayerBindingId
Type of identifiers for bindable layers (inputs, outputs).
Definition: Types.hpp:171
A ResizeDescriptor for the ResizeLayer.
bool IsInputSupported(const BackendId &backend, const TensorInfo &input, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
The SubgraphView class represents a subgraph of a Graph.
uint32_t m_PoolHeight
Pooling height value.
uint32_t m_MaxDetections
Maximum numbers of detections.
A PadDescriptor for the PadLayer.
std::unique_ptr< ScopedCpuTensorHandle > m_CellToForgetWeights
A unique pointer to represent 1D weights tensor with dimensions [num_units].
Definition: LstmLayer.hpp:49
const InputSlot & GetInputSlot(unsigned int index) const override
Get a const input slot handle by slot index.
Definition: Layer.hpp:310
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
A layer user-provided data can be bound to (e.g. inputs, outputs).
Definition: OutputLayer.hpp:13
This layer represents a Gather operator.
Definition: GatherLayer.hpp:14
std::unique_ptr< ScopedCpuTensorHandle > m_Anchors
A unique pointer to store Anchor values.
An LstmDescriptor for the LstmLayer.
uint32_t m_PadRight
Padding right value in the width dimension.
void CreateGatherGraph(Graph &graph, const armnn::TensorInfo &paramsInfo, const armnn::TensorInfo &indicesInfo, const armnn::TensorInfo &outputInfo)
std::shared_ptr< IMemoryManager > IMemoryManagerSharedPtr
bool IsOutputSupported(const BackendId &backend, const TensorInfo &output, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
DataType GetDataType() const
Definition: Tensor.hpp:95
std::unique_ptr< ScopedCpuTensorHandle > m_Weight
A unique pointer to store Weight values.
A tensor defined by a TensorInfo (shape and data type) and an immutable backing store.
Definition: Tensor.hpp:199
const std::string & GetNameStr() const
Definition: Layer.hpp:216
uint32_t m_TargetWidth
Target width value.
bool m_PeepholeEnabled
Enable/disable peephole.
std::unique_ptr< IOptimizedNetwork, void(*)(IOptimizedNetwork *network)> IOptimizedNetworkPtr
Definition: INetwork.hpp:566
This layer represents a memory copy operation.
BOOST_AUTO_TEST_CASE(CheckConvolution2dLayer)
bool IsActivationSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, const ActivationDescriptor &descriptor, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
An ActivationDescriptor for the ActivationLayer.
Definition: Descriptors.hpp:20
This layer represents a floor operation.
Definition: FloorLayer.hpp:13
std::unique_ptr< ScopedCpuTensorHandle > m_RecurrentToInputWeights
A unique pointer to represent 2D weights tensor with dimensions [input_size, num_units].
Definition: LstmLayer.hpp:31
uint32_t m_TargetHeight
Target height value.
uint32_t m_ActivationFunc
The activation function to use.
Visitor base class with empty implementations.
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
This layer represents a pooling 2d operation.
float m_ClippingThresCell
Clipping threshold value for the cell state.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
This layer represents an addition operation.
std::unique_ptr< ScopedCpuTensorHandle > m_RecurrentToOutputWeights
A unique pointer to represent 2D weights tensor with dimensions [output_size, num_units].
Definition: LstmLayer.hpp:67
LstmOptPeepholeParameters m_PeepholeParameters
Definition: LstmLayer.hpp:84
void SetTensorInfo(const TensorInfo &tensorInfo)
Sets the TensorInfo used by this output handler.
std::shared_ptr< ILayerSupport > ILayerSupportSharedPtr
LstmOptProjectionParameters m_ProjectionParameters
Definition: LstmLayer.hpp:83
OptimizationResult AssignBackends(OptimizedNetwork *optNetObjPtr, BackendSettings &backendSettings, Graph::Iterator &firstLayer, Graph::Iterator &lastLayer, Optional< std::vector< std::string > &> errMessages)
Definition: Network.cpp:269
std::unique_ptr< ScopedCpuTensorHandle > m_ProjectionBias
A unique pointer to represent 1D weights tensor with dimensions [output_size].
Definition: LstmLayer.hpp:43
BOOST_AUTO_TEST_SUITE_END()
bool m_CifgEnabled
Enable/disable cifg (coupled input & forget gate).
std::unique_ptr< ScopedCpuTensorHandle > m_InputToCellWeights
A unique pointer to represent 2D weights tensor with dimensions [input_size, num_units].
Definition: LstmLayer.hpp:59
EmptyOptional is used to initialize the Optional class in case we want to have default value for an O...
Definition: Optional.hpp:32
PoolingAlgorithm m_PoolType
The pooling algorithm to use (Max. Average, L2).
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
void InferTensorInfos()
Definition: Graph.cpp:493
const OutputHandler & GetOutputHandler(unsigned int i=0) const
Definition: Layer.hpp:221
A layer user-provided data can be bound to (e.g. inputs, outputs).
Definition: InputLayer.hpp:13
bool CheckSequence(const armnn::Graph::ConstIterator first, const armnn::Graph::ConstIterator last)
Definition: TestUtils.hpp:20
void CreateConvolution2dGraph(Graph &graph, const unsigned int *inputShape, const unsigned int *weightsShape, const unsigned int *outputShape, DataLayout dataLayout=DataLayout::NCHW)
void SetTensorInfo(const TensorInfo &tensorInfo) override
Definition: Layer.cpp:58
armnn::TensorInfo scoresInfo({ 1, 6, 3 }, armnn::DataType::Float32)
std::unique_ptr< ScopedCpuTensorHandle > m_CellToOutputWeights
A unique pointer to represent 1D weights tensor with dimensions [num_units].
Definition: LstmLayer.hpp:51
DataType GetDataType() const
Definition: Layer.cpp:273
LayerType GetType() const
Definition: Layer.hpp:259
const OutputSlot & GetOutputSlot(unsigned int index=0) const override
Get the const output slot handle by slot index.
Definition: Layer.hpp:312
std::unique_ptr< ScopedCpuTensorHandle > m_ProjectionWeights
A unique pointer to represent 2D weights tensor with dimensions [output_size, num_units].
Definition: LstmLayer.hpp:41
LstmOptCifgParameters m_CifgParameters
Definition: LstmLayer.hpp:82
ConstIterator cend() const
Returns const iterator pointing to the end of the list. Lowercase for range-based for loops...
Definition: Graph.hpp:170
This layer represents a convolution 2d operation.
void Connect(armnn::IConnectableLayer *from, armnn::IConnectableLayer *to, const armnn::TensorInfo &tensorInfo, unsigned int fromIndex, unsigned int toIndex)
Definition: TestUtils.cpp:12
std::unique_ptr< ScopedCpuTensorHandle > m_Weight
A unique pointer to store Weight values.
A Pooling2dDescriptor for the Pooling2dLayer.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
void CreateDepthwiseConvolution2dGraph(Graph &graph, const unsigned int *inputShape, const unsigned int *weightsShape, const unsigned int *outputShape, DataLayout dataLayout=DataLayout::NCHW)
const TensorInfo & GetTensorInfo() const override
Definition: Layer.cpp:63
static void Destroy(IOptimizedNetwork *network)
Definition: Network.cpp:59
std::unique_ptr< ScopedCpuTensorHandle > m_InputToForgetWeights
A unique pointer to represent 2D weights tensor with dimensions [input_size, num_units].
Definition: LstmLayer.hpp:57
static INetworkPtr Create()
Definition: Network.cpp:49
ActivationFunction m_Function
The activation function to use (Sigmoid, TanH, Linear, ReLu, BoundedReLu, SoftReLu, LeakyReLu, Abs, Sqrt, Square).
Definition: Descriptors.hpp:35
std::unique_ptr< ScopedCpuTensorHandle > m_InputGateBias
A unique pointer to represent 1D weights tensor with dimensions [num_units].
Definition: LstmLayer.hpp:35
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
A DepthwiseConvolution2dDescriptor for the DepthwiseConvolution2dLayer.
This layer represents a resize operation.
Definition: ResizeLayer.hpp:13
std::vector< float > anchors({ 0.5f, 0.5f, 1.0f, 1.0f, 0.5f, 0.5f, 1.0f, 1.0f, 0.5f, 0.5f, 1.0f, 1.0f, 0.5f, 10.5f, 1.0f, 1.0f, 0.5f, 10.5f, 1.0f, 1.0f, 0.5f, 100.5f, 1.0f, 1.0f })
std::unique_ptr< IBackendContext > IBackendContextPtr