Arm NN (NotReleased development documentation)
OptimizerTests.cpp
Go to the documentation of this file.
1 //
2 // Copyright © 2017 Arm Ltd. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
6 #include "TestUtils.hpp"
7 
8 #include <Graph.hpp>
9 #include <Optimizer.hpp>
10 
11 
13 
15 
16 #include <boost/test/unit_test.hpp>
17 
18 using namespace armnn;
19 
20 namespace
21 {
22 
23 void CreateLSTMLayerHelper(Graph &graph, bool CifgEnabled)
24 {
25  LstmDescriptor layerDesc;
26  layerDesc.m_ActivationFunc = 4;
27  layerDesc.m_ClippingThresCell = 0.2f;
28  layerDesc.m_ClippingThresProj = 0.4f;
29  layerDesc.m_CifgEnabled = CifgEnabled;
30  layerDesc.m_PeepholeEnabled = false;
31  layerDesc.m_ProjectionEnabled = false;
32 
33  LstmLayer* const layer = graph.AddLayer<LstmLayer>(layerDesc, "layer");
34  unsigned int batchSize = 3;
35  unsigned int inputSize = 2;
36  unsigned int numUnits = 4;
37  unsigned int outputSize = 4;
38 
39  layer->m_BasicParameters.m_InputToForgetWeights = std::make_unique<ScopedCpuTensorHandle>
40  (TensorInfo({ numUnits, inputSize }, DataType::Float32));
41  layer->m_BasicParameters.m_InputToCellWeights = std::make_unique<ScopedCpuTensorHandle>
42  (TensorInfo({ numUnits, inputSize }, DataType::Float32));
43  layer->m_BasicParameters.m_InputToOutputWeights = std::make_unique<ScopedCpuTensorHandle>
44  (TensorInfo({ numUnits, inputSize }, DataType::Float32));
45  layer->m_BasicParameters.m_RecurrentToForgetWeights = std::make_unique<ScopedCpuTensorHandle>
46  (TensorInfo({ numUnits, outputSize }, DataType::Float32));
47  layer->m_BasicParameters.m_RecurrentToCellWeights = std::make_unique<ScopedCpuTensorHandle>
48  (TensorInfo({ numUnits, outputSize }, DataType::Float32));
49  layer->m_BasicParameters.m_RecurrentToOutputWeights = std::make_unique<ScopedCpuTensorHandle>
50  (TensorInfo({ numUnits, outputSize }, DataType::Float32));
51  layer->m_BasicParameters.m_ForgetGateBias = std::make_unique<ScopedCpuTensorHandle>
52  (TensorInfo({ numUnits }, DataType::Float32));
53  layer->m_BasicParameters.m_CellBias = std::make_unique<ScopedCpuTensorHandle>
54  (TensorInfo({ numUnits }, DataType::Float32));
55  layer->m_BasicParameters.m_OutputGateBias = std::make_unique<ScopedCpuTensorHandle>
56  (TensorInfo({ numUnits }, DataType::Float32));
57 
58  layer->m_BasicParameters.m_InputToForgetWeights->Allocate();
59  layer->m_BasicParameters.m_InputToCellWeights->Allocate();
60  layer->m_BasicParameters.m_InputToOutputWeights->Allocate();
64  layer->m_BasicParameters.m_ForgetGateBias->Allocate();
65  layer->m_BasicParameters.m_CellBias->Allocate();
66  layer->m_BasicParameters.m_OutputGateBias->Allocate();
67 
68  if (!layerDesc.m_CifgEnabled)
69  {
70  layer->m_CifgParameters.m_InputToInputWeights = std::make_unique<ScopedCpuTensorHandle>
71  (TensorInfo({ numUnits, inputSize }, DataType::Float32));
72  layer->m_CifgParameters.m_RecurrentToInputWeights = std::make_unique<ScopedCpuTensorHandle>
73  (TensorInfo({ numUnits, outputSize }, DataType::Float32));
74  layer->m_CifgParameters.m_CellToInputWeights = std::make_unique<ScopedCpuTensorHandle>
75  (TensorInfo({ numUnits }, DataType::Float32));
76  layer->m_CifgParameters.m_InputGateBias = std::make_unique<ScopedCpuTensorHandle>
77  (TensorInfo({ numUnits }, DataType::Float32));
78  layer->m_CifgParameters.m_InputToInputWeights->Allocate();
80  layer->m_CifgParameters.m_CellToInputWeights->Allocate();
81  layer->m_CifgParameters.m_InputGateBias->Allocate();
82  }
83 
84  if (layerDesc.m_ProjectionEnabled)
85  {
86  layer->m_ProjectionParameters.m_ProjectionWeights = std::make_unique<ScopedCpuTensorHandle>
87  (TensorInfo({ outputSize, numUnits }, DataType::Float32));
88  layer->m_ProjectionParameters.m_ProjectionBias = std::make_unique<ScopedCpuTensorHandle>
89  (TensorInfo({ outputSize }, DataType::Float32));
91  layer->m_ProjectionParameters.m_ProjectionBias->Allocate();
92  }
93 
94  if (layerDesc.m_PeepholeEnabled)
95  {
96  layer->m_PeepholeParameters.m_CellToForgetWeights = std::make_unique<ScopedCpuTensorHandle>
97  (TensorInfo({ numUnits }, DataType::Float32));
98  layer->m_PeepholeParameters.m_CellToOutputWeights = std::make_unique<ScopedCpuTensorHandle>
99  (TensorInfo({ numUnits }, DataType::Float32));
100  layer->m_PeepholeParameters.m_CellToForgetWeights->Allocate();
101  layer->m_PeepholeParameters.m_CellToOutputWeights->Allocate();
102  }
103 
104  // create input and output layers
105  Layer* const input = graph.AddLayer<InputLayer>(0, "input");
106  Layer* const outputStateIn = graph.AddLayer<InputLayer>(1, "outputStateIn");
107  Layer* const cellStateIn = graph.AddLayer<InputLayer>(2, "cellStateIn");
108  Layer* const scratchBuffer = graph.AddLayer<OutputLayer>(0, "scratchBuffer");
109  Layer* const outputStateOut = graph.AddLayer<OutputLayer>(1, "outputStateOut");
110  Layer* const cellStateOut = graph.AddLayer<OutputLayer>(2, "cellStateOut");
111  Layer* const output = graph.AddLayer<OutputLayer>(3, "output");
112 
113  // connect up
114  armnn::TensorInfo lstmTensorInfo1({ batchSize, inputSize }, DataType::Float32);
115  armnn::TensorInfo lstmTensorInfo2({ batchSize, numUnits}, DataType::Float32);
116  armnn::TensorInfo lstmTensorInfo3({ batchSize, outputSize }, DataType::Float32);
117  armnn::TensorInfo lstmTensorInfoScratchBuff({ batchSize, numUnits * (layerDesc.m_CifgEnabled ? 3 : 4) },
119 
120  Connect(input, layer, lstmTensorInfo1, 0, 0);
121  Connect(cellStateIn, layer, lstmTensorInfo2, 0, 1);
122  Connect(outputStateIn, layer, lstmTensorInfo3, 0, 2);
123  Connect(layer, scratchBuffer, lstmTensorInfoScratchBuff, 0, 0);
124  Connect(layer, outputStateOut, lstmTensorInfo3, 1, 0);
125  Connect(layer, cellStateOut, lstmTensorInfo2, 2, 0);
126  Connect(layer, output, lstmTensorInfo3, 3, 0);
127 }
128 
129 }
130 
132 using namespace armnn::optimizations;
133 
134 BOOST_AUTO_TEST_CASE(LSTMValidateTensorShapesFromInputsCIFGDisabledTest)
135 {
136  Graph graph;
137 
138  //Helper function creates graph containing LSTM layer with required input and output layers
139  CreateLSTMLayerHelper(graph, false);
140 
141  //This function used to call ValidateShapesFromInputs();
142  BOOST_CHECK_NO_THROW(graph.InferTensorInfos());
143 }
144 
145 BOOST_AUTO_TEST_CASE(LSTMValidateTensorShapesFromInputsCIFGEnabledTest)
146 {
147  Graph graph;
148 
149  //Helper function creates graph containing LSTM layer with required input and output layers
150  CreateLSTMLayerHelper(graph, true);
151 
152  //This function used to call ValidateShapesFromInputs();
153  BOOST_CHECK_NO_THROW(graph.InferTensorInfos());
154 }
155 
156 BOOST_AUTO_TEST_CASE(InsertConvertersTest)
157 {
158  const armnn::TensorInfo info({ 1, 5, 2, 3 }, armnn::DataType::Float16);
159 
160  armnn::Graph graph;
161 
162  armnn::LayerBindingId inputId = 0;
163 
164  armnn::Layer* head = graph.AddLayer<armnn::OutputLayer>(0, "output");
165 
166  head = graph.InsertNewLayer<armnn::AdditionLayer>(head->GetInputSlot(0), "");
168 
169  graph.InsertNewLayer<armnn::InputLayer>(head->GetInputSlot(1), inputId++, "")
170  ->GetOutputHandler().SetTensorInfo(info);
171 
172  head = graph.InsertNewLayer<armnn::FloorLayer>(head->GetInputSlot(0), "");
174 
175  head = graph.InsertNewLayer<armnn::MemCopyLayer>(head->GetInputSlot(0), "");
177 
178  graph.InsertNewLayer<armnn::InputLayer>(head->GetInputSlot(0), inputId++, "")
179  ->GetOutputHandler().SetTensorInfo(info);
180 
181  // Check graph layer sequence before inserting convert layers
182  BOOST_TEST(CheckSequence(graph.cbegin(),
183  graph.cend(),
184  &IsLayerOfType<armnn::InputLayer>,
185  &IsLayerOfType<armnn::InputLayer>,
186  &IsLayerOfType<armnn::MemCopyLayer>,
187  &IsLayerOfType<armnn::FloorLayer>,
188  &IsLayerOfType<armnn::AdditionLayer>,
189  &IsLayerOfType<armnn::OutputLayer>));
190 
191  // Check layers have Float16 DataType
192  for (auto& layer : graph)
193  {
194  if(layer->GetType()==LayerType::Floor || layer->GetType() == LayerType::Addition)
195  {
196  BOOST_ASSERT(layer->GetOutputSlot(0).GetTensorInfo().GetDataType() == DataType::Float16);
197  BOOST_ASSERT(layer->GetDataType() == DataType::Float16);
198  }
199  }
200 
201  // Insert convert layers either side of unsupported layer
202  for (auto& layer : graph)
203  {
204  if(layer->GetType()==LayerType::Floor || layer->GetType() == LayerType::Addition)
205  {
207  InsertConvertFp32ToFp16LayersAfter(graph, *layer);
208  }
209  }
210 
211  // Check layers have correct DataType after inserting convert layers
212  for (auto& layer : graph)
213  {
214  if (layer->GetType()==LayerType::Floor || layer->GetType() == LayerType::Addition)
215  {
216  BOOST_ASSERT(layer->GetOutputSlot(0).GetTensorInfo().GetDataType() == DataType::Float32);
217  BOOST_ASSERT(layer->GetDataType() == DataType::Float32);
218  }
219  else if (layer->GetType() == LayerType::ConvertFp16ToFp32)
220  {
221  BOOST_ASSERT(layer->GetOutputSlot(0).GetTensorInfo().GetDataType() == DataType::Float32);
222  BOOST_ASSERT(layer->GetDataType() == DataType::Float16);
223  }
224  else if (layer->GetType() == LayerType::ConvertFp32ToFp16)
225  {
226  BOOST_ASSERT(layer->GetOutputSlot(0).GetTensorInfo().GetDataType() == DataType::Float16);
227  BOOST_ASSERT(layer->GetDataType() == DataType::Float32);
228  }
229  }
230 
231  // Check sequence of layers after inserting convert layers
232  BOOST_TEST(CheckSequence(graph.cbegin(),
233  graph.cend(),
234  &IsLayerOfType<armnn::InputLayer>,
235  &IsLayerOfType<armnn::InputLayer>,
236  &IsLayerOfType<armnn::ConvertFp16ToFp32Layer>,
237  &IsLayerOfType<armnn::MemCopyLayer>,
238  &IsLayerOfType<armnn::ConvertFp16ToFp32Layer>,
239  &IsLayerOfType<armnn::FloorLayer>,
240  &IsLayerOfType<armnn::ConvertFp32ToFp16Layer>,
241  &IsLayerOfType<armnn::ConvertFp16ToFp32Layer>,
242  &IsLayerOfType<armnn::AdditionLayer>,
243  &IsLayerOfType<armnn::ConvertFp32ToFp16Layer>,
244  &IsLayerOfType<armnn::OutputLayer>));
245 }
246 
247 
248 
249 void CreateConvolution2dGraph(Graph &graph, const unsigned int* inputShape,
250  const unsigned int* weightsShape, const unsigned int* outputShape,
251  DataLayout dataLayout = DataLayout::NCHW)
252 {
253  armnn::TensorInfo inputInfo(4, inputShape, DataType::Float32);
254  armnn::TensorInfo outputInfo(4, outputShape, DataType::Float32);
255 
256  std::vector<float> weightsVector(90);
257  armnn::ConstTensor weights(armnn::TensorInfo(4, weightsShape, armnn::DataType::Float32), weightsVector);
258 
260  desc.m_BiasEnabled = false;
261  desc.m_StrideX = 1;
262  desc.m_StrideY = 1;
263  desc.m_DataLayout = dataLayout;
264 
265  Layer* input = graph.AddLayer<InputLayer>(0, "input");
266  input->GetOutputSlot().SetTensorInfo(inputInfo);
267 
268  Convolution2dLayer* layer = graph.AddLayer<Convolution2dLayer>(desc, "conv2d");
269  layer->m_Weight = std::make_unique<armnn::ScopedCpuTensorHandle>(weights);
270  layer->GetOutputSlot().SetTensorInfo(outputInfo);
271 
272  Layer* output = graph.AddLayer<OutputLayer>(0, "output");
273  input->GetOutputSlot().Connect(layer->GetInputSlot(0));
274  layer->GetOutputSlot().Connect(output->GetInputSlot(0));
275 }
276 
277 BOOST_AUTO_TEST_CASE(Conv2dValidateTensorShapesFromInputs)
278 {
279  Graph graph;
280  const unsigned int inputShape[] = { 1, 3, 8, 16 };
281  const unsigned int weightsShape[] = { 2, 3, 5, 3 };
282  const unsigned int outputShape[] = { 1, 2, 4, 14 };
283  CreateConvolution2dGraph(graph, inputShape, weightsShape, outputShape);
284 
285  BOOST_CHECK_NO_THROW(graph.InferTensorInfos());
286 }
287 
288 BOOST_AUTO_TEST_CASE(Conv2dValidateTensorShapesFromInputsNhwc)
289 {
290  Graph graph;
291  const unsigned int inputShape[] = { 1, 8, 16, 3 };
292  const unsigned int weightsShape[] = { 2, 5, 3, 3 };
293  const unsigned int outputShape[] = { 1, 4, 14, 2 };
294  CreateConvolution2dGraph(graph, inputShape, weightsShape, outputShape, DataLayout::NHWC);
295 
296  BOOST_CHECK_NO_THROW(graph.InferTensorInfos());
297 }
298 
299 void CreateDepthwiseConvolution2dGraph(Graph &graph, const unsigned int* inputShape,
300  const unsigned int* weightsShape, const unsigned int* outputShape,
301  DataLayout dataLayout = DataLayout::NCHW)
302 {
303  armnn::TensorInfo inputInfo(4, inputShape, DataType::Float32);
304  armnn::TensorInfo outputInfo(4, outputShape, DataType::Float32);
305 
306  std::vector<float> weightsVector(18);
307  armnn::ConstTensor weights(armnn::TensorInfo(4, weightsShape, armnn::DataType::Float32), weightsVector);
308 
310  desc.m_BiasEnabled = false;
311  desc.m_StrideX = 1;
312  desc.m_StrideY = 1;
313  desc.m_DataLayout = dataLayout;
314 
315  Layer* input = graph.AddLayer<InputLayer>(0, "input");
316  input->GetOutputSlot().SetTensorInfo(inputInfo);
317 
318  DepthwiseConvolution2dLayer* layer = graph.AddLayer<DepthwiseConvolution2dLayer>(desc, "depthwiseConv2d");
319  layer->m_Weight = std::make_unique<armnn::ScopedCpuTensorHandle>(weights);
320  layer->GetOutputSlot().SetTensorInfo(outputInfo);
321 
322  Layer* output = graph.AddLayer<OutputLayer>(0, "output");
323  input->GetOutputSlot().Connect(layer->GetInputSlot(0));
324  layer->GetOutputSlot().Connect(output->GetInputSlot(0));
325 }
326 
327 BOOST_AUTO_TEST_CASE(DepthwiseConv2dValidateTensorShapesFromInputs)
328 {
329  Graph graph;
330  const unsigned int inputShape[] = { 1, 2, 3, 3 };
331  const unsigned int weightsShape[] = { 1, 2, 3, 3 };
332  const unsigned int outputShape[] = { 1, 2, 1, 1 };
333  CreateDepthwiseConvolution2dGraph(graph, inputShape, weightsShape, outputShape);
334 
335  BOOST_CHECK_NO_THROW(graph.InferTensorInfos());
336 }
337 
338 BOOST_AUTO_TEST_CASE(DepthwiseConv2dValidateTensorShapesFromInputsNhwc)
339 {
340  Graph graph;
341  const unsigned int inputShape[] = { 1, 3, 3, 2 };
342  const unsigned int weightsShape[] = { 1, 2, 3, 3 };
343  const unsigned int outputShape[] = { 1, 1, 1, 2 };
344  CreateDepthwiseConvolution2dGraph(graph, inputShape, weightsShape, outputShape, DataLayout::NHWC);
345 
346  BOOST_CHECK_NO_THROW(graph.InferTensorInfos());
347 }
348 
349 void CreatePooling2dGraph(Graph &graph, const unsigned int* inputShape, const unsigned int* outputShape,
350  DataLayout dataLayout = DataLayout::NCHW)
351 {
352  armnn::TensorInfo inputInfo(4, inputShape, DataType::Float32);
353  armnn::TensorInfo outputInfo(4, outputShape, DataType::Float32);
354 
355  Pooling2dDescriptor desc;
357  desc.m_PoolWidth = desc.m_PoolHeight = 100;
358  desc.m_StrideX = desc.m_StrideY = 5;
359  desc.m_PadLeft = 50;
360  desc.m_PadRight = 50;
361  desc.m_PadTop = 50;
362  desc.m_PadBottom = 50;
364  desc.m_DataLayout = dataLayout;
365 
366  Layer* input = graph.AddLayer<InputLayer>(0, "input");
367  input->GetOutputSlot().SetTensorInfo(inputInfo);
368 
369  Pooling2dLayer* layer = graph.AddLayer<Pooling2dLayer>(desc, "pooling2d");
370  layer->GetOutputSlot().SetTensorInfo(outputInfo);
371 
372  Layer* output = graph.AddLayer<OutputLayer>(0, "output");
373  input->GetOutputSlot().Connect(layer->GetInputSlot(0));
374  layer->GetOutputSlot().Connect(output->GetInputSlot(0));
375 }
376 
377 BOOST_AUTO_TEST_CASE(Pooling2dValidateTensorShapesFromInputs)
378 {
379  Graph graph;
380  const unsigned int inputShape[] = { 5, 3, 52, 60 };
381  const unsigned int outputShape[] = { 5, 3, 11, 13 };
382  CreatePooling2dGraph(graph, inputShape, outputShape, DataLayout::NCHW);
383 
384  BOOST_CHECK_NO_THROW(graph.InferTensorInfos());
385 }
386 
387 BOOST_AUTO_TEST_CASE(Pooling2dValidateTensorShapesFromInputsNhwc)
388 {
389  Graph graph;
390  const unsigned int inputShape[] = { 5, 52, 60, 3 };
391  const unsigned int outputShape[] = { 5, 11, 13, 3 };
392  CreatePooling2dGraph(graph, inputShape, outputShape, DataLayout::NHWC);
393 
394  BOOST_CHECK_NO_THROW(graph.InferTensorInfos());
395 }
396 
397 void CreateResizeBilinearGraph(Graph &graph, const unsigned int* inputShape, const unsigned int* outputShape,
398  DataLayout dataLayout = DataLayout::NCHW)
399 {
400  TensorInfo inputInfo(4, inputShape, DataType::Float32);
401  TensorInfo outputInfo(4, outputShape, DataType::Float32);
402 
403  ResizeDescriptor desc;
405  desc.m_TargetHeight = 3;
406  desc.m_TargetWidth = 4;
407  desc.m_DataLayout = dataLayout;
408 
409  Layer* input = graph.AddLayer<InputLayer>(0, "input");
410  input->GetOutputSlot().SetTensorInfo(inputInfo);
411 
412  ResizeLayer* layer = graph.AddLayer<ResizeLayer>(desc, "resizeBilinear");
413  layer->GetOutputSlot().SetTensorInfo(outputInfo);
414 
415  Layer* output = graph.AddLayer<OutputLayer>(0, "output");
416  input->GetOutputSlot().Connect(layer->GetInputSlot(0));
417  layer->GetOutputSlot().Connect(output->GetInputSlot(0));
418 }
419 
420 BOOST_AUTO_TEST_CASE(ResizeBilinearValidateTensorShapesFromInputs)
421 {
422  Graph graph;
423  const unsigned int inputShape[] = { 1, 2, 4, 5 };
424  const unsigned int outputShape[] = { 1, 2, 3, 4 };
425  CreateResizeBilinearGraph(graph, inputShape, outputShape);
426 
427  BOOST_CHECK_NO_THROW(graph.InferTensorInfos());
428 }
429 
430 BOOST_AUTO_TEST_CASE(ResizeBilinearValidateTensorShapesFromInputsNhwc)
431 {
432  Graph graph;
433  const unsigned int inputShape[] = { 1, 4, 5, 2 };
434  const unsigned int outputShape[] = { 1, 3, 4, 2 };
435  CreateResizeBilinearGraph(graph, inputShape, outputShape, DataLayout::NHWC);
436 
437  BOOST_CHECK_NO_THROW(graph.InferTensorInfos());
438 }
439 
440 
441 void CreateGatherGraph(Graph& graph, const armnn::TensorInfo& paramsInfo, const armnn::TensorInfo& indicesInfo,
442  const armnn::TensorInfo& outputInfo)
443 {
444  Layer* input0 = graph.AddLayer<InputLayer>(0, "params");
445  input0->GetOutputSlot().SetTensorInfo(paramsInfo);
446 
447  Layer* input1 = graph.AddLayer<InputLayer>(1, "indices");
448  input1->GetOutputSlot().SetTensorInfo(indicesInfo);
449 
450  GatherLayer* layer = graph.AddLayer<GatherLayer>("gather");
451  layer->GetOutputSlot().SetTensorInfo(outputInfo);
452 
453  Layer* output = graph.AddLayer<OutputLayer>(0, "output");
454  input0->GetOutputSlot().Connect(layer->GetInputSlot(0));
455  input1->GetOutputSlot().Connect(layer->GetInputSlot(1));
456  layer->GetOutputSlot().Connect(output->GetInputSlot(0));
457 }
458 
459 BOOST_AUTO_TEST_CASE(GatherValidateTensorShapesFromInputs)
460 {
461  Graph graph;
462  armnn::TensorInfo paramsInfo({10, 5}, DataType::Float32);
463  armnn::TensorInfo indicesInfo({3}, DataType::Signed32);
464  armnn::TensorInfo outputInfo({3, 5}, DataType::Float32);
465 
466  CreateGatherGraph(graph, paramsInfo, indicesInfo, outputInfo);
467 
468  BOOST_CHECK_NO_THROW(graph.InferTensorInfos());
469 }
470 
471 BOOST_AUTO_TEST_CASE(GatherValidateTensorShapesFromInputs1DParams)
472 {
473  Graph graph;
474  armnn::TensorInfo paramsInfo({8}, DataType::Float32);
475  armnn::TensorInfo indicesInfo({5}, DataType::Signed32);
476  armnn::TensorInfo outputInfo( {5}, DataType::Float32);
477 
478  CreateGatherGraph(graph, paramsInfo, indicesInfo, outputInfo);
479 
480  BOOST_CHECK_NO_THROW(graph.InferTensorInfos());
481 }
482 
483 BOOST_AUTO_TEST_CASE(GatherValidateTensorShapesFromInputsMultiDimIndices)
484 {
485  Graph graph;
486  armnn::TensorInfo paramsInfo({3, 2, 5}, DataType::Float32);
487  armnn::TensorInfo indicesInfo({2, 2}, DataType::Signed32);
488  armnn::TensorInfo outputInfo({2, 2, 2, 5}, DataType::Float32);
489 
490  CreateGatherGraph(graph, paramsInfo, indicesInfo, outputInfo);
491 
492  BOOST_CHECK_NO_THROW(graph.InferTensorInfos());
493 }
494 
495 BOOST_AUTO_TEST_CASE(DetectionPostProcessValidateTensorShapes)
496 {
497  Graph graph;
498  armnn::TensorInfo boxEncodingsInfo({1, 10, 4}, DataType::QAsymmU8);
500  std::vector<uint8_t> anchorsVector(40);
502 
503  armnn::TensorInfo detectionBoxesInfo({1, 3, 4}, DataType::QAsymmU8);
504  armnn::TensorInfo detectionScoresInfo({1, 3}, DataType::QAsymmU8);
505  armnn::TensorInfo detectionClassesInfo({1, 3}, DataType::QAsymmU8);
506  armnn::TensorInfo numDetectionInfo({1}, DataType::QAsymmU8);
507 
508  Layer* input0 = graph.AddLayer<InputLayer>(0, "boxEncodings");
509  input0->GetOutputSlot().SetTensorInfo(boxEncodingsInfo);
510 
511  Layer* input1 = graph.AddLayer<InputLayer>(1, "score");
513 
515  descriptor.m_MaxDetections = 3;
516 
517  DetectionPostProcessLayer* layer = graph.AddLayer<DetectionPostProcessLayer>(descriptor, "detectionPostProcess");
518  layer->m_Anchors = std::make_unique<armnn::ScopedCpuTensorHandle>(anchors);
519  layer->GetOutputSlot(0).SetTensorInfo(detectionBoxesInfo);
520  layer->GetOutputSlot(1).SetTensorInfo(detectionScoresInfo);
521  layer->GetOutputSlot(2).SetTensorInfo(detectionClassesInfo);
522  layer->GetOutputSlot(3).SetTensorInfo(numDetectionInfo);
523 
524  input0->GetOutputSlot().Connect(layer->GetInputSlot(0));
525  input1->GetOutputSlot().Connect(layer->GetInputSlot(1));
526 
527  BOOST_CHECK_NO_THROW(graph.InferTensorInfos());
528 }
529 
530 BOOST_AUTO_TEST_CASE(FoldPadLayerIntoConvolution2dLayer)
531 {
532  Graph graph;
533  const unsigned int inputShape[] = { 1, 2, 2, 3 };
534  const unsigned int paddedShape[] = { 1, 6, 6, 3 };
535  const unsigned int weightsShape[] = { 1, 2, 3, 3 };
536  const unsigned int outputShape[] = { 1, 2, 1, 1 };
537 
538 
539  armnn::TensorInfo inputInfo(4, inputShape, DataType::Float32);
540  armnn::TensorInfo paddedInfo(4, paddedShape, DataType::Float32);
541  armnn::TensorInfo outputInfo(4, outputShape, DataType::Float32);
542 
543  Layer* input = graph.AddLayer<InputLayer>(0, "input");
544  input->GetOutputSlot().SetTensorInfo(inputInfo);
545 
546  PadDescriptor padDescriptor({{ 0, 0 }, { 2, 2 }, { 2, 2 }, { 0, 0 }});
547 
548  PadLayer* padLayer = graph.AddLayer<PadLayer>(padDescriptor, "pad");
549  padLayer->GetOutputSlot().SetTensorInfo(paddedInfo);
550 
551  Convolution2dDescriptor convolution2dDescriptor;
552  convolution2dDescriptor.m_BiasEnabled = false;
553  convolution2dDescriptor.m_StrideX = 1;
554  convolution2dDescriptor.m_StrideY = 1;
555  convolution2dDescriptor.m_DataLayout = DataLayout::NHWC;
556 
557  std::vector<float> weightsVector(18);
558  armnn::ConstTensor weights(armnn::TensorInfo(4, weightsShape, armnn::DataType::Float32), weightsVector);
559 
560  Convolution2dLayer* conv2dLayer = graph.AddLayer<Convolution2dLayer>(convolution2dDescriptor,"conv2d");
561  conv2dLayer->m_Weight = std::make_unique<armnn::ScopedCpuTensorHandle>(weights);
562  conv2dLayer->GetOutputSlot().SetTensorInfo(outputInfo);
563 
564  Layer* output = graph.AddLayer<OutputLayer>(0, "output");
565 
566  // Connect up layers - input -> pad -> conv2d -> output
567  input->GetOutputSlot().Connect(padLayer->GetInputSlot(0));
568  padLayer->GetOutputSlot().Connect(conv2dLayer->GetInputSlot(0));
569  conv2dLayer->GetOutputSlot().Connect(output->GetInputSlot(0));
570 
571  auto checkSimpleConv2d = [ ](const armnn::Layer* const layer) -> bool
572  {
573  const auto conv2dLayer = static_cast<const armnn::Convolution2dLayer*>(layer);
574  const auto conv2dLayerParams = conv2dLayer->GetParameters();
575  return IsLayerOfType<armnn::Convolution2dLayer>(layer) &&
576  (layer->GetNameStr() == "conv2d") &&
577  (conv2dLayerParams.m_PadLeft == 0) &&
578  (conv2dLayerParams.m_PadRight == 0) &&
579  (conv2dLayerParams.m_PadTop == 0) &&
580  (conv2dLayerParams.m_PadBottom == 0) &&
581  (conv2dLayerParams.m_BiasEnabled == false) &&
582  (conv2dLayerParams.m_StrideX == 1) &&
583  (conv2dLayerParams.m_StrideY == 1) &&
584  (conv2dLayerParams.m_DataLayout == DataLayout::NHWC);
585  };
586 
587  BOOST_TEST(CheckSequence(graph.cbegin(),
588  graph.cend(),
589  &IsLayerOfType<armnn::InputLayer>,
590  &IsLayerOfType<armnn::PadLayer>,
591  checkSimpleConv2d,
592  &IsLayerOfType<armnn::OutputLayer>));
593 
595 
596  auto checkPadFoldedIntoConv2d = [ ](const armnn::Layer* const layer) -> bool
597  {
598  const auto conv2dLayer = static_cast<const armnn::Convolution2dLayer*>(layer);
599  const auto conv2dLayerParams = conv2dLayer->GetParameters();
600  return IsLayerOfType<armnn::Convolution2dLayer>(layer) &&
601  (layer->GetNameStr() == "folded-pad-into-conv2d") &&
602  (conv2dLayerParams.m_PadLeft == 2) &&
603  (conv2dLayerParams.m_PadRight == 2) &&
604  (conv2dLayerParams.m_PadTop == 2) &&
605  (conv2dLayerParams.m_PadBottom == 2) &&
606  (conv2dLayerParams.m_BiasEnabled == false) &&
607  (conv2dLayerParams.m_StrideX == 1) &&
608  (conv2dLayerParams.m_StrideY == 1) &&
609  (conv2dLayerParams.m_DataLayout == DataLayout::NHWC);
610  };
611 
612  BOOST_TEST(CheckSequence(graph.cbegin(),
613  graph.cend(),
614  &IsLayerOfType<armnn::InputLayer>,
615  checkPadFoldedIntoConv2d,
616  &IsLayerOfType<armnn::OutputLayer>));
617 }
618 
bool m_ProjectionEnabled
Enable/disable the projection layer.
const OutputHandler & GetOutputHandler(unsigned int i=0) const
Definition: Layer.hpp:221
LstmOptPeepholeParameters m_PeepholeParameters
Definition: LstmLayer.hpp:84
This layer represents a pad operation.
Definition: PadLayer.hpp:14
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
std::unique_ptr< ScopedCpuTensorHandle > m_Anchors
A unique pointer to store Anchor values.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
A layer user-provided data can be bound to (e.g. inputs, outputs).
Definition: InputLayer.hpp:13
LayerType GetType() const
Definition: Layer.hpp:259
std::unique_ptr< ScopedCpuTensorHandle > m_RecurrentToInputWeights
A unique pointer to represent 2D weights tensor with dimensions [input_size, num_units].
Definition: LstmLayer.hpp:31
A tensor defined by a TensorInfo (shape and data type) and an immutable backing store.
Definition: Tensor.hpp:199
void CreateDepthwiseConvolution2dGraph(Graph &graph, const unsigned int *inputShape, const unsigned int *weightsShape, const unsigned int *outputShape, DataLayout dataLayout=DataLayout::NCHW)
LstmOptProjectionParameters m_ProjectionParameters
Definition: LstmLayer.hpp:83
float m_ClippingThresCell
Clipping threshold value for the cell state.
std::unique_ptr< ScopedCpuTensorHandle > m_InputToCellWeights
A unique pointer to represent 2D weights tensor with dimensions [input_size, num_units].
Definition: LstmLayer.hpp:59
This layer represents a detection postprocess operator.
uint32_t m_PoolHeight
Pooling height value.
uint32_t m_TargetHeight
Target height value.
std::unique_ptr< ScopedCpuTensorHandle > m_InputGateBias
A unique pointer to represent 1D weights tensor with dimensions [num_units].
Definition: LstmLayer.hpp:35
This layer represents a depthwise convolution 2d operation.
This layer represents a convolution 2d operation.
std::unique_ptr< ScopedCpuTensorHandle > m_InputToOutputWeights
A unique pointer to represent 2D weights tensor with dimensions [input_size, num_units].
Definition: LstmLayer.hpp:61
std::unique_ptr< ScopedCpuTensorHandle > m_Weight
A unique pointer to store Weight values.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
uint32_t m_PadTop
Padding top value in the height dimension.
bool m_CifgEnabled
Enable/disable cifg (coupled input & forget gate).
BOOST_AUTO_TEST_CASE(CheckConvolution2dLayer)
A PadDescriptor for the PadLayer.
This layer represents a memory copy operation.
uint32_t m_PadRight
Padding right value in the width dimension.
LayerT * AddLayer(Args &&... args)
Adds a new layer, of type LayerType, to the graph constructed with the arguments passed.
Definition: Graph.hpp:397
static void Pass(Graph &graph, const Optimizations &optimizations)
Definition: Optimizer.cpp:16
int Connect(InputSlot &destination)
Definition: Layer.cpp:79
This layer represents a pooling 2d operation.
This layer represents a floor operation.
Definition: FloorLayer.hpp:13
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
const std::string & GetNameStr() const
Definition: Layer.hpp:216
std::unique_ptr< ScopedCpuTensorHandle > m_ProjectionWeights
A unique pointer to represent 2D weights tensor with dimensions [output_size, num_units].
Definition: LstmLayer.hpp:41
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
std::unique_ptr< ScopedCpuTensorHandle > m_Weight
A unique pointer to store Weight values.
OptimizeForConnection< PadLayer, Convolution2dLayer, FoldPadIntoConvolution2dImpl > FoldPadIntoConvolution2d
std::vector< ConvertFp32ToFp16Layer * > InsertConvertFp32ToFp16LayersAfter(Graph &graph, Layer &layer)
DataType GetDataType() const
Definition: Layer.cpp:272
bool CheckSequence(const armnn::Graph::ConstIterator first, const armnn::Graph::ConstIterator last)
Definition: TestUtils.hpp:20
std::unique_ptr< ScopedCpuTensorHandle > m_CellToInputWeights
A unique pointer to represent 1D weights tensor with dimensions [num_units].
Definition: LstmLayer.hpp:33
LstmOptCifgParameters m_CifgParameters
Definition: LstmLayer.hpp:82
std::unique_ptr< ScopedCpuTensorHandle > m_CellToForgetWeights
A unique pointer to represent 1D weights tensor with dimensions [num_units].
Definition: LstmLayer.hpp:49
std::unique_ptr< ScopedCpuTensorHandle > m_ForgetGateBias
A unique pointer to represent 1D weights tensor with dimensions [num_units].
Definition: LstmLayer.hpp:69
std::unique_ptr< ScopedCpuTensorHandle > m_RecurrentToCellWeights
A unique pointer to represent 2D weights tensor with dimensions [output_size, num_units].
Definition: LstmLayer.hpp:65
uint32_t m_PoolWidth
Pooling width value.
bool m_PeepholeEnabled
Enable/disable peephole.
armnn::TensorInfo scoresInfo({ 1, 6, 3 }, armnn::DataType::Float32)
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
std::unique_ptr< ScopedCpuTensorHandle > m_InputToForgetWeights
A unique pointer to represent 2D weights tensor with dimensions [input_size, num_units].
Definition: LstmLayer.hpp:57
const Parameters & GetParameters() const
The padding fields don't count and are ignored.
void InferTensorInfos()
Definition: Graph.cpp:493
LstmBasicParameters m_BasicParameters
Definition: LstmLayer.hpp:81
void SetTensorInfo(const TensorInfo &tensorInfo) override
Definition: Layer.cpp:58
An LstmDescriptor for the LstmLayer.
uint32_t m_PadLeft
Padding left value in the width dimension.
A layer user-provided data can be bound to (e.g. inputs, outputs).
Definition: OutputLayer.hpp:13
std::unique_ptr< ScopedCpuTensorHandle > m_ProjectionBias
A unique pointer to represent 1D weights tensor with dimensions [output_size].
Definition: LstmLayer.hpp:43
PaddingMethod m_PaddingMethod
The padding method to be used. (Exclude, IgnoreValue).
This layer represents a Gather operator.
Definition: GatherLayer.hpp:14
bool m_BiasEnabled
Enable/disable bias.
ConstIterator cbegin() const
Returns const iterator pointing to the beginning of the list. Lowercase for range-based for loops...
Definition: Graph.hpp:168
Optimizer::Optimizations MakeOptimizations(Args &&... args)
Definition: Optimizer.hpp:43
This layer represents a LSTM operation.
Definition: LstmLayer.hpp:77
BOOST_AUTO_TEST_SUITE_END()
uint32_t m_TargetWidth
Target width value.
std::unique_ptr< ScopedCpuTensorHandle > m_RecurrentToForgetWeights
A unique pointer to represent 2D weights tensor with dimensions [output_size, num_units].
Definition: LstmLayer.hpp:63
void CreateGatherGraph(Graph &graph, const armnn::TensorInfo &paramsInfo, const armnn::TensorInfo &indicesInfo, const armnn::TensorInfo &outputInfo)
std::unique_ptr< ScopedCpuTensorHandle > m_CellBias
A unique pointer to represent 1D weights tensor with dimensions [num_units].
Definition: LstmLayer.hpp:71
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
void Connect(armnn::IConnectableLayer *from, armnn::IConnectableLayer *to, const armnn::TensorInfo &tensorInfo, unsigned int fromIndex, unsigned int toIndex)
Definition: TestUtils.cpp:12
DataLayout
Definition: Types.hpp:48
PoolingAlgorithm m_PoolType
The pooling algorithm to use (Max, Average, L2).
bool m_BiasEnabled
Enable/disable bias.
std::unique_ptr< ScopedCpuTensorHandle > m_InputToInputWeights
A unique pointer to represent 2D weights tensor with dimensions [input_size, num_units].
Definition: LstmLayer.hpp:29
void SetTensorInfo(const TensorInfo &tensorInfo)
Sets the TensorInfo used by this output handler.
std::vector< float > anchors({ 0.5f, 0.5f, 1.0f, 1.0f, 0.5f, 0.5f, 1.0f, 1.0f, 0.5f, 0.5f, 1.0f, 1.0f, 0.5f, 10.5f, 1.0f, 1.0f, 0.5f, 10.5f, 1.0f, 1.0f, 0.5f, 100.5f, 1.0f, 1.0f })
This layer represents an addition operation.
void CreatePooling2dGraph(Graph &graph, const unsigned int *inputShape, const unsigned int *outputShape, DataLayout dataLayout=DataLayout::NCHW)
DataType GetDataType() const
Definition: Tensor.hpp:95
BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
void CreateConvolution2dGraph(Graph &graph, const unsigned int *inputShape, const unsigned int *weightsShape, const unsigned int *outputShape, DataLayout dataLayout=DataLayout::NCHW)
std::vector< ConvertFp16ToFp32Layer * > InsertConvertFp16ToFp32LayersBefore(Graph &graph, Layer &layer, bool expectCorrectInputType)
A Pooling2dDescriptor for the Pooling2dLayer.
ConstIterator cend() const
Returns const iterator pointing to the end of the list. Lowercase for range-based for loops...
Definition: Graph.hpp:170
uint32_t m_ActivationFunc
The activation function to use. 0: None, 1: Relu, 3: Relu6, 4: Tanh, 6: Sigmoid.
void CreateResizeBilinearGraph(Graph &graph, const unsigned int *inputShape, const unsigned int *outputShape, DataLayout dataLayout=DataLayout::NCHW)
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
A DepthwiseConvolution2dDescriptor for the DepthwiseConvolution2dLayer.
A Convolution2dDescriptor for the Convolution2dLayer.
std::unique_ptr< ScopedCpuTensorHandle > m_OutputGateBias
A unique pointer to represent 1D weights tensor with dimensions [num_units].
Definition: LstmLayer.hpp:73
uint32_t m_PadBottom
Padding bottom value in the height dimension.
const TensorInfo & GetTensorInfo() const override
Definition: Layer.cpp:63
float m_ClippingThresProj
Clipping threshold value for the projection.
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
std::unique_ptr< ScopedCpuTensorHandle > m_CellToOutputWeights
A unique pointer to represent 1D weights tensor with dimensions [num_units].
Definition: LstmLayer.hpp:51
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
int LayerBindingId
Type of identifiers for bindable layers (inputs, outputs).
Definition: Types.hpp:168
const OutputSlot & GetOutputSlot(unsigned int index=0) const override
Definition: Layer.hpp:312
A ResizeDescriptor for the ResizeLayer.
std::unique_ptr< ScopedCpuTensorHandle > m_RecurrentToOutputWeights
A unique pointer to represent 2D weights tensor with dimensions [output_size, num_units].
Definition: LstmLayer.hpp:67
const InputSlot & GetInputSlot(unsigned int index) const override
Definition: Layer.hpp:310
uint32_t m_MaxDetections
Maximum numbers of detections.
This layer represents a resize operation.
Definition: ResizeLayer.hpp:13