ArmNN 21.02
ShapeInferenceTests.cpp
//
// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include <boost/test/unit_test.hpp>

#include <armnn/Tensor.hpp>
#include <Graph.hpp>
#include <InternalTypes.hpp>
#include <layers/FullyConnectedLayer.hpp>
#include <backendsCommon/CpuTensorHandle.hpp>

#include <string>

BOOST_AUTO_TEST_SUITE(ShapeInferenceTests)
using namespace armnn;
namespace
{

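// Masks for the TensorShape (numDimensions, sizes, specificity) constructor: 'true' marks a
// dimension whose size is specified, 'false' one whose size is unknown. The rows cover no
// dimensions specified, each single dimension specified, and all dimensions specified.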
constexpr const bool maskPermutations[6][4] = {{false, false, false, false},
                                               {true,  false, false, false},
                                               {false, true,  false, false},
                                               {false, false, true,  false},
                                               {false, false, false, true },
                                               {true,  true,  true,  true }};

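// Adds the layer under test to the graph, then creates one Float32 InputLayer per entry in
// inputShapes and connects it to the corresponding input slot of that layer.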
template<typename LayerT, typename... Args>
LayerT* BuildGraph(Graph* graph, const std::vector<TensorShape>& inputShapes, Args &&... args)
{
    auto layer = graph->AddLayer<LayerT>(std::forward<Args>(args)...);

    uint32_t inputCount = 0;
    for (auto inputShape : inputShapes)
    {
        TensorInfo inputTensorInfo(inputShape, DataType::Float32);

        auto input = graph->AddLayer<InputLayer>(static_cast<int>(inputCount), "input");
        input->GetOutputSlot().SetTensorInfo(inputTensorInfo);
        input->GetOutputSlot().Connect(layer->GetInputSlot(inputCount));
        inputCount++;
    }

    return layer;
}

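// Runs shape inference on the connected layer in two phases:
//  1. every output is set to Dimensionality::NotSpecified; ValidateOnly must throw, while
//     InferAndValidate must produce the expected output shapes;
//  2. outputs are set to Dimensionality::Specified shapes and inference is re-run once per
//     entry in maskPermutations, i.e. with different dimensions flagged as unknown.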
template<typename LayerT>
void RunShapeInferenceTest(LayerT* const layer,
                           const std::vector<std::initializer_list<unsigned int>> dimensionSizeLists)
{
    std::vector<unsigned int> numDimensions;
    std::vector<TensorShape> expectedOutputShapes;

    for (auto dimensionSizeList : dimensionSizeLists)
    {
        numDimensions.emplace_back(dimensionSizeList.size());
        expectedOutputShapes.emplace_back(TensorShape(dimensionSizeList));
    }

    const unsigned int outputSize = layer->GetNumOutputSlots();

    const auto runTestWithMask = [&](const bool maskPermutations[])
    {
        for (unsigned int i = 0; i < outputSize; ++i)
        {
            layer->GetOutputSlot(i).SetTensorInfo({{numDimensions[i], dimensionSizeLists[i].begin(), maskPermutations},
                                                   DataType::Float32});
        }

        layer->ValidateTensorShapesFromInputs();

        for (unsigned int i = 0; i < outputSize; ++i)
        {
            BOOST_CHECK(layer->GetOutputSlot(i).GetTensorInfo().GetShape() == expectedOutputShapes[i]);
        }
    };

    // Test inference with Dimensionality::NotSpecified
    for (unsigned int j = 0; j < outputSize; ++j)
    {
        layer->GetOutputSlot(j).SetTensorInfo({TensorShape(Dimensionality::NotSpecified), DataType::Float32});
    }

    layer->SetShapeInferenceMethod(ShapeInferenceMethod::ValidateOnly);

    BOOST_CHECK_THROW(layer->ValidateTensorShapesFromInputs(), LayerValidationException);

    layer->SetShapeInferenceMethod(ShapeInferenceMethod::InferAndValidate);
    layer->ValidateTensorShapesFromInputs();

    for (unsigned int i = 0; i < outputSize; ++i)
    {
        BOOST_CHECK(layer->GetOutputSlot(i).GetTensorInfo().GetShape() == expectedOutputShapes[i]);
    }

    // Test inference with Dimensionality::Specified and various combinations of dimensions of unknown size
    for (unsigned int i = 0; i < numDimensions[0]; ++i)
    {
        runTestWithMask(maskPermutations[i]);
    }

    // maskPermutations[5] equates to all dimensions being known
    runTestWithMask(maskPermutations[5]);
}

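// Convenience wrapper: builds the graph for the given layer type and runs the shape inference
// checks against the expected output shapes, e.g. (illustrative call only, not a test that
// exists in this suite):
//   CreateGraphAndRunTest<SoftmaxLayer>({{ 2, 4 }}, {{ 2, 4 }}, SoftmaxDescriptor(), "softmax");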
template<typename LayerT, typename... Args>
void CreateGraphAndRunTest(const std::vector<TensorShape>& inputShapes,
                           const std::vector<std::initializer_list<unsigned int>> dimensionSizeLists,
                           Args &&... args)
{
    Graph graph(true);

    auto layer = BuildGraph<LayerT>(&graph, inputShapes, std::forward<Args>(args)...);

    RunShapeInferenceTest<LayerT>(layer, dimensionSizeLists);
}

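// Checks that the "ShapeInferenceMethod" backend option passed to INetwork::Create() controls
// whether an unspecified output shape is inferred from the inputs ("InferAndValidate": true)
// or left as set (false, or option absent).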
BOOST_AUTO_TEST_CASE(NetworkOptionsTest)
{
    BackendOptions ShapeInferenceMethodOption("ShapeInferenceMethod",
    {
        { "InferAndValidate", true }
    });

    INetworkPtr network = INetwork::Create({ShapeInferenceMethodOption});
    TensorInfo tensorInfo({ 5, 7, 6, 2 }, DataType::Float32);

    auto inputLayer = network->AddInputLayer(1, "inputLayer");
    inputLayer->GetOutputSlot(0).SetTensorInfo(tensorInfo);

    ActivationDescriptor descriptor;
    descriptor.m_Function = ActivationFunction::Abs;
    auto activationLayer = network->AddActivationLayer(descriptor, "activation");

    inputLayer->GetOutputSlot(0).Connect(activationLayer->GetInputSlot(0));
    activationLayer->GetOutputSlot(0).SetTensorInfo({TensorShape{Dimensionality::NotSpecified}, DataType::Float32});

    BOOST_CHECK_NO_THROW(activationLayer->GetOutputSlot(0).IsTensorInfoSet());

    BOOST_CHECK(activationLayer->GetOutputSlot(0).GetTensorInfo() == tensorInfo);


    ShapeInferenceMethodOption = BackendOptions("ShapeInferenceMethod",
    {
        { "InferAndValidate", false }
    });

    network = INetwork::Create({ShapeInferenceMethodOption});

    inputLayer = network->AddInputLayer(1, "inputLayer");
    inputLayer->GetOutputSlot(0).SetTensorInfo(tensorInfo);

    activationLayer = network->AddActivationLayer(descriptor, "activation");

    inputLayer->GetOutputSlot(0).Connect(activationLayer->GetInputSlot(0));
    activationLayer->GetOutputSlot(0).SetTensorInfo({TensorShape{Dimensionality::NotSpecified}, DataType::Float32});

    BOOST_CHECK_NO_THROW(activationLayer->GetOutputSlot(0).IsTensorInfoSet());

    network = INetwork::Create();

    inputLayer = network->AddInputLayer(1, "inputLayer");
    inputLayer->GetOutputSlot(0).SetTensorInfo(tensorInfo);

    activationLayer = network->AddActivationLayer(descriptor, "activation");

    inputLayer->GetOutputSlot(0).Connect(activationLayer->GetInputSlot(0));
    activationLayer->GetOutputSlot(0).SetTensorInfo({TensorShape{Dimensionality::NotSpecified}, DataType::Float32});

    BOOST_CHECK_NO_THROW(activationLayer->GetOutputSlot(0).IsTensorInfoSet());
}

BOOST_AUTO_TEST_CASE(AbsTest)
{
    ActivationDescriptor descriptor;
    descriptor.m_Function = ActivationFunction::Abs;
    CreateGraphAndRunTest<ActivationLayer>({{ 5, 7, 6, 2 }}, {{ 5, 7, 6, 2 }}, descriptor, "activation");
}

BOOST_AUTO_TEST_CASE(AdditionTest)
{
    CreateGraphAndRunTest<AdditionLayer>({{ 5, 7, 6, 2 }, { 5, 7, 6, 2 }}, {{ 5, 7, 6, 2 }}, "add");
}

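// ArgMinMax removes the reduced axis (axis 1 here), so { 1, 3, 2, 4 } becomes { 1, 2, 4 }.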
BOOST_AUTO_TEST_CASE(ArgMinMaxTest)
{
    armnn::ArgMinMaxDescriptor descriptor;
    descriptor.m_Function = ArgMinMaxFunction::Min;
    descriptor.m_Axis = 1;

    CreateGraphAndRunTest<ArgMinMaxLayer>({{ 1, 3, 2, 4 }}, {{ 1, 2, 4 }}, descriptor, "argMinMax");
}

BOOST_AUTO_TEST_CASE(BatchNormalizationTest)
{
    BatchNormalizationDescriptor descriptor;
    CreateGraphAndRunTest<BatchNormalizationLayer>({{ 1, 2, 3, 2 }}, {{ 1, 2, 3, 2 }}, descriptor, "batchNorm");
}

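// With a 2x2 block and no crops (NHWC), BatchToSpaceNd moves the batch of 4 into the spatial
// dimensions: { 4, 2, 2, 1 } -> { 1, 4, 4, 1 }.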
BOOST_AUTO_TEST_CASE(BatchToSpaceNdTest)
{
    BatchToSpaceNdDescriptor descriptor;

    std::vector<unsigned int> blockShape {2, 2};
    std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};

    descriptor.m_BlockShape = blockShape;
    descriptor.m_Crops = crops;
    descriptor.m_DataLayout = DataLayout::NHWC;

    CreateGraphAndRunTest<BatchToSpaceNdLayer>({{ 4, 2, 2, 1 }}, {{ 1, 4, 4, 1 }}, descriptor, "batchtospacend");
}

BOOST_AUTO_TEST_CASE(ComparisionTest)
{
    ComparisonDescriptor descriptor;
    descriptor.m_Operation = ComparisonOperation::Equal;
    CreateGraphAndRunTest<ComparisonLayer>({{ 5, 7, 6, 2 }, { 5, 7, 6, 2 }},
                                           {{ 5, 7, 6, 2 }},
                                           descriptor,
                                           "comparision");
}

BOOST_AUTO_TEST_CASE(ConcatTest)
{
    ConcatDescriptor descriptor(2, 3);

    descriptor.SetViewOriginCoord(0, 0, 0);
    descriptor.SetViewOriginCoord(1, 0, 1);

    CreateGraphAndRunTest<ConcatLayer>({{ 1, 2, 1 }, { 1, 2, 1 }}, {{ 2, 2, 1 }}, descriptor, "concat");
}

BOOST_AUTO_TEST_CASE(ConstantTesst)
{
    Graph graph;
    TensorShape outputShape{ 1, 1, 3, 3 };
    auto layer = BuildGraph<ConstantLayer>(&graph, {}, "constant");

    const float Datum = 0.0f;
    ConstTensor output0({outputShape, DataType::Float32}, &Datum);
    layer->m_LayerOutput = std::make_unique<ScopedCpuTensorHandle>(output0);

    layer->GetOutputSlot(0).SetTensorInfo({{1, 1, 3, 3}, DataType::Float32});

    layer->ValidateTensorShapesFromInputs();

    BOOST_CHECK(layer->GetOutputSlot(0).GetTensorInfo().GetShape() == outputShape);
}

BOOST_AUTO_TEST_CASE(ConvertBf16ToFp32Test)
{
    CreateGraphAndRunTest<ConvertBf16ToFp32Layer>({{ 5, 7, 6, 2 }}, {{ 5, 7, 6, 2 }}, "floor");
}

BOOST_AUTO_TEST_CASE(ConvertFp16ToBf16Test)
{
    const TensorShape tensorShape{5, 7, 6, 2};
    CreateGraphAndRunTest<ConvertFp32ToBf16Layer>({{ 5, 7, 6, 2 }}, {{ 5, 7, 6, 2 }}, "floor");
}

BOOST_AUTO_TEST_CASE(ConvertFp16ToFp32Test)
{
    CreateGraphAndRunTest<ConvertFp16ToFp32Layer>({{ 5, 7, 6, 2 }}, {{ 5, 7, 6, 2 }}, "floor");
}

BOOST_AUTO_TEST_CASE(ConvertFp32ToFp16Test)
{
    CreateGraphAndRunTest<ConvertFp32ToFp16Layer>({{ 5, 7, 6, 2 }}, {{ 5, 7, 6, 2 }}, "floor");
}

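// Expected spatial size for the convolution below: the dilated 3x3 kernel spans
// (3 - 1) * 3 + 1 = 7 elements, so each output dimension is (10 - 7) / 1 + 1 = 4,
// giving { 1, 1, 4, 4 }.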
BOOST_AUTO_TEST_CASE(Convolution2dTest)
{
    const TensorShape inputShape{1, 1, 10, 10};

    Graph graph;

    Convolution2dDescriptor descriptor;

    descriptor.m_PadLeft = 0;
    descriptor.m_PadTop = 0;
    descriptor.m_PadRight = 0;
    descriptor.m_PadBottom = 0;
    descriptor.m_StrideX = 1;
    descriptor.m_StrideY = 1;
    descriptor.m_DilationX = 3;
    descriptor.m_DilationY = 3;

    auto layer = BuildGraph<Convolution2dLayer>(&graph,
                                                {inputShape},
                                                descriptor,
                                                "conv2d");

    const float Datum = 0.0f;
    ConstTensor weights({{1, 1, 3, 3}, DataType::Float32}, &Datum);
    layer->m_Weight = std::make_unique<ScopedCpuTensorHandle>(weights);

    RunShapeInferenceTest<Convolution2dLayer>(layer, {{ 1, 1, 4, 4 }});
}

BOOST_AUTO_TEST_CASE(DebugLayerTest)
{
    const TensorShape tensorShape;
    CreateGraphAndRunTest<DebugLayer>({{ 5, 7, 6, 2 }}, {{ 5, 7, 6, 2 }}, "debug");
}

BOOST_AUTO_TEST_CASE(DepthToSpaceTest)
{
    DepthToSpaceDescriptor descriptor;

    descriptor.m_BlockSize = 2;
    descriptor.m_DataLayout = DataLayout::NHWC;

    CreateGraphAndRunTest<DepthToSpaceLayer>({{ 1, 1, 1, 8}}, {{ 1, 2, 2, 2 }}, descriptor, "depthtospace");
}

BOOST_AUTO_TEST_CASE(DepthwiseConvolutionTest)
{
    DepthwiseConvolution2dDescriptor descriptor;

    descriptor.m_StrideX = 2;
    descriptor.m_StrideY = 1;
    descriptor.m_PadLeft = 0;
    descriptor.m_PadRight = 0;
    descriptor.m_PadTop = 1;
    descriptor.m_PadBottom = 1;
    descriptor.m_DilationX = 0;
    descriptor.m_DilationY = 0;
    descriptor.m_DataLayout = DataLayout::NHWC;
    descriptor.m_BiasEnabled = false;

    Graph graph;

    auto layer = BuildGraph<DepthwiseConvolution2dLayer>(&graph,
                                                         {{ 8, 16, 2, 1 }},
                                                         descriptor,
                                                         "depthwiseconv2d");

    const float Datum = 0.0f;
    ConstTensor weights({{ 2, 5, 3, 2 }, DataType::Float32}, &Datum);
    layer->m_Weight = std::make_unique<ScopedCpuTensorHandle>(weights);

    RunShapeInferenceTest<DepthwiseConvolution2dLayer>(layer, {{ 8, 18, 1, 2 }});
}

BOOST_AUTO_TEST_CASE(DequantizeTest)
{
    const TensorShape tensorShape{5, 7, 6, 2};
    CreateGraphAndRunTest<DequantizeLayer>({{ 5, 7, 6, 2 }}, {{ 5, 7, 6, 2 }}, "dequantize");
}

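// DetectionPostProcess has four outputs (detection boxes, classes, scores and the number of
// detections); with m_MaxDetections = 3 the expected shapes are { 1, 3, 4 }, { 1, 3 }, { 1, 3 }
// and { 1 }.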
BOOST_AUTO_TEST_CASE(DetectionPostProcessTest)
{
    const TensorShape detectionBoxesInfo{ 1, 3, 4 };
    const TensorShape detectionScoresInfo{ 1, 3, 4 };
    const TensorShape detectionClassesInfo{ 1, 3, 4 };

    DetectionPostProcessDescriptor descriptor;
    descriptor.m_UseRegularNms = true;
    descriptor.m_MaxDetections = 3;
    descriptor.m_MaxClassesPerDetection = 1;
    descriptor.m_DetectionsPerClass = 1;
    descriptor.m_NmsScoreThreshold = 0.0;
    descriptor.m_NmsIouThreshold = 0.5;
    descriptor.m_NumClasses = 2;
    descriptor.m_ScaleY = 10.0;
    descriptor.m_ScaleX = 10.0;
    descriptor.m_ScaleH = 5.0;
    descriptor.m_ScaleW = 5.0;

    const float Datum = 0.0f;
    ConstTensor anchorsTensor({{1, 1, 3, 3}, DataType::Float32}, &Datum);

    Graph graph;

    auto layer = BuildGraph<DetectionPostProcessLayer>(&graph,
                                                       {detectionBoxesInfo, detectionScoresInfo},
                                                       descriptor,
                                                       "detectionpostprocess");

    layer->m_Anchors = std::make_unique<ScopedCpuTensorHandle>(anchorsTensor);

    RunShapeInferenceTest<DetectionPostProcessLayer>(layer, {{ 1, 3, 4 }, { 1, 3 }, { 1, 3 }, { 1 }});
}

BOOST_AUTO_TEST_CASE(FakeQuantizationTest)
{
    FakeQuantizationDescriptor descriptor;
    descriptor.m_Max = 1;
    descriptor.m_Min = 1;
    CreateGraphAndRunTest<FakeQuantizationLayer>({{ 5, 7, 6, 2 }}, {{ 5, 7, 6, 2 }}, descriptor, "fakequantization");
}

BOOST_AUTO_TEST_CASE(FloorTest)
{
    const TensorShape tensorShape{5, 7, 6, 2};
    CreateGraphAndRunTest<FloorLayer>({{ 5, 7, 6, 2 }}, {{ 5, 7, 6, 2 }}, "floor");
}

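// FullyConnected keeps the batch dimension and takes the channel count from the weights tensor,
// so the expected output below is { 1, outputChannels }.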
BOOST_AUTO_TEST_CASE(FullyConnectedTest)
{
    Graph graph;

    const unsigned int inputWidth = 3u;
    const unsigned int inputHeight = 2u;
    const unsigned int inputChannels = 1u;
    const unsigned int outputChannels = 2u;

    auto layer = BuildGraph<FullyConnectedLayer>(&graph,
                                                 {{1, inputChannels, inputHeight, inputWidth}},
                                                 FullyConnectedDescriptor(),
                                                 "fc");


    const float Datum = 0.0f;
    ConstTensor weights({{inputChannels, outputChannels}, DataType::Float32}, &Datum);
    layer->m_Weight = std::make_unique<ScopedCpuTensorHandle>(weights);

    RunShapeInferenceTest<FullyConnectedLayer>(layer, {{ 1, outputChannels }});
}

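// Gather replaces the gathered axis (axis 0, the descriptor default) with the shape of the
// indices tensor: params { 7, 6, 2 } gathered with indices { 2, 3 } gives { 2, 3, 6, 2 }.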
BOOST_AUTO_TEST_CASE(GatherTest)
{
    CreateGraphAndRunTest<GatherLayer>({{ 7, 6, 2}, {2,3}}, {{ 2, 3, 6, 2 }}, GatherDescriptor(), "gather");
}

BOOST_AUTO_TEST_CASE(InstanceNormalizationTest)
{
    const TensorShape tensorShape{5, 7, 6, 2};

    CreateGraphAndRunTest<InstanceNormalizationLayer>({{ 5, 7, 6, 2 }}, {{ 5, 7, 6, 2 }},
                                                      InstanceNormalizationDescriptor(),
                                                      "instancenorm");
}

BOOST_AUTO_TEST_CASE(L2NormalizationTest)
{
    const TensorShape tensorShape{5, 7, 6, 2};

    CreateGraphAndRunTest<L2NormalizationLayer>({{ 5, 7, 6, 2 }}, {{ 5, 7, 6, 2 }},
                                                L2NormalizationDescriptor(),
                                                "l2norm");
}

BOOST_AUTO_TEST_CASE(LogSoftMaxTest)
{
    const TensorShape tensorShape{5, 7, 6, 2};

    CreateGraphAndRunTest<LogSoftmaxLayer>({{ 5, 7, 6, 2 }}, {{ 5, 7, 6, 2 }}, LogSoftmaxDescriptor(), "logsoftmax");
}

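// The LSTM layer has four outputs. The cell-state input { 2, 20 } implies numUnits = 20, and
// with CIFG disabled the expected scratch buffer is { 2, 4 * 20 } = { 2, 80 }; the output state,
// cell state and output are all expected to be { 2, 20 }.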
BOOST_AUTO_TEST_CASE(LstmTest)
{
    const TensorShape inputShape{2, 5};
    const TensorShape inputCellState{2, 20};
    const TensorShape expectedOutputShape{2, 20};

    LstmDescriptor descriptor;

    descriptor.m_ActivationFunc = 4;
    descriptor.m_CifgEnabled = false;
    descriptor.m_PeepholeEnabled = false;
    descriptor.m_ProjectionEnabled = false;

    Graph graph;
    auto layer = BuildGraph<LstmLayer>(&graph, {inputShape, inputCellState, inputCellState}, descriptor, "lstm");

    float Datum = 0.0f;
    ConstTensor constTensor({{ 2, 5, 3, 2 }, DataType::Float32}, &Datum);

    layer->m_BasicParameters.m_InputToCellWeights = std::make_unique<ScopedCpuTensorHandle>(constTensor);
    layer->m_BasicParameters.m_InputToForgetWeights = std::make_unique<ScopedCpuTensorHandle>(constTensor);
    layer->m_BasicParameters.m_CellBias = std::make_unique<ScopedCpuTensorHandle>(constTensor);
    layer->m_BasicParameters.m_ForgetGateBias = std::make_unique<ScopedCpuTensorHandle>(constTensor);
    layer->m_CifgParameters.m_InputGateBias = std::make_unique<ScopedCpuTensorHandle>(constTensor);
    layer->m_BasicParameters.m_OutputGateBias = std::make_unique<ScopedCpuTensorHandle>(constTensor);
    layer->m_BasicParameters.m_RecurrentToForgetWeights = std::make_unique<ScopedCpuTensorHandle>(constTensor);
    layer->m_BasicParameters.m_RecurrentToCellWeights = std::make_unique<ScopedCpuTensorHandle>(constTensor);
    layer->m_BasicParameters.m_InputToOutputWeights = std::make_unique<ScopedCpuTensorHandle>(constTensor);
    layer->m_BasicParameters.m_RecurrentToOutputWeights = std::make_unique<ScopedCpuTensorHandle>(constTensor);
    layer->m_CifgParameters.m_RecurrentToInputWeights = std::make_unique<ScopedCpuTensorHandle>(constTensor);
    layer->m_CifgParameters.m_InputToInputWeights = std::make_unique<ScopedCpuTensorHandle>(constTensor);

    RunShapeInferenceTest<LstmLayer>(layer, {{2, 80}, {2, 20}, {2, 20}, {2, 20}});
}

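// Mean with m_Axis = {0} and keep-dims off drops the reduced dimension:
// { 5, 7, 6, 2 } -> { 7, 6, 2 }.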
BOOST_AUTO_TEST_CASE(MeanLayerTest)
{
    MeanDescriptor descriptor;
    descriptor.m_Axis = {0};

    CreateGraphAndRunTest<MeanLayer>({{ 5, 7, 6, 2 }}, {{ 7, 6, 2 }}, descriptor, "mean");
}

BOOST_AUTO_TEST_CASE(MemCopyTest)
{
    CreateGraphAndRunTest<MemCopyLayer>({{ 5, 7, 6, 2 }}, {{ 5, 7, 6, 2 }}, "memcopy");
}

BOOST_AUTO_TEST_CASE(MemImportTest)
{
    CreateGraphAndRunTest<MemImportLayer>({{ 5, 7, 6, 2 }}, {{ 5, 7, 6, 2 }}, "memomport");
}

BOOST_AUTO_TEST_CASE(MergeTest)
{
    const TensorShape tensorShape{ 5, 7, 6, 2 };
    CreateGraphAndRunTest<MergeLayer>({ { 5, 7, 6, 2 }, { 5, 7, 6, 2 } }, {{ 5, 7, 6, 2 }}, "merge");
}

BOOST_AUTO_TEST_CASE(NormalizationTest)
{
    const TensorShape tensorShape{5, 7, 6, 2};

    CreateGraphAndRunTest<NormalizationLayer>({{ 5, 7, 6, 2 }}, {{ 5, 7, 6, 2 }}, NormalizationDescriptor(), "l2norm");
}

BOOST_AUTO_TEST_CASE(PermuteTest)
{
    PermuteDescriptor descriptor;
    descriptor.m_DimMappings = {0U, 2U, 3U, 1U};

    CreateGraphAndRunTest<PermuteLayer>({{ 1, 2, 2, 3 }}, {{ 1, 3, 2, 2 }}, descriptor, "permute");
}

BOOST_AUTO_TEST_CASE(Pooling2dTest)
{
    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolType = armnn::PoolingAlgorithm::Max;
    descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
    descriptor.m_StrideX = 2;
    descriptor.m_StrideY = 4;
    descriptor.m_PadLeft = descriptor.m_PadRight = 3;
    descriptor.m_PadTop = descriptor.m_PadBottom = 0;
    descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;
    descriptor.m_OutputShapeRounding = armnn::OutputShapeRounding::Floor;

    CreateGraphAndRunTest<Pooling2dLayer>({{ 1, 2, 8, 13 }}, {{ 1, 2, 2, 8 }}, descriptor, "pooling2d");
}

BOOST_AUTO_TEST_CASE(QLstmTest)
{
    const TensorShape inputShape{2, 5};
    const TensorShape inputCellState{2, 20};
    const TensorShape expectedOutputShape{2, 20};

    QLstmDescriptor descriptor;

    descriptor.m_CifgEnabled = false;
    descriptor.m_PeepholeEnabled = false;
    descriptor.m_ProjectionEnabled = false;

    Graph graph;
    auto layer = BuildGraph<QLstmLayer>(&graph, {inputShape, inputCellState, inputCellState}, descriptor, "qlstm");

    float Datum = 0.0f;
    ConstTensor constTensor({{ 2, 5, 3, 2 }, DataType::Float32}, &Datum);

    layer->m_BasicParameters.m_InputToCellWeights = std::make_unique<ScopedCpuTensorHandle>(constTensor);
    layer->m_BasicParameters.m_InputToForgetWeights = std::make_unique<ScopedCpuTensorHandle>(constTensor);
    layer->m_BasicParameters.m_CellBias = std::make_unique<ScopedCpuTensorHandle>(constTensor);
    layer->m_BasicParameters.m_ForgetGateBias = std::make_unique<ScopedCpuTensorHandle>(constTensor);
    layer->m_CifgParameters.m_InputGateBias = std::make_unique<ScopedCpuTensorHandle>(constTensor);
    layer->m_BasicParameters.m_OutputGateBias = std::make_unique<ScopedCpuTensorHandle>(constTensor);
    layer->m_BasicParameters.m_RecurrentToForgetWeights = std::make_unique<ScopedCpuTensorHandle>(constTensor);
    layer->m_BasicParameters.m_RecurrentToCellWeights = std::make_unique<ScopedCpuTensorHandle>(constTensor);
    layer->m_BasicParameters.m_InputToOutputWeights = std::make_unique<ScopedCpuTensorHandle>(constTensor);
    layer->m_BasicParameters.m_RecurrentToOutputWeights = std::make_unique<ScopedCpuTensorHandle>(constTensor);
    layer->m_CifgParameters.m_RecurrentToInputWeights = std::make_unique<ScopedCpuTensorHandle>(constTensor);
    layer->m_CifgParameters.m_InputToInputWeights = std::make_unique<ScopedCpuTensorHandle>(constTensor);

    RunShapeInferenceTest<QLstmLayer>(layer, {{2, 20}, {2, 20}, {2, 20}});
}

BOOST_AUTO_TEST_CASE(QuantizedLstmTest)
{
    const TensorShape inputShape{2, 5};
    const TensorShape inputCellState{2, 20};
    const TensorShape expectedOutputShape{2, 20};

    Graph graph;
    auto layer = BuildGraph<QuantizedLstmLayer>(&graph, {inputShape, inputCellState, inputCellState}, "quatizedlstm");

    float Datum = 0.0f;
    ConstTensor constTensor({{ 2, 5, 3, 2 }, DataType::Float32}, &Datum);

    layer->m_QuantizedLstmParameters.m_InputToCellWeights = std::make_unique<ScopedCpuTensorHandle>(constTensor);
    layer->m_QuantizedLstmParameters.m_InputToForgetWeights = std::make_unique<ScopedCpuTensorHandle>(constTensor);
    layer->m_QuantizedLstmParameters.m_CellBias = std::make_unique<ScopedCpuTensorHandle>(constTensor);
    layer->m_QuantizedLstmParameters.m_ForgetGateBias = std::make_unique<ScopedCpuTensorHandle>(constTensor);
    layer->m_QuantizedLstmParameters.m_InputGateBias = std::make_unique<ScopedCpuTensorHandle>(constTensor);
    layer->m_QuantizedLstmParameters.m_OutputGateBias = std::make_unique<ScopedCpuTensorHandle>(constTensor);
    layer->m_QuantizedLstmParameters.m_RecurrentToForgetWeights = std::make_unique<ScopedCpuTensorHandle>(constTensor);
    layer->m_QuantizedLstmParameters.m_RecurrentToCellWeights = std::make_unique<ScopedCpuTensorHandle>(constTensor);
    layer->m_QuantizedLstmParameters.m_InputToOutputWeights = std::make_unique<ScopedCpuTensorHandle>(constTensor);
    layer->m_QuantizedLstmParameters.m_RecurrentToOutputWeights = std::make_unique<ScopedCpuTensorHandle>(constTensor);
    layer->m_QuantizedLstmParameters.m_RecurrentToInputWeights = std::make_unique<ScopedCpuTensorHandle>(constTensor);
    layer->m_QuantizedLstmParameters.m_InputToInputWeights = std::make_unique<ScopedCpuTensorHandle>(constTensor);

    RunShapeInferenceTest<QuantizedLstmLayer>(layer, {{2, 20}, {2, 20}, {2, 20}});
}

BOOST_AUTO_TEST_CASE(QuantizeTest)
{
    const TensorShape tensorShape { 5, 4, 7, 6 };
    CreateGraphAndRunTest<QuantizeLayer>({{ 5, 7, 6, 2 }}, {{ 5, 7, 6, 2 }}, "mean");
}

BOOST_AUTO_TEST_CASE(RankTest)
{
    // due to rank having a scalar output we need a custom test
    const TensorShape expectedOutputs(Dimensionality::Scalar);

    Graph graph;
    auto layer = BuildGraph<RankLayer>(&graph, {{ 1, 1, 1, 1 }}, "rank");

    layer->GetOutputSlot(0).SetTensorInfo({TensorShape(Dimensionality::NotSpecified), DataType::Float32});

    BOOST_CHECK_THROW(
            layer->ValidateTensorShapesFromInputs(), LayerValidationException);

    layer->SetShapeInferenceMethod(ShapeInferenceMethod::InferAndValidate);

    layer->ValidateTensorShapesFromInputs();

    BOOST_CHECK(layer->GetOutputSlot(0).GetTensorInfo().GetShape() == expectedOutputs);

    layer->GetOutputSlot(0).SetTensorInfo({TensorShape(Dimensionality::Scalar), DataType::Float32});

    layer->ValidateTensorShapesFromInputs();

    BOOST_CHECK(layer->GetOutputSlot(0).GetTensorInfo().GetShape() == expectedOutputs);
}

BOOST_AUTO_TEST_CASE(ReshapeTest)
{
    ReshapeDescriptor descriptor;

    descriptor.m_TargetShape = { 1, 1, 1, 8 };

    CreateGraphAndRunTest<ReshapeLayer>({{ 2, 2, 2, 2 }}, {{ 1, 1, 1, 8 }}, descriptor, "reshape");
}

BOOST_AUTO_TEST_CASE(ResizeTest)
{
    ResizeDescriptor descriptor;

    descriptor.m_TargetHeight = 6;
    descriptor.m_TargetWidth = 2;

    CreateGraphAndRunTest<ResizeLayer>({{ 1, 7, 6, 2 }}, {{ 1, 7, 6, 2 }}, descriptor, "resize");
}

BOOST_AUTO_TEST_CASE(SliceTest)
{
    SliceDescriptor descriptor;
    descriptor.m_Begin = { 1, 0, 1, 2 };
    descriptor.m_Size  = { 2, 1, 2, 3 };

    CreateGraphAndRunTest<SliceLayer>({{ 3, 2, 3, 5 }}, {{ 2, 1, 2, 3 }}, descriptor, "mean");
}

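// SpaceToBatchNd is the inverse of BatchToSpaceNd above: with a 2x2 block and no padding the
// 4x4 spatial extent collapses to 2x2 while the batch grows to 4: { 1, 4, 4, 1 } -> { 4, 2, 2, 1 }.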
BOOST_AUTO_TEST_CASE(SpaceToBatchNdTest)
{
    SpaceToBatchNdDescriptor descriptor;

    std::vector<unsigned int> blockShape {2, 2};
    std::vector<std::pair<unsigned int, unsigned int>> padlist = {{0, 0}, {0, 0}};

    descriptor.m_BlockShape = blockShape;
    descriptor.m_PadList = padlist;
    descriptor.m_DataLayout = DataLayout::NHWC;

    CreateGraphAndRunTest<SpaceToBatchNdLayer>({{ 1, 4, 4, 1 }}, {{ 4, 2, 2, 1 }}, descriptor, "spacetobatchnd");
}

BOOST_AUTO_TEST_CASE(SpaceToDepthTest)
{
    SpaceToDepthDescriptor descriptor;

    descriptor.m_BlockSize = 2;
    descriptor.m_DataLayout = DataLayout::NHWC;

    CreateGraphAndRunTest<SpaceToDepthLayer>({{ 1, 2, 2, 2 }}, {{ 1, 1, 1, 8}}, descriptor, "spacetodepth");
}

BOOST_AUTO_TEST_CASE(SplitterTest)
{
    SplitterDescriptor descriptor(2, 3);

    descriptor.SetViewSize(0, 0, 1);
    descriptor.SetViewSize(0, 1, 2);
    descriptor.SetViewSize(0, 2, 2);

    descriptor.SetViewSize(1, 0, 1);
    descriptor.SetViewSize(1, 1, 2);
    descriptor.SetViewSize(1, 2, 2);

    CreateGraphAndRunTest<SplitterLayer>({{ 2, 2, 2 }}, {{ 1, 2, 2 }, { 1, 2, 2 }}, descriptor, "splitter");
}

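// Stack inserts a new dimension of size m_NumInputs at m_Axis, so stacking two { 3, 2, 3 }
// tensors along axis 0 yields { 2, 3, 2, 3 }.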
BOOST_AUTO_TEST_CASE(StackTest)
{
    StackDescriptor descriptor;

    descriptor.m_Axis = 0;
    descriptor.m_NumInputs = 2;
    descriptor.m_InputShape = { 3, 2, 3 };

    CreateGraphAndRunTest<StackLayer>({{ 3, 2, 3 }, { 3, 2, 3 }}, {{ 2, 3, 2, 3 }}, descriptor, "stack");
}

BOOST_AUTO_TEST_CASE(StridedSliceTest)
{
    StridedSliceDescriptor descriptor;

    descriptor.m_Begin  = {0, 0, 0, 0};
    descriptor.m_End    = {3, 2, 3, 1};
    descriptor.m_Stride = {2, 2, 2, 1};

    CreateGraphAndRunTest<StridedSliceLayer>({{ 3, 2, 3, 1 }}, {{ 2, 1, 2, 1 }}, descriptor, "stridedslice");
}

BOOST_AUTO_TEST_CASE(Switchtest)
{
    CreateGraphAndRunTest<SwitchLayer>({{ 3, 2, 3, 1 }, { 3, 2, 3, 1 }}, {{ 3, 2, 3, 1 }, { 3, 2, 3, 1 }}, "switch");
}

BOOST_AUTO_TEST_CASE(TransposeConvolution2dTest)
{
    StridedSliceDescriptor descriptor;

    descriptor.m_Begin  = {0, 0, 0, 0};
    descriptor.m_End    = {3, 2, 3, 1};
    descriptor.m_Stride = {2, 2, 2, 1};

    CreateGraphAndRunTest<StridedSliceLayer>({{ 3, 2, 3, 1 }}, {{ 2, 1, 2, 1 }}, descriptor, "t");
}

BOOST_AUTO_TEST_CASE(TransposeTest)
{
    armnn::TransposeDescriptor descriptor;
    descriptor.m_DimMappings = {0U, 3U, 1U, 2U};

    CreateGraphAndRunTest<TransposeLayer>({{ 1, 2, 2, 3 }}, {{ 1, 3, 2, 2 }}, descriptor, "stridedslice");
}

BOOST_AUTO_TEST_SUITE_END()
}