ArmNN
 22.02
ShapeInferenceTests.cpp
Go to the documentation of this file.
1 //
2 // Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
6 #include <doctest/doctest.h>
7 
8 #include <armnn/Tensor.hpp>
9 #include <Graph.hpp>
10 #include <InternalTypes.hpp>
14 
15 #include <string>
16 
17 TEST_SUITE("ShapeInferenceTests")
18 {
19 using namespace armnn;
20 namespace
21 {
22 
// Dimension-specificity masks used when constructing partially-specified
// TensorShapes (see runTestWithMask): row 0 marks no dimension as specified,
// rows 1-4 mark exactly one of the four dimensions, row 5 marks all four.
constexpr const bool maskPermutations[6][4] = {{false, false, false, false},
                                               {true, false, false, false},
                                               {false, true, false, false},
                                               {false, false, true, false},
                                               {false, false, false, true},
                                               {true, true, true, true}};
29 
30 template<typename LayerT, typename... Args>
31 LayerT* BuildGraph(Graph* graph, const std::vector<TensorShape>& inputShapes, Args &&... args)
32 {
33  auto layer = graph->AddLayer<LayerT>(std::forward<Args>(args)...);
34 
35  uint32_t inputCount = 0;
36  for (auto inputShape : inputShapes)
37  {
38  TensorInfo inputTensorInfo(inputShape, DataType::Float32);
39 
40  auto input = graph->AddLayer<InputLayer>(static_cast<int>(inputCount), "input");
41  input->GetOutputSlot().SetTensorInfo(inputTensorInfo);
42  input->GetOutputSlot().Connect(layer->GetInputSlot(inputCount));
43  inputCount++;
44  }
45 
46  return layer;
47 }
48 
49 template<typename LayerT>
50 void RunShapeInferenceTest(LayerT* const layer,
51  const std::vector<std::initializer_list<unsigned int>> dimensionSizeLists)
52 {
53  std::vector<unsigned int> numDimensions;
54  std::vector<TensorShape> expectedOutputShapes;
55 
56  for (auto dimensionSizeList : dimensionSizeLists)
57  {
58  numDimensions.emplace_back(dimensionSizeList.size());
59  expectedOutputShapes.emplace_back(TensorShape(dimensionSizeList));
60  }
61 
62  const unsigned int outputSize = layer->GetNumOutputSlots();
63 
64  const auto runTestWithMask = [&](const bool maskPermutations[])
65  {
66  for (unsigned int i = 0; i < outputSize; ++i)
67  {
68  layer->GetOutputSlot(i).SetTensorInfo({{numDimensions[i], dimensionSizeLists[i].begin(), maskPermutations},
70  }
71 
72  layer->ValidateTensorShapesFromInputs();
73 
74  for (unsigned int i = 0; i < outputSize; ++i)
75  {
76  CHECK(layer->GetOutputSlot(i).GetTensorInfo().GetShape() == expectedOutputShapes[i]);
77  }
78  };
79 
80  // Test inference with Dimensionality::NotSpecified
81  for (unsigned int j = 0; j < outputSize; ++j)
82  {
83  layer->GetOutputSlot(j).SetTensorInfo({TensorShape(Dimensionality::NotSpecified), DataType::Float32});
84  }
85 
86  layer->SetShapeInferenceMethod(ShapeInferenceMethod::ValidateOnly);
87 
88  CHECK_THROWS_AS(layer->ValidateTensorShapesFromInputs(), LayerValidationException);
89 
90  layer->SetShapeInferenceMethod(ShapeInferenceMethod::InferAndValidate);
91  layer->ValidateTensorShapesFromInputs();
92 
93  for (unsigned int i = 0; i < outputSize; ++i)
94  {
95  CHECK(layer->GetOutputSlot(i).GetTensorInfo().GetShape() == expectedOutputShapes[i]);
96  }
97 
98  // Test inference with Dimensionality::Specified and various combinations of dimensions of unknown size
99  for (unsigned int i = 0; i < numDimensions[0]; ++i)
100  {
101  runTestWithMask(maskPermutations[i]);
102  }
103 
104  // maskPermutations[5] equates to all dimensions being known
105  runTestWithMask(maskPermutations[5]);
106 }
107 
108 template<typename LayerT, typename... Args>
109 void CreateGraphAndRunTest(const std::vector<TensorShape>& inputShapes,
110  const std::vector<std::initializer_list<unsigned int>> dimensionSizeLists,
111  Args &&... args)
112 {
113  Graph graph(true);
114 
115  auto layer = BuildGraph<LayerT>(&graph, inputShapes, std::forward<Args>(args)...);
116 
117  RunShapeInferenceTest<LayerT>(layer, dimensionSizeLists);
118 }
119 
120 TEST_CASE("NetworkOptionsTest")
121 {
122  BackendOptions ShapeInferenceMethodOption("ShapeInferenceMethod",
123  {
124  { "InferAndValidate", true }
125  });
126 
127  INetworkPtr network = INetwork::Create({ShapeInferenceMethodOption});
128  TensorInfo tensorInfo({ 5, 7, 6, 2 }, DataType::Float32);
129 
130  auto inputLayer = network->AddInputLayer(1, "inputLayer");
131  inputLayer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
132 
133  ActivationDescriptor descriptor;
134  descriptor.m_Function = ActivationFunction::Abs;
135  auto activationLayer = network->AddActivationLayer(descriptor, "activation");
136 
137  inputLayer->GetOutputSlot(0).Connect(activationLayer->GetInputSlot(0));
138  activationLayer->GetOutputSlot(0).SetTensorInfo({TensorShape{Dimensionality::NotSpecified}, DataType::Float32});
139 
140  CHECK_NOTHROW(activationLayer->GetOutputSlot(0).IsTensorInfoSet());
141 
142  CHECK(activationLayer->GetOutputSlot(0).GetTensorInfo() == tensorInfo);
143 
144 
145  ShapeInferenceMethodOption = BackendOptions("ShapeInferenceMethod",
146  {
147  { "InferAndValidate", false }
148  });
149 
150  network = INetwork::Create({ShapeInferenceMethodOption});
151 
152  inputLayer = network->AddInputLayer(1, "inputLayer");
153  inputLayer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
154 
155  activationLayer = network->AddActivationLayer(descriptor, "activation");
156 
157  inputLayer->GetOutputSlot(0).Connect(activationLayer->GetInputSlot(0));
158  activationLayer->GetOutputSlot(0).SetTensorInfo({TensorShape{Dimensionality::NotSpecified}, DataType::Float32});
159 
160  CHECK_NOTHROW(activationLayer->GetOutputSlot(0).IsTensorInfoSet());
161 
162  network = INetwork::Create();
163 
164  inputLayer = network->AddInputLayer(1, "inputLayer");
165  inputLayer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
166 
167  activationLayer = network->AddActivationLayer(descriptor, "activation");
168 
169  inputLayer->GetOutputSlot(0).Connect(activationLayer->GetInputSlot(0));
170  activationLayer->GetOutputSlot(0).SetTensorInfo({TensorShape{Dimensionality::NotSpecified}, DataType::Float32});
171 
172  CHECK_NOTHROW(activationLayer->GetOutputSlot(0).IsTensorInfoSet());
173 }
174 
175 TEST_CASE("AbsTest")
176 {
177  ActivationDescriptor descriptor;
178  descriptor.m_Function = ActivationFunction::Abs;
179  CreateGraphAndRunTest<ActivationLayer>({{ 5, 7, 6, 2 }}, {{ 5, 7, 6, 2 }}, descriptor, "activation");
180 }
181 
182 TEST_CASE("AdditionTest")
183 {
184  CreateGraphAndRunTest<AdditionLayer>({{ 5, 7, 6, 2 }, { 5, 7, 6, 2 }}, {{ 5, 7, 6, 2 }}, "add");
185 }
186 
187 TEST_CASE("ArgMinMaxTest")
188 {
189  armnn::ArgMinMaxDescriptor descriptor;
190  descriptor.m_Function = ArgMinMaxFunction::Min;
191  descriptor.m_Axis = 1;
192 
193  CreateGraphAndRunTest<ArgMinMaxLayer>({{ 1, 3, 2, 4 }}, {{ 1, 2, 4 }}, descriptor, "argMinMax");
194 }
195 
196 TEST_CASE("BatchNormalizationTest")
197 {
198  BatchNormalizationDescriptor descriptor;
199  CreateGraphAndRunTest<BatchNormalizationLayer>({{ 1, 2, 3, 2 }}, {{ 1, 2, 3, 2 }}, descriptor, "batchNorm");
200 }
201 
202 TEST_CASE("BatchToSpaceNdTest")
203 {
204  BatchToSpaceNdDescriptor descriptor;
205 
206  std::vector<unsigned int> blockShape {2, 2};
207  std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};
208 
209  descriptor.m_BlockShape = blockShape;
210  descriptor.m_Crops = crops;
211  descriptor.m_DataLayout = DataLayout::NHWC;
212 
213  CreateGraphAndRunTest<BatchToSpaceNdLayer>({{ 4, 2, 2, 1 }}, {{ 1, 4, 4, 1 }}, descriptor, "batchtospacend");
214 }
215 
216 TEST_CASE("ComparisionTest")
217 {
218  ComparisonDescriptor descriptor;
220  CreateGraphAndRunTest<ComparisonLayer>({{ 5, 7, 6, 2 }, { 5, 7, 6, 2 }},
221  {{ 5, 7, 6, 2 }},
222  descriptor,
223  "comparision");
224 }
225 
226 TEST_CASE("ConcatTest")
227 {
228  ConcatDescriptor descriptor(2, 3);
229 
230  descriptor.SetViewOriginCoord(0, 0, 0);
231  descriptor.SetViewOriginCoord(1, 0, 1);
232 
233  CreateGraphAndRunTest<ConcatLayer>({{ 1, 2, 1 }, { 1, 2, 1 }}, {{ 2, 2, 1 }}, descriptor, "concat");
234 }
235 
236 TEST_CASE("ConstantTest")
237 {
238  Graph graph;
239  TensorShape outputShape{ 1, 1, 3, 3 };
240  auto layer = BuildGraph<ConstantLayer>(&graph, {}, "constant");
241 
242  const float Datum = 0.0f;
243  ConstTensor output0({outputShape, DataType::Float32, 0.0f, 0, true}, &Datum);
244  layer->m_LayerOutput = std::make_unique<ScopedTensorHandle>(output0);
245 
246  layer->GetOutputSlot(0).SetTensorInfo({{1, 1, 3, 3}, DataType::Float32});
247 
248  layer->ValidateTensorShapesFromInputs();
249 
250  CHECK(layer->GetOutputSlot(0).GetTensorInfo().GetShape() == outputShape);
251 }
252 
253 TEST_CASE("ConvertBf16ToFp32Test")
254 {
255  CreateGraphAndRunTest<ConvertBf16ToFp32Layer>({{ 5, 7, 6, 2 }}, {{ 5, 7, 6, 2 }}, "floor");
256 }
257 
258 TEST_CASE("ConvertFp16ToBf16Test")
259 {
260  const TensorShape tensorShape{5, 7, 6, 2};
261  CreateGraphAndRunTest<ConvertFp32ToBf16Layer>({{ 5, 7, 6, 2 }}, {{ 5, 7, 6, 2 }}, "floor");
262 }
263 
264 TEST_CASE("ConvertFp16ToFp32Test")
265 {
266  CreateGraphAndRunTest<ConvertFp16ToFp32Layer>({{ 5, 7, 6, 2 }}, {{ 5, 7, 6, 2 }}, "floor");
267 }
268 
269 TEST_CASE("ConvertFp32ToFp16Test")
270 {
271  CreateGraphAndRunTest<ConvertFp32ToFp16Layer>({{ 5, 7, 6, 2 }}, {{ 5, 7, 6, 2 }}, "floor");
272 }
273 
274 TEST_CASE("Convolution2dTest")
275 {
276  const TensorShape inputShape{1, 1, 10, 10};
277 
278  Graph graph;
279 
280  Convolution2dDescriptor descriptor;
281 
282  descriptor.m_PadLeft = 0;
283  descriptor.m_PadTop = 0;
284  descriptor.m_PadRight = 0;
285  descriptor.m_PadBottom = 0;
286  descriptor.m_StrideX = 1;
287  descriptor.m_StrideY = 1;
288  descriptor.m_DilationX = 3;
289  descriptor.m_DilationY = 3;
290 
291  auto layer = BuildGraph<Convolution2dLayer>(&graph,
292  {inputShape},
293  descriptor,
294  "conv2d");
295 
296  const float Datum = 0.0f;
297  ConstTensor weights({{1, 1, 3, 3}, DataType::Float32, 0.0f, 0, true}, &Datum);
298  layer->m_Weight = std::make_unique<ScopedTensorHandle>(weights);
299 
300  RunShapeInferenceTest<Convolution2dLayer>(layer, {{ 1, 1, 4, 4 }});
301 }
302 
303 TEST_CASE("DebugLayerTest")
304 {
305  const TensorShape tensorShape;
306  CreateGraphAndRunTest<DebugLayer>({{ 5, 7, 6, 2 }}, {{ 5, 7, 6, 2 }}, "debug");
307 }
308 
309 TEST_CASE("DepthToSpaceTest")
310 {
311  DepthToSpaceDescriptor descriptor;
312 
313  descriptor.m_BlockSize = 2;
314  descriptor.m_DataLayout = DataLayout::NHWC;
315 
316  CreateGraphAndRunTest<DepthToSpaceLayer>({{ 1, 1, 1, 8}}, {{ 1, 2, 2, 2 }}, descriptor, "depthtospace");
317 }
318 
319 TEST_CASE("DepthwiseConvolutionTest")
320 {
322 
323  descriptor.m_StrideX = 2;
324  descriptor.m_StrideY = 1;
325  descriptor.m_PadLeft = 0;
326  descriptor.m_PadRight = 0;
327  descriptor.m_PadTop = 1;
328  descriptor.m_PadBottom = 1;
329  descriptor.m_DilationX = 0;
330  descriptor.m_DilationY = 0;
331  descriptor.m_DataLayout = DataLayout::NHWC;
332  descriptor.m_BiasEnabled = false;
333 
334  Graph graph;
335 
336  auto layer = BuildGraph<DepthwiseConvolution2dLayer>(&graph,
337  {{ 8, 16, 2, 1 }},
338  descriptor,
339  "depthwiseconv2d");
340 
341  const float Datum = 0.0f;
342  ConstTensor weights({{ 2, 5, 3, 2 }, DataType::Float32, 0.0f, 0, true}, &Datum);
343  layer->m_Weight = std::make_unique<ScopedTensorHandle>(weights);
344 
345  RunShapeInferenceTest<DepthwiseConvolution2dLayer>(layer, {{ 8, 18, 1, 2 }});
346 }
347 
348 TEST_CASE("DequantizeTest")
349 {
350  const TensorShape tensorShape{5, 7, 6, 2};
351  CreateGraphAndRunTest<DequantizeLayer>({{ 5, 7, 6, 2 }}, {{ 5, 7, 6, 2 }}, "dequantize");
352 }
353 
354 TEST_CASE("DetectionPostProcessTest")
355 {
356  const TensorShape detectionBoxesInfo{ 1, 3, 4 };
357  const TensorShape detectionScoresInfo{ 1, 3, 4 };
358  const TensorShape detectionClassesInfo{ 1, 3, 4 };
359 
361  descriptor.m_UseRegularNms = true;
362  descriptor.m_MaxDetections = 3;
363  descriptor.m_MaxClassesPerDetection = 1;
364  descriptor.m_DetectionsPerClass =1;
365  descriptor.m_NmsScoreThreshold = 0.0;
366  descriptor.m_NmsIouThreshold = 0.5;
367  descriptor.m_NumClasses = 2;
368  descriptor.m_ScaleY = 10.0;
369  descriptor.m_ScaleX = 10.0;
370  descriptor.m_ScaleH = 5.0;
371  descriptor.m_ScaleW = 5.0;
372 
373  const float Datum = 0.0f;
374  ConstTensor anchorsTensor({{1, 1, 3, 3}, DataType::Float32, 0.0f, 0, true}, &Datum);
375 
376  Graph graph;
377 
378  auto layer = BuildGraph<DetectionPostProcessLayer>(&graph,
379  {detectionBoxesInfo, detectionScoresInfo},
380  descriptor,
381  "detectionpostprocess");
382 
383  layer->m_Anchors = std::make_unique<ScopedTensorHandle>(anchorsTensor);
384 
385  RunShapeInferenceTest<DetectionPostProcessLayer>(layer, {{ 1, 3, 4 }, { 1, 3 }, { 1, 3 }, { 1 }});
386 }
387 
388 TEST_CASE("FakeQuantizationTest")
389 {
390  FakeQuantizationDescriptor descriptor;
391  descriptor.m_Max = 1;
392  descriptor.m_Min = 1;
393  CreateGraphAndRunTest<FakeQuantizationLayer>({{ 5, 7, 6, 2 }}, {{ 5, 7, 6, 2 }}, descriptor, "fakequantization");
394 }
395 
396 TEST_CASE("FloorTest")
397 {
398  const TensorShape tensorShape{5, 7, 6, 2};
399  CreateGraphAndRunTest<FloorLayer>({{ 5, 7, 6, 2 }}, {{ 5, 7, 6, 2 }}, "floor");
400 }
401 
402 TEST_CASE("FullyConnectedTest")
403 {
404  const unsigned int inputWidth = 3u;
405  const unsigned int inputHeight = 2u;
406  const unsigned int inputChannels = 1u;
407  const unsigned int outputChannels = 2u;
408 
409  CreateGraphAndRunTest<FullyConnectedLayer>({{ 1, inputChannels, inputHeight, inputWidth }, // input
410  { inputChannels, outputChannels }}, // weights
411  {{ 1, outputChannels }}, // output
413  "fc");
414 }
415 
416 TEST_CASE("GatherTest")
417 {
418  CreateGraphAndRunTest<GatherLayer>({{ 7, 6, 2}, {2,3}}, {{ 2, 3, 6, 2 }}, GatherDescriptor(), "gather");
419 }
420 
421 TEST_CASE("InstanceNormalizationTest")
422 {
423  const TensorShape tensorShape{5, 7, 6, 2};
424 
425  CreateGraphAndRunTest<InstanceNormalizationLayer>({{ 5, 7, 6, 2 }}, {{ 5, 7, 6, 2 }},
427  "instancenorm");
428 }
429 
430 TEST_CASE("L2NormalizationTest")
431 {
432  const TensorShape tensorShape{5, 7, 6, 2};
433 
434  CreateGraphAndRunTest<L2NormalizationLayer>({{ 5, 7, 6, 2 }}, {{ 5, 7, 6, 2 }},
436  "l2norm");
437 }
438 
439 TEST_CASE("LogSoftMaxTest")
440 {
441  const TensorShape tensorShape{5, 7, 6, 2};
442 
443  CreateGraphAndRunTest<LogSoftmaxLayer>({{ 5, 7, 6, 2 }}, {{ 5, 7, 6, 2 }}, LogSoftmaxDescriptor(), "logsoftmax");
444 }
445 
446 TEST_CASE("LstmTest")
447 {
448  const TensorShape inputShape{2, 5};
449  const TensorShape inputCellState{2, 20};
450  const TensorShape expectedOutputShape{2, 20};
451 
452  LstmDescriptor descriptor;
453 
454  descriptor.m_ActivationFunc = 4;
455  descriptor.m_CifgEnabled = false;
456  descriptor.m_PeepholeEnabled = false;
457  descriptor.m_ProjectionEnabled = false;
458 
459  Graph graph;
460  auto layer = BuildGraph<LstmLayer>(&graph, {inputShape, inputCellState, inputCellState}, descriptor, "lstm");
461 
462  float Datum = 0.0f;
463  ConstTensor constTensor({{ 2, 5, 3, 2 }, DataType::Float32, 0.0f, 0, true}, &Datum);
464 
465  layer->m_BasicParameters.m_InputToCellWeights = std::make_unique<ScopedTensorHandle>(constTensor);
466  layer->m_BasicParameters.m_InputToForgetWeights = std::make_unique<ScopedTensorHandle>(constTensor);
467  layer->m_BasicParameters.m_CellBias = std::make_unique<ScopedTensorHandle>(constTensor);
468  layer->m_BasicParameters.m_ForgetGateBias = std::make_unique<ScopedTensorHandle>(constTensor);
469  layer->m_CifgParameters.m_InputGateBias = std::make_unique<ScopedTensorHandle>(constTensor);
470  layer->m_BasicParameters.m_OutputGateBias = std::make_unique<ScopedTensorHandle>(constTensor);
471  layer->m_BasicParameters.m_RecurrentToForgetWeights = std::make_unique<ScopedTensorHandle>(constTensor);
472  layer->m_BasicParameters.m_RecurrentToCellWeights = std::make_unique<ScopedTensorHandle>(constTensor);
473  layer->m_BasicParameters.m_InputToOutputWeights = std::make_unique<ScopedTensorHandle>(constTensor);
474  layer->m_BasicParameters.m_RecurrentToOutputWeights = std::make_unique<ScopedTensorHandle>(constTensor);
475  layer->m_CifgParameters.m_RecurrentToInputWeights = std::make_unique<ScopedTensorHandle>(constTensor);
476  layer->m_CifgParameters.m_InputToInputWeights = std::make_unique<ScopedTensorHandle>(constTensor);
477 
478  RunShapeInferenceTest<LstmLayer>(layer, {{2, 80}, {2, 20}, {2, 20}, {2, 20}});
479 }
480 
481 TEST_CASE("MeanLayerTest")
482 {
483  MeanDescriptor descriptor;
484  descriptor.m_Axis = {0};
485 
486  CreateGraphAndRunTest<MeanLayer>({{ 5, 7, 6, 2 }}, {{ 7, 6, 2 }}, descriptor, "mean");
487 }
488 
489 TEST_CASE("MemCopyTest")
490 {
491  CreateGraphAndRunTest<MemCopyLayer>({{ 5, 7, 6, 2 }}, {{ 5, 7, 6, 2 }}, "memcopy");
492 }
493 
494 TEST_CASE("MemImportTest")
495 {
496  CreateGraphAndRunTest<MemImportLayer>({{ 5, 7, 6, 2 }}, {{ 5, 7, 6, 2 }}, "memomport");
497 }
498 
499 TEST_CASE("MergeTest")
500 {
501  const TensorShape tensorShape{ 5, 7, 6, 2 };
502  CreateGraphAndRunTest<MergeLayer>({ { 5, 7, 6, 2 }, { 5, 7, 6, 2 } }, {{ 5, 7, 6, 2 }}, "merge");
503 }
504 
505 TEST_CASE("NormalizationTest")
506 {
507  const TensorShape tensorShape{5, 7, 6, 2};
508 
509  CreateGraphAndRunTest<NormalizationLayer>({{ 5, 7, 6, 2 }}, {{ 5, 7, 6, 2 }}, NormalizationDescriptor(), "l2norm");
510 }
511 
512 TEST_CASE("PermuteTest")
513 {
514  PermuteDescriptor descriptor;
515  descriptor.m_DimMappings = {0U, 2U, 3U, 1U};
516 
517  CreateGraphAndRunTest<PermuteLayer>({{ 1, 2, 2, 3 }}, {{ 1, 3, 2, 2 }}, descriptor, "permute");
518 }
519 
520 TEST_CASE("Pooling2dTest")
521 {
522  armnn::Pooling2dDescriptor descriptor;
524  descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
525  descriptor.m_StrideX = 2;
526  descriptor.m_StrideY = 4;
527  descriptor.m_PadLeft = descriptor.m_PadRight = 3;
528  descriptor.m_PadTop = descriptor.m_PadBottom = 0;
531 
532  CreateGraphAndRunTest<Pooling2dLayer>({{ 1, 2, 8, 13 }}, {{ 1, 2, 2, 8 }}, descriptor, "pooling2d");
533 }
534 
535 TEST_CASE("QLstmTest")
536 {
537  const TensorShape inputShape{2, 5};
538  const TensorShape inputCellState{2, 20};
539  const TensorShape expectedOutputShape{2, 20};
540 
541  QLstmDescriptor descriptor;
542 
543  descriptor.m_CifgEnabled = false;
544  descriptor.m_PeepholeEnabled = false;
545  descriptor.m_ProjectionEnabled = false;
546 
547  Graph graph;
548  auto layer = BuildGraph<QLstmLayer>(&graph, {inputShape, inputCellState, inputCellState}, descriptor, "qlstm");
549 
550  float Datum = 0.0f;
551  ConstTensor constTensor({{ 2, 5, 3, 2 }, DataType::Float32, 0.0f, 0, true}, &Datum);
552 
553  layer->m_BasicParameters.m_InputToCellWeights = std::make_unique<ScopedTensorHandle>(constTensor);
554  layer->m_BasicParameters.m_InputToForgetWeights = std::make_unique<ScopedTensorHandle>(constTensor);
555  layer->m_BasicParameters.m_CellBias = std::make_unique<ScopedTensorHandle>(constTensor);
556  layer->m_BasicParameters.m_ForgetGateBias = std::make_unique<ScopedTensorHandle>(constTensor);
557  layer->m_CifgParameters.m_InputGateBias = std::make_unique<ScopedTensorHandle>(constTensor);
558  layer->m_BasicParameters.m_OutputGateBias = std::make_unique<ScopedTensorHandle>(constTensor);
559  layer->m_BasicParameters.m_RecurrentToForgetWeights = std::make_unique<ScopedTensorHandle>(constTensor);
560  layer->m_BasicParameters.m_RecurrentToCellWeights = std::make_unique<ScopedTensorHandle>(constTensor);
561  layer->m_BasicParameters.m_InputToOutputWeights = std::make_unique<ScopedTensorHandle>(constTensor);
562  layer->m_BasicParameters.m_RecurrentToOutputWeights = std::make_unique<ScopedTensorHandle>(constTensor);
563  layer->m_CifgParameters.m_RecurrentToInputWeights = std::make_unique<ScopedTensorHandle>(constTensor);
564  layer->m_CifgParameters.m_InputToInputWeights = std::make_unique<ScopedTensorHandle>(constTensor);
565 
566  RunShapeInferenceTest<QLstmLayer>(layer, {{2, 20}, {2, 20}, {2, 20}});
567 }
568 
569 TEST_CASE("QuantizedLstmTest")
570 {
571  const TensorShape inputShape{2, 5};
572  const TensorShape inputCellState{2, 20};
573  const TensorShape expectedOutputShape{2, 20};
574 
575  Graph graph;
576  auto layer = BuildGraph<QuantizedLstmLayer>(&graph, {inputShape, inputCellState, inputCellState}, "quatizedlstm");
577 
578  float Datum = 0.0f;
579  ConstTensor constTensor({{ 2, 5, 3, 2 }, DataType::Float32, 0.0f, 0, true}, &Datum);
580 
581  layer->m_QuantizedLstmParameters.m_InputToCellWeights = std::make_unique<ScopedTensorHandle>(constTensor);
582  layer->m_QuantizedLstmParameters.m_InputToForgetWeights = std::make_unique<ScopedTensorHandle>(constTensor);
583  layer->m_QuantizedLstmParameters.m_CellBias = std::make_unique<ScopedTensorHandle>(constTensor);
584  layer->m_QuantizedLstmParameters.m_ForgetGateBias = std::make_unique<ScopedTensorHandle>(constTensor);
585  layer->m_QuantizedLstmParameters.m_InputGateBias = std::make_unique<ScopedTensorHandle>(constTensor);
586  layer->m_QuantizedLstmParameters.m_OutputGateBias = std::make_unique<ScopedTensorHandle>(constTensor);
587  layer->m_QuantizedLstmParameters.m_RecurrentToForgetWeights = std::make_unique<ScopedTensorHandle>(constTensor);
588  layer->m_QuantizedLstmParameters.m_RecurrentToCellWeights = std::make_unique<ScopedTensorHandle>(constTensor);
589  layer->m_QuantizedLstmParameters.m_InputToOutputWeights = std::make_unique<ScopedTensorHandle>(constTensor);
590  layer->m_QuantizedLstmParameters.m_RecurrentToOutputWeights = std::make_unique<ScopedTensorHandle>(constTensor);
591  layer->m_QuantizedLstmParameters.m_RecurrentToInputWeights = std::make_unique<ScopedTensorHandle>(constTensor);
592  layer->m_QuantizedLstmParameters.m_InputToInputWeights = std::make_unique<ScopedTensorHandle>(constTensor);
593 
594  RunShapeInferenceTest<QuantizedLstmLayer>(layer, {{2, 20}, {2, 20}, {2, 20}});
595 }
596 
597 TEST_CASE("QuantizeTest")
598 {
599  const TensorShape tensorShape { 5, 4, 7, 6 };
600  CreateGraphAndRunTest<QuantizeLayer>({{ 5, 7, 6, 2 }}, {{ 5, 7, 6, 2 }}, "mean");
601 }
602 
603 TEST_CASE("RankTest")
604 {
605  // due to rank having a scalar output we need a custom test
606  const TensorShape expectedOutputs(Dimensionality::Scalar);
607 
608  Graph graph;
609  auto layer = BuildGraph<RankLayer>(&graph, {{ 1, 1, 1, 1 }}, "rank");
610 
611  layer->GetOutputSlot(0).SetTensorInfo({TensorShape(Dimensionality::NotSpecified), DataType::Float32});
612 
613  CHECK_THROWS_AS(
614  layer->ValidateTensorShapesFromInputs(), LayerValidationException);
615 
616  layer->SetShapeInferenceMethod(ShapeInferenceMethod::InferAndValidate);
617 
618  layer->ValidateTensorShapesFromInputs();
619 
620  CHECK(layer->GetOutputSlot(0).GetTensorInfo().GetShape() == expectedOutputs);
621 
622  layer->GetOutputSlot(0).SetTensorInfo({TensorShape(Dimensionality::Scalar), DataType::Float32});
623 
624  layer->ValidateTensorShapesFromInputs();
625 
626  CHECK(layer->GetOutputSlot(0).GetTensorInfo().GetShape() == expectedOutputs);
627 }
628 
629 TEST_CASE("ReshapeTest")
630 {
631  ReshapeDescriptor descriptor;
632 
633  descriptor.m_TargetShape = { 1, 1, 1, 8 };
634 
635  CreateGraphAndRunTest<ReshapeLayer>({{ 2, 2, 2, 2 }}, {{ 1, 1, 1, 8 }}, descriptor, "reshape");
636 }
637 
638 TEST_CASE("ResizeTest")
639 {
640  ResizeDescriptor descriptor;
641 
642  descriptor.m_TargetHeight = 6;
643  descriptor.m_TargetWidth = 2;
644 
645  CreateGraphAndRunTest<ResizeLayer>({{ 1, 7, 6, 2 }}, {{ 1, 7, 6, 2 }}, descriptor, "resize");
646 }
647 
648 TEST_CASE("SliceTest")
649 {
650  SliceDescriptor descriptor;
651  descriptor.m_Begin = { 1, 0, 1, 2 };
652  descriptor.m_Size = { 2, 1, 2, 3 };
653 
654  CreateGraphAndRunTest<SliceLayer>({{ 3, 2, 3, 5 }}, {{ 2, 1, 2, 3 }}, descriptor, "mean");
655 }
656 
657 TEST_CASE("SpaceToBatchNdTest")
658 {
659  SpaceToBatchNdDescriptor descriptor;
660 
661  std::vector<unsigned int> blockShape {2, 2};
662  std::vector<std::pair<unsigned int, unsigned int>> padlist = {{0, 0}, {0, 0}};
663 
664  descriptor.m_BlockShape = blockShape;
665  descriptor.m_PadList = padlist;
666  descriptor.m_DataLayout = DataLayout::NHWC;
667 
668  CreateGraphAndRunTest<SpaceToBatchNdLayer>({{ 1, 4, 4, 1 }}, {{ 4, 2, 2, 1 }}, descriptor, "spacetobatchnd");
669 }
670 
671 TEST_CASE("SpaceToDepth")
672 {
673  SpaceToDepthDescriptor descriptor;
674 
675  descriptor.m_BlockSize = 2;
676  descriptor.m_DataLayout = DataLayout::NHWC;
677 
678  CreateGraphAndRunTest<SpaceToDepthLayer>({{ 1, 2, 2, 2 }}, {{ 1, 1, 1, 8}}, descriptor, "spacetodepth");
679 }
680 
681 TEST_CASE("SplitterTest")
682 {
683  SplitterDescriptor descriptor(2, 3);
684 
685  descriptor.SetViewSize(0, 0, 1);
686  descriptor.SetViewSize(0, 1, 2);
687  descriptor.SetViewSize(0, 2, 2);
688 
689  descriptor.SetViewSize(1, 0, 1);
690  descriptor.SetViewSize(1, 1, 2);
691  descriptor.SetViewSize(1, 2, 2);
692 
693  CreateGraphAndRunTest<SplitterLayer>({{ 2, 2, 2 }}, {{ 1, 2, 2 }, { 1, 2, 2 }}, descriptor, "splitter");
694 }
695 
696 TEST_CASE("StackTest")
697 {
698  StackDescriptor descriptor;
699 
700  descriptor.m_Axis = 0;
701  descriptor.m_NumInputs = 2;
702  descriptor.m_InputShape = { 3, 2, 3 };
703 
704  CreateGraphAndRunTest<StackLayer>({{ 3, 2, 3 }, { 3, 2, 3 }}, {{ 2, 3, 2, 3 }}, descriptor, "stack");
705 }
706 
707 TEST_CASE("StridedSliceTest")
708 {
709  StridedSliceDescriptor descriptor;
710 
711  descriptor.m_Begin = {0, 0, 0, 0};
712  descriptor.m_End = {3, 2, 3, 1};
713  descriptor.m_Stride = {2, 2, 2, 1};
714 
715  CreateGraphAndRunTest<StridedSliceLayer>({{ 3, 2, 3, 1 }}, {{ 2, 1, 2, 1 }}, descriptor, "stridedslice");
716 }
717 
718 TEST_CASE("Switchtest")
719 {
720  CreateGraphAndRunTest<SwitchLayer>({{ 3, 2, 3, 1 }, { 3, 2, 3, 1 }}, {{ 3, 2, 3, 1 }, { 3, 2, 3, 1 }}, "switch");
721 }
722 
TEST_CASE("TransposeConvolution2dTest")
{
    // NOTE(review): despite its name this test is a verbatim copy of
    // StridedSliceTest and exercises StridedSliceLayer, not
    // TransposeConvolution2dLayer -- transpose-convolution shape inference is
    // effectively untested here. Left as-is pending a proper rewrite.
    StridedSliceDescriptor descriptor;

    descriptor.m_Begin = {0, 0, 0, 0};
    descriptor.m_End = {3, 2, 3, 1};
    descriptor.m_Stride = {2, 2, 2, 1};

    CreateGraphAndRunTest<StridedSliceLayer>({{ 3, 2, 3, 1 }}, {{ 2, 1, 2, 1 }}, descriptor, "t");
}
733 
734 TEST_CASE("TransposeTest")
735 {
736  armnn::TransposeDescriptor descriptor;
737  descriptor.m_DimMappings = {0U, 3U, 1U, 2U};
738 
739  CreateGraphAndRunTest<TransposeLayer>({{ 1, 2, 2, 3 }}, {{ 1, 3, 2, 2 }}, descriptor, "stridedslice");
740 }
741 
742 }
743 }
uint32_t m_PadBottom
Padding bottom value in the height dimension.
bool m_ProjectionEnabled
Enable/disable the projection layer.
uint32_t m_Axis
0-based axis along which to stack the input tensors.
A ViewsDescriptor for the SplitterLayer.
float m_ScaleW
Center size encoding scale weight.
uint32_t m_PadBottom
Padding bottom value in the height dimension.
TEST_SUITE("ShapeInferenceTests")
bool m_BiasEnabled
Enable/disable bias.
uint32_t m_PadBottom
Padding bottom value in the height dimension.
uint32_t m_PadLeft
Padding left value in the width dimension.
A ReshapeDescriptor for the ReshapeLayer.
std::vector< int > m_Begin
Begin values for the input that will be sliced.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
A ComparisonDescriptor for the ComparisonLayer.
Definition: Descriptors.hpp:89
float m_ScaleX
Center size encoding scale x.
TensorShape m_InputShape
Required shape of all input tensors.
LayerT * AddLayer(Args &&... args)
Adds a new layer, of type LayerType, to the graph constructed with the arguments passed.
Definition: Graph.hpp:420
uint32_t m_PoolWidth
Pooling width value.
bool m_PeepholeEnabled
Enable/disable peephole.
A Convolution2dDescriptor for the Convolution2dLayer.
uint32_t m_PadLeft
Padding left value in the width dimension.
std::vector< unsigned int > m_Size
Size of the slice in each dimension.
The padding fields don't count and are ignored.
PaddingMethod m_PaddingMethod
The padding method to be used. (Exclude, IgnoreValue).
ArgMinMaxFunction m_Function
Specify if the function is to find Min or Max.
Definition: Descriptors.hpp:81
uint32_t m_DetectionsPerClass
Detections per classes, used in Regular NMS.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
uint32_t m_PadTop
Padding top value in the height dimension.
uint32_t m_PadRight
Padding right value in the width dimension.
Copyright (c) 2021 ARM Limited and Contributors.
uint32_t m_DilationY
Dilation along y axis.
A SpaceToDepthDescriptor for the SpaceToDepthLayer.
std::vector< std::pair< unsigned int, unsigned int > > m_PadList
Specifies the padding values for the input dimension: heightPad{top, bottom} widthPad{left, right}.
uint32_t m_DilationY
Dilation factor value for height dimension.
A BatchToSpaceNdDescriptor for the BatchToSpaceNdLayer.
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
PermutationVector m_DimMappings
Indicates how to translate tensor elements from a given source into the target destination, when source and target potentially have different memory layouts e.g.
A ResizeBilinearDescriptor for the ResizeBilinearLayer.
uint32_t m_MaxClassesPerDetection
Maximum numbers of classes per detection, used in Fast NMS.
std::vector< unsigned int > m_Axis
Values for the dimensions to reduce.
A StackDescriptor for the StackLayer.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
TensorShape m_TargetShape
Target shape value.
uint32_t m_PoolHeight
Pooling height value.
uint32_t m_PadTop
Padding top value in the height dimension.
uint32_t m_MaxDetections
Maximum numbers of detections.
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
float m_NmsIouThreshold
Intersection over union threshold.
An LstmDescriptor for the LstmLayer.
uint32_t m_PadRight
Padding right value in the width dimension.
uint32_t m_DilationX
Dilation factor value for width dimension.
uint32_t m_PadTop
Padding top value in the height dimension.
Status SetViewSize(uint32_t view, uint32_t coord, uint32_t value)
Set the size of the views.
std::vector< unsigned int > m_Begin
Beginning indices of the slice in each dimension.
std::vector< unsigned int > m_BlockShape
Block shape values.
A L2NormalizationDescriptor for the L2NormalizationLayer.
An ArgMinMaxDescriptor for ArgMinMaxLayer.
Definition: Descriptors.hpp:67
An OriginsDescriptor for the ConcatLayer.
A FullyConnectedDescriptor for the FullyConnectedLayer.
A tensor defined by a TensorInfo (shape and data type) and an immutable backing store.
Definition: Tensor.hpp:327
A FakeQuantizationDescriptor for the FakeQuantizationLayer.
Validate all output shapes.
uint32_t m_TargetWidth
Target width value.
A GatherDescriptor for the GatherLayer.
bool m_PeepholeEnabled
Enable/disable peephole.
uint32_t m_NumClasses
Number of classes.
A QLstmDescriptor for the QLstmLayer.
bool m_UseRegularNms
Use Regular NMS.
std::vector< unsigned int > m_BlockShape
Block shape value.
std::vector< int > m_Stride
Stride values for the input that will be sliced.
An ActivationDescriptor for the ActivationLayer.
Definition: Descriptors.hpp:36
uint32_t m_NumInputs
Number of input tensors.
uint32_t m_TargetHeight
Target height value.
uint32_t m_ActivationFunc
The activation function to use.
A SliceDescriptor for the SliceLayer.
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
unsigned int m_BlockSize
Scalar specifying the input block size. It must be >= 1.
float m_ScaleH
Center size encoding scale height.
ComparisonOperation m_Operation
Specifies the comparison operation to execute.
std::vector< int > m_End
End values for the input that will be sliced.
A SpaceToBatchNdDescriptor for the SpaceToBatchNdLayer.
Struct for the users to pass backend specific options.
uint32_t m_DilationX
Dilation along x axis.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
bool m_CifgEnabled
Enable/disable cifg (coupled input & forget gate).
SoftmaxDescriptor LogSoftmaxDescriptor
A LogSoftmaxDescriptor for the LogSoftmaxLayer.
PoolingAlgorithm m_PoolType
The pooling algorithm to use (Max. Average, L2).
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
std::vector< std::pair< unsigned int, unsigned int > > m_Crops
The values to crop from the input dimension.
bool m_ProjectionEnabled
Enable/disable the projection layer.
OutputShapeRounding m_OutputShapeRounding
The rounding method for the output shape. (Floor, Ceiling).
A layer user-provided data can be bound to (e.g. inputs, outputs).
Definition: InputLayer.hpp:13
A MeanDescriptor for the MeanLayer.
Infer missing output shapes and validate all output shapes.
A TransposeDescriptor for the TransposeLayer.
A StridedSliceDescriptor for the StridedSliceLayer.
int m_Axis
Axis to reduce across the input tensor.
Definition: Descriptors.hpp:83
float m_ScaleY
Center size encoding scale y.
float m_NmsScoreThreshold
NMS score threshold.
std::unique_ptr< INetwork, void(*)(INetwork *network)> INetworkPtr
Definition: INetwork.hpp:241
A Pooling2dDescriptor for the Pooling2dLayer.
A NormalizationDescriptor for the NormalizationLayer.
An InstanceNormalizationDescriptor for InstanceNormalizationLayer.
static INetworkPtr Create(NetworkOptions networkOptions={})
Definition: Network.cpp:492
bool m_CifgEnabled
Enable/disable CIFG (coupled input & forget gate).
PermutationVector m_DimMappings
Indicates how to translate tensor elements from a given source into the target destination, when source and target potentially have different memory layouts e.g.
ActivationFunction m_Function
The activation function to use (Sigmoid, TanH, Linear, ReLu, BoundedReLu, SoftReLu, LeakyReLu, Abs, Sqrt, Square, Elu).
Definition: Descriptors.hpp:59
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
A DepthwiseConvolution2dDescriptor for the DepthwiseConvolution2dLayer.
A BatchNormalizationDescriptor for the BatchNormalizationLayer.
uint32_t m_PadLeft
Padding left value in the width dimension.
Status SetViewOriginCoord(uint32_t view, uint32_t coord, uint32_t value)
Set the view origin coordinates.
A PermuteDescriptor for the PermuteLayer.
uint32_t m_PadRight
Padding right value in the width dimension.