ArmNN
 22.05
ShapeInferenceTests.cpp
Go to the documentation of this file.
1 //
2 // Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
6 #include <doctest/doctest.h>
7 
8 #include <armnn/Tensor.hpp>
9 #include <Graph.hpp>
10 #include <InternalTypes.hpp>
14 
15 #include <string>
16 
17 TEST_SUITE("ShapeInferenceTests")
18 {
19 using namespace armnn;
20 namespace
21 {
22 
23 constexpr const bool maskPermutations[6][4] = {{false, false, false, false},
24  {true, false, false, false},
25  {false, true, false, false},
26  {false, false, true, false},
27  {false, false, false, true},
28  {true, true, true, true}};
29 
30 template<typename LayerT, typename... Args>
31 LayerT* BuildGraph(Graph* graph, const std::vector<TensorShape>& inputShapes, Args &&... args)
32 {
33  auto layer = graph->AddLayer<LayerT>(std::forward<Args>(args)...);
34 
35  uint32_t inputCount = 0;
36  for (auto inputShape : inputShapes)
37  {
38  TensorInfo inputTensorInfo(inputShape, DataType::Float32);
39 
40  auto input = graph->AddLayer<InputLayer>(static_cast<int>(inputCount), "input");
41  input->GetOutputSlot().SetTensorInfo(inputTensorInfo);
42  input->GetOutputSlot().Connect(layer->GetInputSlot(inputCount));
43  inputCount++;
44  }
45 
46  return layer;
47 }
48 
49 template<typename LayerT>
50 void RunShapeInferenceTest(LayerT* const layer,
51  const std::vector<std::initializer_list<unsigned int>> dimensionSizeLists)
52 {
53  std::vector<unsigned int> numDimensions;
54  std::vector<TensorShape> expectedOutputShapes;
55 
56  for (auto dimensionSizeList : dimensionSizeLists)
57  {
58  numDimensions.emplace_back(dimensionSizeList.size());
59  expectedOutputShapes.emplace_back(TensorShape(dimensionSizeList));
60  }
61 
62  const unsigned int outputSize = layer->GetNumOutputSlots();
63 
64  const auto runTestWithMask = [&](const bool maskPermutations[])
65  {
66  for (unsigned int i = 0; i < outputSize; ++i)
67  {
68  layer->GetOutputSlot(i).SetTensorInfo({{numDimensions[i], dimensionSizeLists[i].begin(), maskPermutations},
70  }
71 
72  layer->ValidateTensorShapesFromInputs();
73 
74  for (unsigned int i = 0; i < outputSize; ++i)
75  {
76  CHECK(layer->GetOutputSlot(i).GetTensorInfo().GetShape() == expectedOutputShapes[i]);
77  }
78  };
79 
80  // Test inference with Dimensionality::NotSpecified
81  for (unsigned int j = 0; j < outputSize; ++j)
82  {
83  layer->GetOutputSlot(j).SetTensorInfo({TensorShape(Dimensionality::NotSpecified), DataType::Float32});
84  }
85 
86  layer->SetShapeInferenceMethod(ShapeInferenceMethod::ValidateOnly);
87 
88  CHECK_THROWS_AS(layer->ValidateTensorShapesFromInputs(), LayerValidationException);
89 
90  layer->SetShapeInferenceMethod(ShapeInferenceMethod::InferAndValidate);
91  layer->ValidateTensorShapesFromInputs();
92 
93  for (unsigned int i = 0; i < outputSize; ++i)
94  {
95  CHECK(layer->GetOutputSlot(i).GetTensorInfo().GetShape() == expectedOutputShapes[i]);
96  }
97 
98  // Test inference with Dimensionality::Specified and various combinations of dimensions of unknown size
99  for (unsigned int i = 0; i < numDimensions[0]; ++i)
100  {
101  runTestWithMask(maskPermutations[i]);
102  }
103 
104  // maskPermutations[5] equates to all dimensions being known
105  runTestWithMask(maskPermutations[5]);
106 }
107 
108 template<typename LayerT, typename... Args>
109 void CreateGraphAndRunTest(const std::vector<TensorShape>& inputShapes,
110  const std::vector<std::initializer_list<unsigned int>> dimensionSizeLists,
111  Args &&... args)
112 {
113  Graph graph(true);
114 
115  auto layer = BuildGraph<LayerT>(&graph, inputShapes, std::forward<Args>(args)...);
116 
117  RunShapeInferenceTest<LayerT>(layer, dimensionSizeLists);
118 }
119 
TEST_CASE("NetworkOptionsTest")
{
    // Checks that the "ShapeInferenceMethod" backend option is accepted at
    // network creation and that querying IsTensorInfoSet() on an output left
    // Dimensionality::NotSpecified never throws, whichever option is active.

    // Case 1: InferAndValidate = true. The activation output's info must end
    // up equal to the input's info (checked below).
    BackendOptions ShapeInferenceMethodOption("ShapeInferenceMethod",
    {
        { "InferAndValidate", true }
    });

    INetworkPtr network = INetwork::Create({ShapeInferenceMethodOption});
    TensorInfo tensorInfo({ 5, 7, 6, 2 }, DataType::Float32);

    auto inputLayer = network->AddInputLayer(1, "inputLayer");
    inputLayer->GetOutputSlot(0).SetTensorInfo(tensorInfo);

    ActivationDescriptor descriptor;
    descriptor.m_Function = ActivationFunction::Abs;
    auto activationLayer = network->AddActivationLayer(descriptor, "activation");

    inputLayer->GetOutputSlot(0).Connect(activationLayer->GetInputSlot(0));
    activationLayer->GetOutputSlot(0).SetTensorInfo({TensorShape{Dimensionality::NotSpecified}, DataType::Float32});

    CHECK_NOTHROW(activationLayer->GetOutputSlot(0).IsTensorInfoSet());

    CHECK(activationLayer->GetOutputSlot(0).GetTensorInfo() == tensorInfo);


    // Case 2: InferAndValidate = false. Only the no-throw behaviour is checked.
    ShapeInferenceMethodOption = BackendOptions("ShapeInferenceMethod",
    {
        { "InferAndValidate", false }
    });

    network = INetwork::Create({ShapeInferenceMethodOption});

    inputLayer = network->AddInputLayer(1, "inputLayer");
    inputLayer->GetOutputSlot(0).SetTensorInfo(tensorInfo);

    activationLayer = network->AddActivationLayer(descriptor, "activation");

    inputLayer->GetOutputSlot(0).Connect(activationLayer->GetInputSlot(0));
    activationLayer->GetOutputSlot(0).SetTensorInfo({TensorShape{Dimensionality::NotSpecified}, DataType::Float32});

    CHECK_NOTHROW(activationLayer->GetOutputSlot(0).IsTensorInfoSet());

    // Case 3: default network options — same no-throw expectation.
    network = INetwork::Create();

    inputLayer = network->AddInputLayer(1, "inputLayer");
    inputLayer->GetOutputSlot(0).SetTensorInfo(tensorInfo);

    activationLayer = network->AddActivationLayer(descriptor, "activation");

    inputLayer->GetOutputSlot(0).Connect(activationLayer->GetInputSlot(0));
    activationLayer->GetOutputSlot(0).SetTensorInfo({TensorShape{Dimensionality::NotSpecified}, DataType::Float32});

    CHECK_NOTHROW(activationLayer->GetOutputSlot(0).IsTensorInfoSet());
}
174 
175 TEST_CASE("AbsTest")
176 {
177  ActivationDescriptor descriptor;
178  descriptor.m_Function = ActivationFunction::Abs;
179  CreateGraphAndRunTest<ActivationLayer>({{ 5, 7, 6, 2 }}, {{ 5, 7, 6, 2 }}, descriptor, "activation");
180 }
181 
182 TEST_CASE("AdditionTest")
183 {
184  CreateGraphAndRunTest<AdditionLayer>({{ 5, 7, 6, 2 }, { 5, 7, 6, 2 }}, {{ 5, 7, 6, 2 }}, "add");
185 }
186 
187 TEST_CASE("ArgMinMaxTest")
188 {
189  armnn::ArgMinMaxDescriptor descriptor;
190  descriptor.m_Function = ArgMinMaxFunction::Min;
191  descriptor.m_Axis = 1;
192 
193  CreateGraphAndRunTest<ArgMinMaxLayer>({{ 1, 3, 2, 4 }}, {{ 1, 2, 4 }}, descriptor, "argMinMax");
194 }
195 
196 TEST_CASE("BatchNormalizationTest")
197 {
198  BatchNormalizationDescriptor descriptor;
199  CreateGraphAndRunTest<BatchNormalizationLayer>({{ 1, 2, 3, 2 }}, {{ 1, 2, 3, 2 }}, descriptor, "batchNorm");
200 }
201 
202 TEST_CASE("BatchToSpaceNdTest")
203 {
204  BatchToSpaceNdDescriptor descriptor;
205 
206  std::vector<unsigned int> blockShape {2, 2};
207  std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};
208 
209  descriptor.m_BlockShape = blockShape;
210  descriptor.m_Crops = crops;
211  descriptor.m_DataLayout = DataLayout::NHWC;
212 
213  CreateGraphAndRunTest<BatchToSpaceNdLayer>({{ 4, 2, 2, 1 }}, {{ 1, 4, 4, 1 }}, descriptor, "batchtospacend");
214 }
215 
216 TEST_CASE("ComparisionTest")
217 {
218  ComparisonDescriptor descriptor;
220  CreateGraphAndRunTest<ComparisonLayer>({{ 5, 7, 6, 2 }, { 5, 7, 6, 2 }},
221  {{ 5, 7, 6, 2 }},
222  descriptor,
223  "comparision");
224 }
225 
226 TEST_CASE("ConcatTest")
227 {
228  ConcatDescriptor descriptor(2, 3);
229 
230  descriptor.SetViewOriginCoord(0, 0, 0);
231  descriptor.SetViewOriginCoord(1, 0, 1);
232 
233  CreateGraphAndRunTest<ConcatLayer>({{ 1, 2, 1 }, { 1, 2, 1 }}, {{ 2, 2, 1 }}, descriptor, "concat");
234 }
235 
236 TEST_CASE("ConstantTest")
237 {
238  Graph graph;
239  TensorShape outputShape{ 1, 1, 3, 3 };
240  auto layer = BuildGraph<ConstantLayer>(&graph, {}, "constant");
241 
242  const float Datum = 0.0f;
243  ConstTensor output0({outputShape, DataType::Float32, 0.0f, 0, true}, &Datum);
244  layer->m_LayerOutput = std::make_unique<ScopedTensorHandle>(output0);
245 
246  layer->GetOutputSlot(0).SetTensorInfo({{1, 1, 3, 3}, DataType::Float32});
247 
248  layer->ValidateTensorShapesFromInputs();
249 
250  CHECK(layer->GetOutputSlot(0).GetTensorInfo().GetShape() == outputShape);
251 }
252 
253 TEST_CASE("ConvertBf16ToFp32Test")
254 {
255  CreateGraphAndRunTest<ConvertBf16ToFp32Layer>({{ 5, 7, 6, 2 }}, {{ 5, 7, 6, 2 }}, "floor");
256 }
257 
258 TEST_CASE("ConvertFp16ToBf16Test")
259 {
260  const TensorShape tensorShape{5, 7, 6, 2};
261  CreateGraphAndRunTest<ConvertFp32ToBf16Layer>({{ 5, 7, 6, 2 }}, {{ 5, 7, 6, 2 }}, "floor");
262 }
263 
264 TEST_CASE("ConvertFp16ToFp32Test")
265 {
266  CreateGraphAndRunTest<ConvertFp16ToFp32Layer>({{ 5, 7, 6, 2 }}, {{ 5, 7, 6, 2 }}, "floor");
267 }
268 
269 TEST_CASE("ConvertFp32ToFp16Test")
270 {
271  CreateGraphAndRunTest<ConvertFp32ToFp16Layer>({{ 5, 7, 6, 2 }}, {{ 5, 7, 6, 2 }}, "floor");
272 }
273 
274 TEST_CASE("Convolution2dTest")
275 {
276  const TensorShape inputShape{1, 1, 10, 10};
277 
278  Convolution2dDescriptor descriptor;
279 
280  descriptor.m_PadLeft = 0;
281  descriptor.m_PadTop = 0;
282  descriptor.m_PadRight = 0;
283  descriptor.m_PadBottom = 0;
284  descriptor.m_StrideX = 1;
285  descriptor.m_StrideY = 1;
286  descriptor.m_DilationX = 3;
287  descriptor.m_DilationY = 3;
288 
289  CreateGraphAndRunTest<Convolution2dLayer>({ inputShape, { 1, 1, 3, 3 } },
290  { { 1, 1, 4, 4 } }, descriptor,
291  "convd");
292 }
293 
294 TEST_CASE("DebugLayerTest")
295 {
296  const TensorShape tensorShape;
297  CreateGraphAndRunTest<DebugLayer>({{ 5, 7, 6, 2 }}, {{ 5, 7, 6, 2 }}, "debug");
298 }
299 
300 TEST_CASE("DepthToSpaceTest")
301 {
302  DepthToSpaceDescriptor descriptor;
303 
304  descriptor.m_BlockSize = 2;
305  descriptor.m_DataLayout = DataLayout::NHWC;
306 
307  CreateGraphAndRunTest<DepthToSpaceLayer>({{ 1, 1, 1, 8}}, {{ 1, 2, 2, 2 }}, descriptor, "depthtospace");
308 }
309 
310 TEST_CASE("DepthwiseConvolutionTest")
311 {
313 
314  descriptor.m_StrideX = 2;
315  descriptor.m_StrideY = 1;
316  descriptor.m_PadLeft = 0;
317  descriptor.m_PadRight = 0;
318  descriptor.m_PadTop = 1;
319  descriptor.m_PadBottom = 1;
320  descriptor.m_DilationX = 0;
321  descriptor.m_DilationY = 0;
322  descriptor.m_DataLayout = DataLayout::NHWC;
323  descriptor.m_BiasEnabled = false;
324 
325  CreateGraphAndRunTest<DepthwiseConvolution2dLayer>({{ 8, 16, 2, 1 }, // input
326  { 2, 5, 3, 2 }}, // weights
327  {{ 8, 18, 1, 2 }}, // output
328  descriptor,
329  "conv2d");
330 }
331 
332 TEST_CASE("DequantizeTest")
333 {
334  const TensorShape tensorShape{5, 7, 6, 2};
335  CreateGraphAndRunTest<DequantizeLayer>({{ 5, 7, 6, 2 }}, {{ 5, 7, 6, 2 }}, "dequantize");
336 }
337 
338 TEST_CASE("DetectionPostProcessTest")
339 {
340  const TensorShape detectionBoxesInfo{ 1, 3, 4 };
341  const TensorShape detectionScoresInfo{ 1, 3, 4 };
342  const TensorShape detectionClassesInfo{ 1, 3, 4 };
343 
345  descriptor.m_UseRegularNms = true;
346  descriptor.m_MaxDetections = 3;
347  descriptor.m_MaxClassesPerDetection = 1;
348  descriptor.m_DetectionsPerClass =1;
349  descriptor.m_NmsScoreThreshold = 0.0;
350  descriptor.m_NmsIouThreshold = 0.5;
351  descriptor.m_NumClasses = 2;
352  descriptor.m_ScaleY = 10.0;
353  descriptor.m_ScaleX = 10.0;
354  descriptor.m_ScaleH = 5.0;
355  descriptor.m_ScaleW = 5.0;
356 
357  const float Datum = 0.0f;
358  ConstTensor anchorsTensor({{1, 1, 3, 3}, DataType::Float32, 0.0f, 0, true}, &Datum);
359 
360  Graph graph;
361 
362  auto layer = BuildGraph<DetectionPostProcessLayer>(&graph,
363  {detectionBoxesInfo, detectionScoresInfo},
364  descriptor,
365  "detectionpostprocess");
366 
367  layer->m_Anchors = std::make_unique<ScopedTensorHandle>(anchorsTensor);
368 
369  RunShapeInferenceTest<DetectionPostProcessLayer>(layer, {{ 1, 3, 4 }, { 1, 3 }, { 1, 3 }, { 1 }});
370 }
371 
372 TEST_CASE("FakeQuantizationTest")
373 {
374  FakeQuantizationDescriptor descriptor;
375  descriptor.m_Max = 1;
376  descriptor.m_Min = 1;
377  CreateGraphAndRunTest<FakeQuantizationLayer>({{ 5, 7, 6, 2 }}, {{ 5, 7, 6, 2 }}, descriptor, "fakequantization");
378 }
379 
380 TEST_CASE("FloorTest")
381 {
382  const TensorShape tensorShape{5, 7, 6, 2};
383  CreateGraphAndRunTest<FloorLayer>({{ 5, 7, 6, 2 }}, {{ 5, 7, 6, 2 }}, "floor");
384 }
385 
386 TEST_CASE("FullyConnectedTest")
387 {
388  const unsigned int inputWidth = 3u;
389  const unsigned int inputHeight = 2u;
390  const unsigned int inputChannels = 1u;
391  const unsigned int outputChannels = 2u;
392 
393  CreateGraphAndRunTest<FullyConnectedLayer>({{ 1, inputChannels, inputHeight, inputWidth }, // input
394  { inputChannels, outputChannels }}, // weights
395  {{ 1, outputChannels }}, // output
397  "fc");
398 }
399 
400 TEST_CASE("GatherTest")
401 {
402  CreateGraphAndRunTest<GatherLayer>({{ 7, 6, 2}, {2,3}}, {{ 2, 3, 6, 2 }}, GatherDescriptor(), "gather");
403 }
404 
405 TEST_CASE("InstanceNormalizationTest")
406 {
407  const TensorShape tensorShape{5, 7, 6, 2};
408 
409  CreateGraphAndRunTest<InstanceNormalizationLayer>({{ 5, 7, 6, 2 }}, {{ 5, 7, 6, 2 }},
411  "instancenorm");
412 }
413 
414 TEST_CASE("L2NormalizationTest")
415 {
416  const TensorShape tensorShape{5, 7, 6, 2};
417 
418  CreateGraphAndRunTest<L2NormalizationLayer>({{ 5, 7, 6, 2 }}, {{ 5, 7, 6, 2 }},
420  "l2norm");
421 }
422 
423 TEST_CASE("LogSoftMaxTest")
424 {
425  const TensorShape tensorShape{5, 7, 6, 2};
426 
427  CreateGraphAndRunTest<LogSoftmaxLayer>({{ 5, 7, 6, 2 }}, {{ 5, 7, 6, 2 }}, LogSoftmaxDescriptor(), "logsoftmax");
428 }
429 
430 TEST_CASE("LstmTest")
431 {
432  const TensorShape inputShape{2, 5};
433  const TensorShape inputCellState{2, 20};
434  const TensorShape expectedOutputShape{2, 20};
435 
436  LstmDescriptor descriptor;
437 
438  descriptor.m_ActivationFunc = 4;
439  descriptor.m_CifgEnabled = false;
440  descriptor.m_PeepholeEnabled = false;
441  descriptor.m_ProjectionEnabled = false;
442 
443  Graph graph;
444  auto layer = BuildGraph<LstmLayer>(&graph, {inputShape, inputCellState, inputCellState}, descriptor, "lstm");
445 
446  float Datum = 0.0f;
447  ConstTensor constTensor({{ 2, 5, 3, 2 }, DataType::Float32, 0.0f, 0, true}, &Datum);
448 
449  layer->m_BasicParameters.m_InputToCellWeights = std::make_unique<ScopedTensorHandle>(constTensor);
450  layer->m_BasicParameters.m_InputToForgetWeights = std::make_unique<ScopedTensorHandle>(constTensor);
451  layer->m_BasicParameters.m_CellBias = std::make_unique<ScopedTensorHandle>(constTensor);
452  layer->m_BasicParameters.m_ForgetGateBias = std::make_unique<ScopedTensorHandle>(constTensor);
453  layer->m_CifgParameters.m_InputGateBias = std::make_unique<ScopedTensorHandle>(constTensor);
454  layer->m_BasicParameters.m_OutputGateBias = std::make_unique<ScopedTensorHandle>(constTensor);
455  layer->m_BasicParameters.m_RecurrentToForgetWeights = std::make_unique<ScopedTensorHandle>(constTensor);
456  layer->m_BasicParameters.m_RecurrentToCellWeights = std::make_unique<ScopedTensorHandle>(constTensor);
457  layer->m_BasicParameters.m_InputToOutputWeights = std::make_unique<ScopedTensorHandle>(constTensor);
458  layer->m_BasicParameters.m_RecurrentToOutputWeights = std::make_unique<ScopedTensorHandle>(constTensor);
459  layer->m_CifgParameters.m_RecurrentToInputWeights = std::make_unique<ScopedTensorHandle>(constTensor);
460  layer->m_CifgParameters.m_InputToInputWeights = std::make_unique<ScopedTensorHandle>(constTensor);
461 
462  RunShapeInferenceTest<LstmLayer>(layer, {{2, 80}, {2, 20}, {2, 20}, {2, 20}});
463 }
464 
465 TEST_CASE("MeanLayerTest")
466 {
467  MeanDescriptor descriptor;
468  descriptor.m_Axis = {0};
469 
470  CreateGraphAndRunTest<MeanLayer>({{ 5, 7, 6, 2 }}, {{ 7, 6, 2 }}, descriptor, "mean");
471 }
472 
473 TEST_CASE("MemCopyTest")
474 {
475  CreateGraphAndRunTest<MemCopyLayer>({{ 5, 7, 6, 2 }}, {{ 5, 7, 6, 2 }}, "memcopy");
476 }
477 
478 TEST_CASE("MemImportTest")
479 {
480  CreateGraphAndRunTest<MemImportLayer>({{ 5, 7, 6, 2 }}, {{ 5, 7, 6, 2 }}, "memomport");
481 }
482 
483 TEST_CASE("MergeTest")
484 {
485  const TensorShape tensorShape{ 5, 7, 6, 2 };
486  CreateGraphAndRunTest<MergeLayer>({ { 5, 7, 6, 2 }, { 5, 7, 6, 2 } }, {{ 5, 7, 6, 2 }}, "merge");
487 }
488 
489 TEST_CASE("NormalizationTest")
490 {
491  const TensorShape tensorShape{5, 7, 6, 2};
492 
493  CreateGraphAndRunTest<NormalizationLayer>({{ 5, 7, 6, 2 }}, {{ 5, 7, 6, 2 }}, NormalizationDescriptor(), "l2norm");
494 }
495 
496 TEST_CASE("PermuteTest")
497 {
498  PermuteDescriptor descriptor;
499  descriptor.m_DimMappings = {0U, 2U, 3U, 1U};
500 
501  CreateGraphAndRunTest<PermuteLayer>({{ 1, 2, 2, 3 }}, {{ 1, 3, 2, 2 }}, descriptor, "permute");
502 }
503 
504 TEST_CASE("Pooling2dTest")
505 {
506  armnn::Pooling2dDescriptor descriptor;
508  descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
509  descriptor.m_StrideX = 2;
510  descriptor.m_StrideY = 4;
511  descriptor.m_PadLeft = descriptor.m_PadRight = 3;
512  descriptor.m_PadTop = descriptor.m_PadBottom = 0;
515 
516  CreateGraphAndRunTest<Pooling2dLayer>({{ 1, 2, 8, 13 }}, {{ 1, 2, 2, 8 }}, descriptor, "pooling2d");
517 }
518 
519 TEST_CASE("QLstmTest")
520 {
521  const TensorShape inputShape{2, 5};
522  const TensorShape inputCellState{2, 20};
523  const TensorShape expectedOutputShape{2, 20};
524 
525  QLstmDescriptor descriptor;
526 
527  descriptor.m_CifgEnabled = false;
528  descriptor.m_PeepholeEnabled = false;
529  descriptor.m_ProjectionEnabled = false;
530 
531  Graph graph;
532  auto layer = BuildGraph<QLstmLayer>(&graph, {inputShape, inputCellState, inputCellState}, descriptor, "qlstm");
533 
534  float Datum = 0.0f;
535  ConstTensor constTensor({{ 2, 5, 3, 2 }, DataType::Float32, 0.0f, 0, true}, &Datum);
536 
537  layer->m_BasicParameters.m_InputToCellWeights = std::make_unique<ScopedTensorHandle>(constTensor);
538  layer->m_BasicParameters.m_InputToForgetWeights = std::make_unique<ScopedTensorHandle>(constTensor);
539  layer->m_BasicParameters.m_CellBias = std::make_unique<ScopedTensorHandle>(constTensor);
540  layer->m_BasicParameters.m_ForgetGateBias = std::make_unique<ScopedTensorHandle>(constTensor);
541  layer->m_CifgParameters.m_InputGateBias = std::make_unique<ScopedTensorHandle>(constTensor);
542  layer->m_BasicParameters.m_OutputGateBias = std::make_unique<ScopedTensorHandle>(constTensor);
543  layer->m_BasicParameters.m_RecurrentToForgetWeights = std::make_unique<ScopedTensorHandle>(constTensor);
544  layer->m_BasicParameters.m_RecurrentToCellWeights = std::make_unique<ScopedTensorHandle>(constTensor);
545  layer->m_BasicParameters.m_InputToOutputWeights = std::make_unique<ScopedTensorHandle>(constTensor);
546  layer->m_BasicParameters.m_RecurrentToOutputWeights = std::make_unique<ScopedTensorHandle>(constTensor);
547  layer->m_CifgParameters.m_RecurrentToInputWeights = std::make_unique<ScopedTensorHandle>(constTensor);
548  layer->m_CifgParameters.m_InputToInputWeights = std::make_unique<ScopedTensorHandle>(constTensor);
549 
550  RunShapeInferenceTest<QLstmLayer>(layer, {{2, 20}, {2, 20}, {2, 20}});
551 }
552 
553 TEST_CASE("QuantizedLstmTest")
554 {
555  const TensorShape inputShape{2, 5};
556  const TensorShape inputCellState{2, 20};
557  const TensorShape expectedOutputShape{2, 20};
558 
559  Graph graph;
560  auto layer = BuildGraph<QuantizedLstmLayer>(&graph, {inputShape, inputCellState, inputCellState}, "quatizedlstm");
561 
562  float Datum = 0.0f;
563  ConstTensor constTensor({{ 2, 5, 3, 2 }, DataType::Float32, 0.0f, 0, true}, &Datum);
564 
565  layer->m_QuantizedLstmParameters.m_InputToCellWeights = std::make_unique<ScopedTensorHandle>(constTensor);
566  layer->m_QuantizedLstmParameters.m_InputToForgetWeights = std::make_unique<ScopedTensorHandle>(constTensor);
567  layer->m_QuantizedLstmParameters.m_CellBias = std::make_unique<ScopedTensorHandle>(constTensor);
568  layer->m_QuantizedLstmParameters.m_ForgetGateBias = std::make_unique<ScopedTensorHandle>(constTensor);
569  layer->m_QuantizedLstmParameters.m_InputGateBias = std::make_unique<ScopedTensorHandle>(constTensor);
570  layer->m_QuantizedLstmParameters.m_OutputGateBias = std::make_unique<ScopedTensorHandle>(constTensor);
571  layer->m_QuantizedLstmParameters.m_RecurrentToForgetWeights = std::make_unique<ScopedTensorHandle>(constTensor);
572  layer->m_QuantizedLstmParameters.m_RecurrentToCellWeights = std::make_unique<ScopedTensorHandle>(constTensor);
573  layer->m_QuantizedLstmParameters.m_InputToOutputWeights = std::make_unique<ScopedTensorHandle>(constTensor);
574  layer->m_QuantizedLstmParameters.m_RecurrentToOutputWeights = std::make_unique<ScopedTensorHandle>(constTensor);
575  layer->m_QuantizedLstmParameters.m_RecurrentToInputWeights = std::make_unique<ScopedTensorHandle>(constTensor);
576  layer->m_QuantizedLstmParameters.m_InputToInputWeights = std::make_unique<ScopedTensorHandle>(constTensor);
577 
578  RunShapeInferenceTest<QuantizedLstmLayer>(layer, {{2, 20}, {2, 20}, {2, 20}});
579 }
580 
581 TEST_CASE("QuantizeTest")
582 {
583  const TensorShape tensorShape { 5, 4, 7, 6 };
584  CreateGraphAndRunTest<QuantizeLayer>({{ 5, 7, 6, 2 }}, {{ 5, 7, 6, 2 }}, "mean");
585 }
586 
TEST_CASE("RankTest")
{
    // due to rank having a scalar output we need a custom test
    const TensorShape expectedOutputs(Dimensionality::Scalar);

    Graph graph;
    auto layer = BuildGraph<RankLayer>(&graph, {{ 1, 1, 1, 1 }}, "rank");

    // With the output left NotSpecified and no inference requested,
    // validation must fail...
    layer->GetOutputSlot(0).SetTensorInfo({TensorShape(Dimensionality::NotSpecified), DataType::Float32});

    CHECK_THROWS_AS(
            layer->ValidateTensorShapesFromInputs(), LayerValidationException);

    // ...whereas InferAndValidate must produce the scalar shape.
    layer->SetShapeInferenceMethod(ShapeInferenceMethod::InferAndValidate);

    layer->ValidateTensorShapesFromInputs();

    CHECK(layer->GetOutputSlot(0).GetTensorInfo().GetShape() == expectedOutputs);

    // A pre-set scalar shape must validate unchanged.
    layer->GetOutputSlot(0).SetTensorInfo({TensorShape(Dimensionality::Scalar), DataType::Float32});

    layer->ValidateTensorShapesFromInputs();

    CHECK(layer->GetOutputSlot(0).GetTensorInfo().GetShape() == expectedOutputs);
}
612 
613 TEST_CASE("ReshapeTest")
614 {
615  ReshapeDescriptor descriptor;
616 
617  descriptor.m_TargetShape = { 1, 1, 1, 8 };
618 
619  CreateGraphAndRunTest<ReshapeLayer>({{ 2, 2, 2, 2 }}, {{ 1, 1, 1, 8 }}, descriptor, "reshape");
620 }
621 
622 TEST_CASE("ResizeTest")
623 {
624  ResizeDescriptor descriptor;
625 
626  descriptor.m_TargetHeight = 6;
627  descriptor.m_TargetWidth = 2;
628 
629  CreateGraphAndRunTest<ResizeLayer>({{ 1, 7, 6, 2 }}, {{ 1, 7, 6, 2 }}, descriptor, "resize");
630 }
631 
632 TEST_CASE("SliceTest")
633 {
634  SliceDescriptor descriptor;
635  descriptor.m_Begin = { 1, 0, 1, 2 };
636  descriptor.m_Size = { 2, 1, 2, 3 };
637 
638  CreateGraphAndRunTest<SliceLayer>({{ 3, 2, 3, 5 }}, {{ 2, 1, 2, 3 }}, descriptor, "mean");
639 }
640 
641 TEST_CASE("SpaceToBatchNdTest")
642 {
643  SpaceToBatchNdDescriptor descriptor;
644 
645  std::vector<unsigned int> blockShape {2, 2};
646  std::vector<std::pair<unsigned int, unsigned int>> padlist = {{0, 0}, {0, 0}};
647 
648  descriptor.m_BlockShape = blockShape;
649  descriptor.m_PadList = padlist;
650  descriptor.m_DataLayout = DataLayout::NHWC;
651 
652  CreateGraphAndRunTest<SpaceToBatchNdLayer>({{ 1, 4, 4, 1 }}, {{ 4, 2, 2, 1 }}, descriptor, "spacetobatchnd");
653 }
654 
655 TEST_CASE("SpaceToDepth")
656 {
657  SpaceToDepthDescriptor descriptor;
658 
659  descriptor.m_BlockSize = 2;
660  descriptor.m_DataLayout = DataLayout::NHWC;
661 
662  CreateGraphAndRunTest<SpaceToDepthLayer>({{ 1, 2, 2, 2 }}, {{ 1, 1, 1, 8}}, descriptor, "spacetodepth");
663 }
664 
665 TEST_CASE("SplitterTest")
666 {
667  SplitterDescriptor descriptor(2, 3);
668 
669  descriptor.SetViewSize(0, 0, 1);
670  descriptor.SetViewSize(0, 1, 2);
671  descriptor.SetViewSize(0, 2, 2);
672 
673  descriptor.SetViewSize(1, 0, 1);
674  descriptor.SetViewSize(1, 1, 2);
675  descriptor.SetViewSize(1, 2, 2);
676 
677  CreateGraphAndRunTest<SplitterLayer>({{ 2, 2, 2 }}, {{ 1, 2, 2 }, { 1, 2, 2 }}, descriptor, "splitter");
678 }
679 
680 TEST_CASE("StackTest")
681 {
682  StackDescriptor descriptor;
683 
684  descriptor.m_Axis = 0;
685  descriptor.m_NumInputs = 2;
686  descriptor.m_InputShape = { 3, 2, 3 };
687 
688  CreateGraphAndRunTest<StackLayer>({{ 3, 2, 3 }, { 3, 2, 3 }}, {{ 2, 3, 2, 3 }}, descriptor, "stack");
689 }
690 
691 TEST_CASE("StridedSliceTest")
692 {
693  StridedSliceDescriptor descriptor;
694 
695  descriptor.m_Begin = {0, 0, 0, 0};
696  descriptor.m_End = {3, 2, 3, 1};
697  descriptor.m_Stride = {2, 2, 2, 1};
698 
699  CreateGraphAndRunTest<StridedSliceLayer>({{ 3, 2, 3, 1 }}, {{ 2, 1, 2, 1 }}, descriptor, "stridedslice");
700 }
701 
702 TEST_CASE("Switchtest")
703 {
704  CreateGraphAndRunTest<SwitchLayer>({{ 3, 2, 3, 1 }, { 3, 2, 3, 1 }}, {{ 3, 2, 3, 1 }, { 3, 2, 3, 1 }}, "switch");
705 }
706 
TEST_CASE("TransposeConvolution2dTest")
{
    // NOTE(review): despite the name, this test exercises StridedSliceLayer
    // with a StridedSliceDescriptor — it appears to be a copy of
    // StridedSliceTest. Confirm whether TransposeConvolution2dLayer coverage
    // was intended here.
    StridedSliceDescriptor descriptor;

    descriptor.m_Begin = {0, 0, 0, 0};
    descriptor.m_End = {3, 2, 3, 1};
    descriptor.m_Stride = {2, 2, 2, 1};

    CreateGraphAndRunTest<StridedSliceLayer>({{ 3, 2, 3, 1 }}, {{ 2, 1, 2, 1 }}, descriptor, "t");
}
717 
718 TEST_CASE("TransposeTest")
719 {
720  armnn::TransposeDescriptor descriptor;
721  descriptor.m_DimMappings = {0U, 3U, 1U, 2U};
722 
723  CreateGraphAndRunTest<TransposeLayer>({{ 1, 2, 2, 3 }}, {{ 1, 3, 2, 2 }}, descriptor, "stridedslice");
724 }
725 
726 }
727 }
uint32_t m_PadBottom
Padding bottom value in the height dimension.
bool m_ProjectionEnabled
Enable/disable the projection layer.
uint32_t m_Axis
0-based axis along which to stack the input tensors.
A ViewsDescriptor for the SplitterLayer.
float m_ScaleW
Center size encoding scale weight.
uint32_t m_PadBottom
Padding bottom value in the height dimension.
TEST_SUITE("ShapeInferenceTests")
bool m_BiasEnabled
Enable/disable bias.
uint32_t m_PadBottom
Padding bottom value in the height dimension.
uint32_t m_PadLeft
Padding left value in the width dimension.
A ReshapeDescriptor for the ReshapeLayer.
std::vector< int > m_Begin
Begin values for the input that will be sliced.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
A ComparisonDescriptor for the ComparisonLayer.
Definition: Descriptors.hpp:89
float m_ScaleX
Center size encoding scale x.
TensorShape m_InputShape
Required shape of all input tensors.
LayerT * AddLayer(Args &&... args)
Adds a new layer, of type LayerType, to the graph constructed with the arguments passed.
Definition: Graph.hpp:425
uint32_t m_PoolWidth
Pooling width value.
bool m_PeepholeEnabled
Enable/disable peephole.
A Convolution2dDescriptor for the Convolution2dLayer.
uint32_t m_PadLeft
Padding left value in the width dimension.
std::vector< unsigned int > m_Size
Size of the slice in each dimension.
The padding fields don&#39;t count and are ignored.
PaddingMethod m_PaddingMethod
The padding method to be used. (Exclude, IgnoreValue).
ArgMinMaxFunction m_Function
Specify if the function is to find Min or Max.
Definition: Descriptors.hpp:81
uint32_t m_DetectionsPerClass
Detections per classes, used in Regular NMS.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
uint32_t m_PadTop
Padding top value in the height dimension.
uint32_t m_PadRight
Padding right value in the width dimension.
Copyright (c) 2021 ARM Limited and Contributors.
uint32_t m_DilationY
Dilation along y axis.
A SpaceToDepthDescriptor for the SpaceToDepthLayer.
std::vector< std::pair< unsigned int, unsigned int > > m_PadList
Specifies the padding values for the input dimension: heightPad{top, bottom} widthPad{left, right}.
uint32_t m_DilationY
Dilation factor value for height dimension.
A BatchToSpaceNdDescriptor for the BatchToSpaceNdLayer.
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
PermutationVector m_DimMappings
Indicates how to translate tensor elements from a given source into the target destination, when source and target potentially have different memory layouts e.g.
A ResizeBilinearDescriptor for the ResizeBilinearLayer.
uint32_t m_MaxClassesPerDetection
Maximum numbers of classes per detection, used in Fast NMS.
std::vector< unsigned int > m_Axis
Values for the dimensions to reduce.
A StackDescriptor for the StackLayer.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
TensorShape m_TargetShape
Target shape value.
uint32_t m_PoolHeight
Pooling height value.
uint32_t m_PadTop
Padding top value in the height dimension.
uint32_t m_MaxDetections
Maximum numbers of detections.
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
float m_NmsIouThreshold
Intersection over union threshold.
An LstmDescriptor for the LstmLayer.
uint32_t m_PadRight
Padding right value in the width dimension.
uint32_t m_DilationX
Dilation factor value for width dimension.
uint32_t m_PadTop
Padding top value in the height dimension.
Status SetViewSize(uint32_t view, uint32_t coord, uint32_t value)
Set the size of the views.
std::vector< unsigned int > m_Begin
Beginning indices of the slice in each dimension.
std::vector< unsigned int > m_BlockShape
Block shape values.
A L2NormalizationDescriptor for the L2NormalizationLayer.
An ArgMinMaxDescriptor for ArgMinMaxLayer.
Definition: Descriptors.hpp:67
An OriginsDescriptor for the ConcatLayer.
A FullyConnectedDescriptor for the FullyConnectedLayer.
A tensor defined by a TensorInfo (shape and data type) and an immutable backing store.
Definition: Tensor.hpp:327
A FakeQuantizationDescriptor for the FakeQuantizationLayer.
Validate all output shapes.
uint32_t m_TargetWidth
Target width value.
A GatherDescriptor for the GatherLayer.
bool m_PeepholeEnabled
Enable/disable peephole.
uint32_t m_NumClasses
Number of classes.
A QLstmDescriptor for the QLstmLayer.
bool m_UseRegularNms
Use Regular NMS.
std::vector< unsigned int > m_BlockShape
Block shape value.
std::vector< int > m_Stride
Stride values for the input that will be sliced.
An ActivationDescriptor for the ActivationLayer.
Definition: Descriptors.hpp:36
uint32_t m_NumInputs
Number of input tensors.
uint32_t m_TargetHeight
Target height value.
uint32_t m_ActivationFunc
The activation function to use.
A SliceDescriptor for the SliceLayer.
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
unsigned int m_BlockSize
Scalar specifying the input block size. It must be >= 1.
float m_ScaleH
Center size encoding scale height.
ComparisonOperation m_Operation
Specifies the comparison operation to execute.
std::vector< int > m_End
End values for the input that will be sliced.
A SpaceToBatchNdDescriptor for the SpaceToBatchNdLayer.
Struct for the users to pass backend specific options.
uint32_t m_DilationX
Dilation along x axis.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
bool m_CifgEnabled
Enable/disable cifg (coupled input & forget gate).
SoftmaxDescriptor LogSoftmaxDescriptor
A LogSoftmaxDescriptor for the LogSoftmaxLayer.
PoolingAlgorithm m_PoolType
The pooling algorithm to use (Max. Average, L2).
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
std::vector< std::pair< unsigned int, unsigned int > > m_Crops
The values to crop from the input dimension.
bool m_ProjectionEnabled
Enable/disable the projection layer.
OutputShapeRounding m_OutputShapeRounding
The rounding method for the output shape. (Floor, Ceiling).
A layer user-provided data can be bound to (e.g. inputs, outputs).
Definition: InputLayer.hpp:13
A MeanDescriptor for the MeanLayer.
Infer missing output shapes and validate all output shapes.
A TransposeDescriptor for the TransposeLayer.
A StridedSliceDescriptor for the StridedSliceLayer.
int m_Axis
Axis to reduce across the input tensor.
Definition: Descriptors.hpp:83
float m_ScaleY
Center size encoding scale y.
float m_NmsScoreThreshold
NMS score threshold.
std::unique_ptr< INetwork, void(*)(INetwork *network)> INetworkPtr
Definition: INetwork.hpp:241
A Pooling2dDescriptor for the Pooling2dLayer.
A NormalizationDescriptor for the NormalizationLayer.
An InstanceNormalizationDescriptor for InstanceNormalizationLayer.
static INetworkPtr Create(NetworkOptions networkOptions={})
Definition: Network.cpp:476
bool m_CifgEnabled
Enable/disable CIFG (coupled input & forget gate).
PermutationVector m_DimMappings
Indicates how to translate tensor elements from a given source into the target destination, when source and target potentially have different memory layouts e.g.
ActivationFunction m_Function
The activation function to use (Sigmoid, TanH, Linear, ReLu, BoundedReLu, SoftReLu, LeakyReLu, Abs, Sqrt, Square, Elu).
Definition: Descriptors.hpp:59
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
A DepthwiseConvolution2dDescriptor for the DepthwiseConvolution2dLayer.
A BatchNormalizationDescriptor for the BatchNormalizationLayer.
uint32_t m_PadLeft
Padding left value in the width dimension.
Status SetViewOriginCoord(uint32_t view, uint32_t coord, uint32_t value)
Set the view origin coordinates.
A PermuteDescriptor for the PermuteLayer.
uint32_t m_PadRight
Padding right value in the width dimension.