ArmNN
 22.05
InferOutputTests.hpp
Go to the documentation of this file.
1 //
2 // Copyright © 2017 Arm Ltd. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
6 #pragma once
7 
8 #include <TestUtils.hpp>
9 
10 #include <Graph.hpp>
14 #include <layers/PreluLayer.hpp>
15 #include <layers/StackLayer.hpp>
16 
17 #include <doctest/doctest.h>
18 
20  const std::vector<armnn::TensorShape>& inputShapes,
21  std::vector<armnn::TensorShape>& outputShapes)
22 {
23  armnn::Graph graph;
24  auto argMinMaxLayer = graph.AddLayer<armnn::ArgMinMaxLayer>(descriptor, "argMinMax");
25  outputShapes = argMinMaxLayer->InferOutputShapes(inputShapes);
26 }
27 
29 {
30  armnn::Graph graph;
31  armnn::ArgMinMaxDescriptor descriptor;
32  descriptor.m_Axis = 2;
33 
34  const std::vector<armnn::TensorShape> inputShapes
35  {
36  { 1, 3, 2, 4 }
37  };
38 
39  std::vector<armnn::TensorShape> outputShapes;
40  CHECK_NOTHROW(ArgMinMaxInferOutputShapeImpl(descriptor, inputShapes, outputShapes));
41 
42  armnn::TensorShape expectedOutputShape( { 1, 3, 4 } );
43  CHECK(outputShapes.size() == 1);
44  CHECK(outputShapes[0] == expectedOutputShape);
45 }
46 
48 {
49  armnn::Graph graph;
50  armnn::ArgMinMaxDescriptor descriptor;
51  descriptor.m_Axis = 0;
52 
53  const std::vector<armnn::TensorShape> inputShapes
54  {
55  { 1, 3, 2 }
56  };
57 
58  std::vector<armnn::TensorShape> outputShapes;
59  CHECK_NOTHROW(ArgMinMaxInferOutputShapeImpl(descriptor, inputShapes, outputShapes));
60 
61  armnn::TensorShape expectedOutputShape( { 3, 2 } );
62  CHECK(outputShapes.size() == 1);
63  CHECK(outputShapes[0] == expectedOutputShape);
64 }
65 
67 {
68  armnn::Graph graph;
69  armnn::ArgMinMaxDescriptor descriptor;
70  descriptor.m_Axis = 1;
71 
72  const std::vector<armnn::TensorShape> inputShapes
73  {
74  { 3, 2 }
75  };
76 
77  std::vector<armnn::TensorShape> outputShapes;
78  CHECK_NOTHROW(ArgMinMaxInferOutputShapeImpl(descriptor, inputShapes, outputShapes));
79 
80  armnn::TensorShape expectedOutputShape( { 3 } );
81  CHECK(outputShapes.size() == 1);
82  CHECK(outputShapes[0] == expectedOutputShape);
83 }
84 
86 {
87  armnn::Graph graph;
88  armnn::ArgMinMaxDescriptor descriptor;
89  descriptor.m_Axis = 0;
90 
91  const std::vector<armnn::TensorShape> inputShapes
92  {
93  { 5 }
94  };
95 
96  std::vector<armnn::TensorShape> outputShapes;
97  CHECK_NOTHROW(ArgMinMaxInferOutputShapeImpl(descriptor, inputShapes, outputShapes));
98 
99  armnn::TensorShape expectedOutputShape( { 1 } );
100  CHECK(outputShapes.size() == 1);
101  CHECK(outputShapes[0] == expectedOutputShape);
102 }
103 
105 {
106  armnn::Graph graph;
107 
109  descriptor.m_BlockShape = {2, 2};
110  descriptor.m_Crops = {{0, 0}, {2, 0}};
112 
113  armnn::BatchToSpaceNdLayer* const batchToSpaceLayer =
114  graph.AddLayer<armnn::BatchToSpaceNdLayer>(descriptor, "batchToSpace");
115 
116  std::vector<armnn::TensorShape> shapes;
117  const std::vector<unsigned int> theDimSizes = {8, 1, 3, 1};
118  armnn::TensorShape shape(4, theDimSizes.data());
119  shapes.push_back(shape);
120 
121  const std::vector<unsigned int> expectedDimSizes = {2, 2, 4, 1};
122  armnn::TensorShape expectedShape(4, expectedDimSizes.data());
123 
124  CHECK(expectedShape == batchToSpaceLayer->InferOutputShapes(shapes).at(0));
125 }
126 
128 {
129  armnn::Graph graph;
130 
132  descriptor.m_BlockSize = 2;
134 
135  armnn::SpaceToDepthLayer* const spaceToDepthLayer =
136  graph.AddLayer<armnn::SpaceToDepthLayer>(descriptor, "spaceToDepth");
137 
138  std::vector<armnn::TensorShape> shapes;
139  const std::vector<unsigned int> dimSizes{ 1, 16, 8, 3 };
140  armnn::TensorShape shape(4, dimSizes.data());
141  shapes.push_back(shape);
142 
143  const std::vector<unsigned int> expectedDimSizes{ 1, 8, 4, 12 };
144  armnn::TensorShape expectedShape(4, expectedDimSizes.data());
145 
146  CHECK(expectedShape == spaceToDepthLayer->InferOutputShapes(shapes).at(0));
147 }
148 
149 void PreluInferOutputShapeImpl(const std::vector<armnn::TensorShape>& inputShapes,
150  std::vector<armnn::TensorShape>& outputShapes)
151 {
152  armnn::Graph graph;
153  armnn::PreluLayer* const preluLayer = graph.AddLayer<armnn::PreluLayer>("prelu");
154  outputShapes = preluLayer->InferOutputShapes(inputShapes);
155 }
156 
158 {
159  const std::vector<armnn::TensorShape> inputShapes
160  {
161  { 5, 1, 1, 7 }, // Input shape
162  { 5, 4, 3, 1 } // Alpha shape
163  };
164 
165  const std::vector<armnn::TensorShape> expectedOutputShapes
166  {
167  { 5, 4, 3, 7 } // Output shape
168  };
169 
170  std::vector<armnn::TensorShape> outputShapes;
171  CHECK_NOTHROW(PreluInferOutputShapeImpl(inputShapes, outputShapes));
172 
173  CHECK(outputShapes.size() == 1);
174  CHECK(outputShapes[0] == expectedOutputShapes[0]);
175 }
176 
178 {
179  const std::vector<armnn::TensorShape> inputShapes
180  {
181  { 4, 1, 4, 8 }, // Input shape
182  { 5, 4, 1 } // Alpha shape
183  };
184 
185  const std::vector<armnn::TensorShape> expectedOutputShapes
186  {
187  { 4, 5, 4, 8 } // Output shape
188  };
189 
190  std::vector<armnn::TensorShape> outputShapes;
191  CHECK_NOTHROW(PreluInferOutputShapeImpl(inputShapes, outputShapes));
192 
193  CHECK(outputShapes.size() == 1);
194  CHECK(outputShapes[0] == expectedOutputShapes[0]);
195 }
196 
198 {
199  const std::vector<armnn::TensorShape> inputShapes
200  {
201  { 4, 1, 2 }, // Input shape
202  { 5, 4, 3, 1 } // Alpha shape
203  };
204 
205  const std::vector<armnn::TensorShape> expectedOutputShapes
206  {
207  { 5, 4, 3, 2 } // Output shape
208  };
209 
210  std::vector<armnn::TensorShape> outputShapes;
211  CHECK_NOTHROW(PreluInferOutputShapeImpl(inputShapes, outputShapes));
212 
213  CHECK(outputShapes.size() == 1);
214  CHECK(outputShapes[0] == expectedOutputShapes[0]);
215 }
216 
218 {
219  const std::vector<armnn::TensorShape> inputShapes
220  {
221  { 4, 1, 2 }, // Input shape
222  { 5, 4, 3, 1 } // Alpha shape
223  };
224 
225  const std::vector<armnn::TensorShape> expectedOutputShapes
226  {
227  { 5, 7, 3, 2 } // Output shape
228  };
229 
230  std::vector<armnn::TensorShape> outputShapes;
231  CHECK_NOTHROW(PreluInferOutputShapeImpl(inputShapes, outputShapes));
232 
233  CHECK(outputShapes.size() == 1);
234  CHECK(outputShapes[0] != expectedOutputShapes[0]);
235 }
236 
238  const armnn::TensorShape& inputShape,
239  const armnn::TensorShape& alphaShape,
240  const armnn::TensorShape& outputShape)
241 {
242  // Creates the PReLU layer
243  armnn::Layer* const preluLayer = graph.AddLayer<armnn::PreluLayer>("prelu");
244 
245  // Creates extra layers
246  armnn::Layer* const input = graph.AddLayer<armnn::InputLayer> (0, "input");
247  armnn::Layer* const alpha = graph.AddLayer<armnn::InputLayer> (1, "alpha");
248  armnn::Layer* const output = graph.AddLayer<armnn::OutputLayer>(0, "output");
249 
250  // Connects up
251  armnn::TensorInfo inputTensorInfo (inputShape, armnn::DataType::Float32);
252  armnn::TensorInfo alphaTensorInfo (alphaShape, armnn::DataType::Float32);
253  armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Float32);
254  Connect(input, preluLayer, inputTensorInfo, 0, 0);
255  Connect(alpha, preluLayer, alphaTensorInfo, 0, 1);
256  Connect(preluLayer, output, outputTensorInfo, 0, 0);
257 }
258 
260 {
261  armnn::Graph graph;
262 
263  // Creates the PReLU layer
264  CreatePreluLayerHelper(graph, { 1, 4, 1, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 });
265 
266  // Graph::InferTensorInfos calls Layer::ValidateTensorShapesFromInputs
267  CHECK_NOTHROW(graph.InferTensorInfos());
268 }
269 
271 {
272  armnn::Graph graph;
273 
274  // Creates the PReLU layer
275  CreatePreluLayerHelper(graph, { 1, 4, 1, 2 }, { 5, 4, 3, 1 }, { 5, 7, 3, 2 });
276 
277  // Graph::InferTensorInfos calls Layer::ValidateTensorShapesFromInputs
278  CHECK_THROWS_AS(graph.InferTensorInfos(), armnn::LayerValidationException);
279 }
280 
282  const std::vector<armnn::TensorShape>& inputShapes,
283  std::vector<armnn::TensorShape>& outputShapes)
284 {
285  armnn::Graph graph;
286  armnn::StackLayer* const stackLayer = graph.AddLayer<armnn::StackLayer>(descriptor, "stack");
287  outputShapes = stackLayer->InferOutputShapes(inputShapes);
288 }
289 
291 {
292  armnn::Graph graph;
293 
294  armnn::StackDescriptor descriptor;
295  descriptor.m_Axis = 1;
296  descriptor.m_NumInputs = 3;
297  descriptor.m_InputShape = armnn::TensorShape
298  (
299  { 4, 2 } // Defined input shape
300  );
301 
302  const std::vector<armnn::TensorShape> inputShapes
303  {
304  { 4, 2 }, // Actual input shapes
305  { 4, 2 },
306  { 4, 2 }
307  };
308 
309  std::vector<armnn::TensorShape> outputShapes;
310  CHECK_NOTHROW(StackInferOutputShapeImpl(descriptor, inputShapes, outputShapes));
311 
312  armnn::TensorShape expectedOutputShape
313  (
314  { 4, 3, 2 }
315  );
316  CHECK(outputShapes.size() == 1);
317  CHECK(outputShapes[0] == expectedOutputShape);
318 }
319 
321 {
322  armnn::Graph graph;
323 
324  armnn::StackDescriptor descriptor;
325  descriptor.m_Axis = 1;
326  descriptor.m_NumInputs = 3;
327  descriptor.m_InputShape = armnn::TensorShape
328  (
329  { 4, 2 } // Defined input shape
330  );
331 
332  const std::vector<armnn::TensorShape> inputShapes
333  {
334  { 4, 2 }, // Actual input shapes
335  { 4, 5 }, // Incorrectly shaped input tensor
336  { 4, 2 }
337  };
338 
339  // Output shape is inferred from the descriptor, so should still be correct despite mismatching input shapes
340  std::vector<armnn::TensorShape> outputShapes;
341  CHECK_NOTHROW(StackInferOutputShapeImpl(descriptor, inputShapes, outputShapes));
342 
343  armnn::TensorShape expectedOutputShape
344  (
345  { 4, 3, 2 }
346  );
347  CHECK(outputShapes.size() == 1);
348  CHECK(outputShapes[0] == expectedOutputShape);
349 }
350 
352  const armnn::StackDescriptor& descriptor,
353  const std::vector<armnn::TensorShape>& inputShapes,
354  const armnn::TensorShape& outputShape)
355 {
356  // Creates the Stack layer
357  armnn::Layer* const stackLayer = graph.AddLayer<armnn::StackLayer>(descriptor, "stack");
358 
359  // Creates extra layers
360  std::vector<armnn::Layer*> inputs;
361  for (unsigned int i=0; i<inputShapes.size(); ++i)
362  {
363  inputs.push_back(graph.AddLayer<armnn::InputLayer>(static_cast<int>(i), "input"));
364  }
365  armnn::Layer* const output = graph.AddLayer<armnn::OutputLayer>(0, "output");
366 
367  // Connects up
368  std::vector<armnn::TensorInfo> inputTensorInfos;
369  for (unsigned int i=0; i<inputs.size(); ++i)
370  {
371  inputTensorInfos.push_back(armnn::TensorInfo(inputShapes[i], armnn::DataType::Float32));
372  }
373  armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Float32);
374 
375  for (unsigned int i=0; i<inputs.size(); ++i)
376  {
377  Connect(inputs[i], stackLayer, inputTensorInfos[i], 0, i);
378  }
379  Connect(stackLayer, output, outputTensorInfo, 0, 0);
380 }
381 
383 {
384  armnn::Graph graph;
385 
386  armnn::StackDescriptor descriptor;
387  descriptor.m_Axis = 0;
388  descriptor.m_NumInputs = 3;
389  descriptor.m_InputShape = armnn::TensorShape
390  (
391  { 2, 5 } // Defined input shape
392  );
393 
394  const std::vector<armnn::TensorShape> inputShapes
395  {
396  { 2, 5 }, // Actual input shapes
397  { 2, 5 },
398  { 2, 5 }
399  };
400 
401  // Creates the Stack layer
402  CreateStackLayerHelper(graph, descriptor, inputShapes, { 3, 2, 5 });
403 
404  // Graph::InferTensorInfos calls Layer::ValidateTensorShapesFromInputs
405  CHECK_NOTHROW(graph.InferTensorInfos());
406 }
407 
409 {
410  armnn::Graph graph;
411 
412  armnn::StackDescriptor descriptor;
413  descriptor.m_Axis = 0;
414  descriptor.m_NumInputs = 3;
415  descriptor.m_InputShape = armnn::TensorShape
416  (
417  { 2, 5 } // Defined input shape
418  );
419 
420  const std::vector<armnn::TensorShape> inputShapes
421  {
422  { 2, 5 }, // Actual input shapes
423  { 2, 2 }, // Incorrectly shaped input tensor
424  { 2, 5 }
425  };
426 
427  // Creates the Stack layer
428  CreateStackLayerHelper(graph, descriptor, inputShapes, { 3, 2, 5 });
429 
430  // Graph::InferTensorInfos calls Layer::ValidateTensorShapesFromInputs
431  CHECK_THROWS_AS(graph.InferTensorInfos(), armnn::LayerValidationException);
432 }
433 
435 {
436  armnn::Graph graph;
437 
439  descriptor.m_DilationX = 2;
440  descriptor.m_DilationY = 2;
441  descriptor.m_PadTop = 1;
442  descriptor.m_PadBottom = 1;
443  descriptor.m_PadLeft = 1;
444  descriptor.m_PadRight = 1;
445  descriptor.m_StrideX = 3;
446  descriptor.m_StrideY = 3;
448 
449  armnn::Convolution2dLayer* const convolution2dLayer =
450  graph.AddLayer<armnn::Convolution2dLayer>(descriptor, "convolution2d");
451 
452  std::vector<armnn::TensorShape> shapes;
453  const std::vector<unsigned int> inputSize = {1, 2, 10, 10};
454  armnn::TensorShape inputShape(4, inputSize.data());
455  shapes.push_back(inputShape);
456 
457  const std::vector<unsigned int> filterSize = { 1, 2, 2, 2};
458  armnn::TensorShape filterShape(4, filterSize.data());
459  shapes.push_back(filterShape);
460 
461  const std::vector<unsigned int> expectedOutputSizes = {1, 1, 4, 4};
462  armnn::TensorShape expectedOutputShape(4, expectedOutputSizes.data());
463 
464  CHECK(expectedOutputShape == convolution2dLayer->InferOutputShapes(shapes).at(0));
465 }
466 
468 {
469  armnn::Graph graph;
470 
472  descriptor.m_DilationX = 1;
473  descriptor.m_DilationY = 1;
474  descriptor.m_DilationZ = 1;
475  descriptor.m_PadTop = 1;
476  descriptor.m_PadBottom = 1;
477  descriptor.m_PadLeft = 1;
478  descriptor.m_PadRight = 1;
479  descriptor.m_PadFront = 1;
480  descriptor.m_PadBack = 1;
481  descriptor.m_StrideX = 2;
482  descriptor.m_StrideY = 2;
483  descriptor.m_StrideZ = 2;
485 
486  armnn::Convolution3dLayer* const convolution3dLayer =
487  graph.AddLayer<armnn::Convolution3dLayer>(descriptor, "convolution3d");
488 
489  std::vector<armnn::TensorShape> shapes;
490  const std::vector<unsigned int> inputSize = {1, 5, 5, 5, 1};
491  armnn::TensorShape inputShape(5, inputSize.data());
492  shapes.push_back(inputShape);
493 
494  const std::vector<unsigned int> filterSize = {3, 3, 3, 1, 1 };
495  armnn::TensorShape filterShape(5, filterSize.data());
496  shapes.push_back(filterShape);
497 
498  const std::vector<unsigned int> expectedOutputSizes = {1, 3, 3, 3, 1};
499  armnn::TensorShape expectedOutputShape(5, expectedOutputSizes.data());
500 
501  CHECK(expectedOutputShape == convolution3dLayer->InferOutputShapes(shapes).at(0));
502 }
503 
505 {
506  armnn::Graph graph;
507 
509  descriptor.m_PadTop = 0;
510  descriptor.m_PadBottom = 1;
511  descriptor.m_PadLeft = 0;
512  descriptor.m_PadRight = 1;
513  descriptor.m_StrideX = 2;
514  descriptor.m_StrideY = 2;
516 
517  armnn::TransposeConvolution2dLayer* const transposeConvolution2dLayer =
518  graph.AddLayer<armnn::TransposeConvolution2dLayer>(descriptor, "TransposeConvolution2d");
519 
520  std::vector<armnn::TensorShape> shapes;
521  const std::vector<unsigned int> inputSize = {1, 2, 3, 3};
522  armnn::TensorShape inputShape(4, inputSize.data());
523  shapes.push_back(inputShape);
524 
525  const std::vector<unsigned int> filterSize = { 1, 2, 3, 3};
526  armnn::TensorShape filterShape(4, filterSize.data());
527  shapes.push_back(filterShape);
528 
529  const std::vector<unsigned int> expectedOutputSizes = {1, 1, 6, 6};
530  armnn::TensorShape expectedOutputShape(4, expectedOutputSizes.data());
531 
532  CHECK(expectedOutputShape == transposeConvolution2dLayer->InferOutputShapes(shapes).at(0));
533 }
534 
536 {
537  armnn::Graph graph;
538 
540  descriptor.m_DilationX = 3;
541  descriptor.m_DilationY = 3;
542  descriptor.m_PadTop = 1;
543  descriptor.m_PadBottom = 2;
544  descriptor.m_PadLeft = 1;
545  descriptor.m_PadRight = 2;
546  descriptor.m_StrideX = 2;
547  descriptor.m_StrideY = 2;
549 
550  armnn::DepthwiseConvolution2dLayer* const depthwiseConvolution2dLayer =
551  graph.AddLayer<armnn::DepthwiseConvolution2dLayer>(descriptor, "DepthwiseConvolution2d");
552 
553  std::vector<armnn::TensorShape> shapes;
554  const std::vector<unsigned int> inputSize = {1, 2, 10, 10};
555  armnn::TensorShape inputShape(4, inputSize.data());
556  shapes.push_back(inputShape);
557 
558  const std::vector<unsigned int> filterSize = { 1, 3, 3, 2 };
559  armnn::TensorShape filterShape(4, filterSize.data());
560  shapes.push_back(filterShape);
561 
562  const std::vector<unsigned int> expectedOutputSizes = {1, 2, 4, 4};
563  armnn::TensorShape expectedOutputShape(4, expectedOutputSizes.data());
564 
565  CHECK(expectedOutputShape == depthwiseConvolution2dLayer->InferOutputShapes(shapes).at(0));
566 }
567 
569 {
570  armnn::Graph graph;
571 
572  armnn::Pooling3dDescriptor descriptor;
574  descriptor.m_PoolDepth = 2;
575  descriptor.m_PoolHeight = 2;
576  descriptor.m_PoolWidth = 2;
577  descriptor.m_PadTop = 1;
578  descriptor.m_PadBottom = 1;
579  descriptor.m_PadLeft = 1;
580  descriptor.m_PadRight = 1;
581  descriptor.m_PadFront = 1;
582  descriptor.m_PadBack = 1;
583  descriptor.m_StrideX = 2;
584  descriptor.m_StrideY = 2;
585  descriptor.m_StrideZ = 2;
587 
588  armnn::Pooling3dLayer* const pooling3dLayer =
589  graph.AddLayer<armnn::Pooling3dLayer>(descriptor, "pooling3d");
590 
591  std::vector<armnn::TensorShape> shapes;
592  const std::vector<unsigned int> inputSize = {1, 4, 4, 4, 1};
593  armnn::TensorShape inputShape(5, inputSize.data());
594  shapes.push_back(inputShape);
595 
596  const std::vector<unsigned int> expectedOutputSizes = {1, 3, 3, 3, 1};
597  armnn::TensorShape expectedOutputShape(5, expectedOutputSizes.data());
598 
599  CHECK(expectedOutputShape == pooling3dLayer->InferOutputShapes(shapes).at(0));
600 }
601 
602 // QLstm
604  const std::vector<armnn::TensorShape>& inputShapes,
605  std::vector<armnn::TensorShape>& outputShapes)
606 {
607  armnn::Graph graph;
608  armnn::QLstmLayer* const qLstmLayer = graph.AddLayer<armnn::QLstmLayer>(descriptor, "qLstm");
609  outputShapes = qLstmLayer->InferOutputShapes(inputShapes);
610 }
611 
613 {
614  armnn::QLstmDescriptor descriptor;
615  descriptor.m_PeepholeEnabled = true;
616  descriptor.m_CifgEnabled = false;
617  descriptor.m_ProjectionEnabled = false;
618 
619  // Input shapes
620  const std::vector<unsigned int> inputShape{ 2, 5 };
621  const std::vector<unsigned int> previousOutputInShape{ 2, 4 };
622  const std::vector<unsigned int> previousCellStateInShape{ 2, 4 };
623 
624  armnn::TensorShape inputTensorShape(2, inputShape.data());
625  armnn::TensorShape previousOutputInTensorShape(2, previousOutputInShape.data());
626  armnn::TensorShape previousCellStateInTensorShape(2, previousCellStateInShape.data());
627 
628  std::vector<armnn::TensorShape> inShapes
629  {
630  inputTensorShape,
631  previousOutputInTensorShape,
632  previousCellStateInTensorShape
633  };
634 
635  // Output shapes
636  const std::vector<unsigned int> outputStateOutShape{ 2, 4 };
637  const std::vector<unsigned int> cellStateOutShape{ 2, 4 };
638  const std::vector<unsigned int> outputShape{ 2, 4 };
639  armnn::TensorShape outputStateOutTensorShape(2, outputShape.data());
640  armnn::TensorShape cellStateOutTensorShape(2, cellStateOutShape.data());
641  armnn::TensorShape outputTensorShape(2, outputShape.data());
642 
643  std::vector<armnn::TensorShape> expectedOutShapes
644  {
645  outputStateOutTensorShape,
646  cellStateOutTensorShape,
647  outputTensorShape
648  };
649 
650  std::vector<armnn::TensorShape> actualOutShapes;
651  CHECK_NOTHROW(QLstmInferOutputShapeImpl(descriptor, inShapes, actualOutShapes));
652 
653  CHECK(actualOutShapes.size() == 3);
654  CHECK(expectedOutShapes[0] == actualOutShapes[0]);
655  CHECK(expectedOutShapes[1] == actualOutShapes[1]);
656  CHECK(expectedOutShapes[2] == actualOutShapes[2]);
657 }
658 
659 // QuantizedLstm
660 void QuantizedLstmInferOutputShapeImpl(const std::vector<armnn::TensorShape>& inputShapes,
661  std::vector<armnn::TensorShape>& outputShapes)
662 {
663  armnn::Graph graph;
664  armnn::QuantizedLstmLayer* const quantizedLstmLayer = graph.AddLayer<armnn::QuantizedLstmLayer>("quantizedLstm");
665  outputShapes = quantizedLstmLayer->InferOutputShapes(inputShapes);
666 }
667 
669 {
670  // Input shapes
671  const std::vector<unsigned int> inputShape{ 2, 5 };
672  const std::vector<unsigned int> previousCellStateInShape{ 2, 10 };
673  const std::vector<unsigned int> previousOutputInShape{ 2, 10 };
674  armnn::TensorShape inputTensorShape(2, inputShape.data());
675  armnn::TensorShape previousCellStateInTensorShape(2, previousCellStateInShape.data());
676  armnn::TensorShape previousOutputInTensorShape(2, previousOutputInShape.data());
677 
678  std::vector<armnn::TensorShape> inShapes
679  {
680  inputTensorShape,
681  previousCellStateInTensorShape,
682  previousOutputInTensorShape
683  };
684 
685  // Output shapes
686  const std::vector<unsigned int> cellStateOutShape{ 2, 10 };
687  const std::vector<unsigned int> outputShape{ 2, 10 };
688  armnn::TensorShape cellStateOutTensorShape(2, cellStateOutShape.data());
689  armnn::TensorShape outputTensorShape(2, outputShape.data());
690 
691  std::vector<armnn::TensorShape> expectedOutShapes
692  {
693  cellStateOutTensorShape,
694  outputTensorShape
695  };
696 
697  std::vector<armnn::TensorShape> actualOutShapes;
698  CHECK_NOTHROW(QuantizedLstmInferOutputShapeImpl(inShapes, actualOutShapes));
699 
700  CHECK(actualOutShapes.size() == 2);
701  CHECK(expectedOutShapes[0] == actualOutShapes[0]);
702  CHECK(expectedOutShapes[1] == actualOutShapes[1]);
703 }
uint32_t m_PadBottom
Padding bottom value in the height dimension.
std::vector< TensorShape > InferOutputShapes(const std::vector< TensorShape > &inputShapes) const override
By default returns inputShapes if the number of inputs are equal to number of outputs, otherwise infers the output shapes from given input shapes and layer properties.
PoolingAlgorithm m_PoolType
The pooling algorithm to use (Max, Average, L2).
void StackInferOutputShapeImpl(const armnn::StackDescriptor descriptor, const std::vector< armnn::TensorShape > &inputShapes, std::vector< armnn::TensorShape > &outputShapes)
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
uint32_t m_Axis
0-based axis along which to stack the input tensors.
std::vector< TensorShape > InferOutputShapes(const std::vector< TensorShape > &inputShapes) const override
By default returns inputShapes if the number of inputs are equal to number of outputs, otherwise infers the output shapes from given input shapes and layer properties.
This layer represents a 2D transpose convolution operation.
void QuantizedLstmInferOutputShapeTest()
A TransposeConvolution2dDescriptor for the TransposeConvolution2dLayer.
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
uint32_t m_PoolWidth
Pooling width value.
uint32_t m_PadBottom
Padding bottom value in the height dimension.
uint32_t m_PoolDepth
Pooling depth value.
std::vector< TensorShape > InferOutputShapes(const std::vector< TensorShape > &inputShapes) const override
By default returns inputShapes if the number of inputs are equal to number of outputs, otherwise infers the output shapes from given input shapes and layer properties.
Definition: StackLayer.cpp:35
uint32_t m_PadBack
Padding back value in the depth dimension.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
This layer represents a depthwise convolution 2d operation.
TensorShape m_InputShape
Required shape of all input tensors.
LayerT * AddLayer(Args &&... args)
Adds a new layer, of type LayerType, to the graph constructed with the arguments passed.
Definition: Graph.hpp:425
void PreluValidateTensorShapesFromInputsMatchTest()
bool m_PeepholeEnabled
Enable/disable peephole.
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
A Convolution2dDescriptor for the Convolution2dLayer.
uint32_t m_PadLeft
Padding left value in the width dimension.
void ArgMinMaxInferOutputShape1dTest()
This layer represents a SpaceToDepth operation.
std::vector< TensorShape > InferOutputShapes(const std::vector< TensorShape > &inputShapes) const override
By default returns inputShapes if the number of inputs are equal to number of outputs, otherwise infers the output shapes from given input shapes and layer properties.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
void PreluInferOutputShapeInputBiggerTest()
uint32_t m_PadRight
Padding right value in the width dimension.
uint32_t m_PadBottom
Padding bottom value in the height dimension.
uint32_t m_PadRight
Padding right value in the width dimension.
void TransposeConvolution2dInferOutputShapeTest()
DataLayout m_DataLayout
The data layout to be used (NCDHW, NDHWC).
void ArgMinMaxInferOutputShape4dTest()
uint32_t m_PadBottom
Padding bottom value in the height dimension.
uint32_t m_PadFront
Padding front value in the depth dimension.
uint32_t m_DilationY
Dilation along y axis.
A SpaceToDepthDescriptor for the SpaceToDepthLayer.
uint32_t m_PoolHeight
Pooling height value.
uint32_t m_DilationX
Dilation along x axis.
uint32_t m_DilationY
Dilation factor value for height dimension.
A BatchToSpaceNdDescriptor for the BatchToSpaceNdLayer.
void DepthwiseConvolution2dInferOutputShapeTest()
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
void Pooling3dInferOutputShapeTest()
A StackDescriptor for the StackLayer.
uint32_t m_PadTop
Padding top value in the height dimension.
void PreluInferOutputShapeAlphaBiggerTest()
void StackValidateTensorShapesFromInputsMatchTest()
void StackInferOutputShapeFromInputsNoMatchTest()
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
uint32_t m_PadBack
Padding back value in the depth dimension.
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
A layer user-provided data can be bound to (e.g. inputs, outputs).
Definition: OutputLayer.hpp:13
void CreateStackLayerHelper(armnn::Graph &graph, const armnn::StackDescriptor &descriptor, const std::vector< armnn::TensorShape > &inputShapes, const armnn::TensorShape &outputShape)
void SpaceToDepthInferOutputShapeTest()
void BatchToSpaceInferOutputShapeTest()
uint32_t m_DilationX
Dilation factor value for width dimension.
uint32_t m_PadTop
Padding top value in the height dimension.
This layer represents a QuantizedLstm operation.
std::vector< TensorShape > InferOutputShapes(const std::vector< TensorShape > &inputShapes) const override
By default returns inputShapes if the number of inputs are equal to number of outputs, otherwise infers the output shapes from given input shapes and layer properties.
Definition: PreluLayer.cpp:38
std::vector< unsigned int > m_BlockShape
Block shape values.
An ArgMinMaxDescriptor for ArgMinMaxLayer.
Definition: Descriptors.hpp:67
void CreatePreluLayerHelper(armnn::Graph &graph, const armnn::TensorShape &inputShape, const armnn::TensorShape &alphaShape, const armnn::TensorShape &outputShape)
This layer represents a stack operation.
Definition: StackLayer.hpp:13
void Convolution2dInferOutputShapeTest()
uint32_t m_PadBottom
Padding bottom value in the height dimension.
This layer represents a BatchToSpaceNd operation.
void ArgMinMaxInferOutputShape3dTest()
uint32_t m_PadTop
Padding top value in the height dimension.
This layer represents a ArgMinMax operation.
A QLstmDescriptor for the QLstmLayer.
void QLstmInferOutputShapeImpl(const armnn::QLstmDescriptor descriptor, const std::vector< armnn::TensorShape > &inputShapes, std::vector< armnn::TensorShape > &outputShapes)
uint32_t m_PadFront
Padding front value in the depth dimension.
void ArgMinMaxInferOutputShapeImpl(const armnn::ArgMinMaxDescriptor descriptor, const std::vector< armnn::TensorShape > &inputShapes, std::vector< armnn::TensorShape > &outputShapes)
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
uint32_t m_NumInputs
Number of input tensors.
uint32_t m_PadLeft
Padding left value in the width dimension.
void QuantizedLstmInferOutputShapeImpl(const std::vector< armnn::TensorShape > &inputShapes, std::vector< armnn::TensorShape > &outputShapes)
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
A Convolution3dDescriptor for the Convolution3dLayer.
uint32_t m_PadRight
Padding right value in the width dimension.
This layer represents a pooling 3d operation.
unsigned int m_BlockSize
Scalar specifying the input block size. It must be >= 1.
A Pooling3dDescriptor for the Pooling3dLayer.
uint32_t m_StrideZ
Stride value when proceeding through input for the depth dimension.
DataLayout m_DataLayout
The data layout to be used (NDHWC, NCDHW).
This layer represents a QLstm operation.
Definition: QLstmLayer.hpp:79
uint32_t m_DilationX
Dilation along x axis.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
void PreluInferOutputShapeSameDimsTest()
uint32_t m_PadLeft
Padding left value in the width dimension.
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
void StackValidateTensorShapesFromInputsNoMatchTest()
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
void PreluInferOutputShapeImpl(const std::vector< armnn::TensorShape > &inputShapes, std::vector< armnn::TensorShape > &outputShapes)
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
uint32_t m_PadLeft
Padding left value in the width dimension.
void PreluValidateTensorShapesFromInputsNoMatchTest()
void StackInferOutputShapeFromInputsMatchTest()
void InferTensorInfos()
Definition: Graph.cpp:562
std::vector< std::pair< unsigned int, unsigned int > > m_Crops
The values to crop from the input dimension.
uint32_t m_PadTop
Padding top value in the height dimension.
uint32_t m_PadTop
Padding top value in the height dimension.
bool m_ProjectionEnabled
Enable/disable the projection layer.
A layer user-provided data can be bound to (e.g. inputs, outputs).
Definition: InputLayer.hpp:13
uint32_t m_PadRight
Padding right value in the width dimension.
int m_Axis
Axis to reduce across the input tensor.
Definition: Descriptors.hpp:83
void ArgMinMaxInferOutputShape2dTest()
This layer represents a convolution 2d operation.
This layer represents a convolution 3d operation.
void Connect(armnn::IConnectableLayer *from, armnn::IConnectableLayer *to, const armnn::TensorInfo &tensorInfo, unsigned int fromIndex, unsigned int toIndex)
Definition: TestUtils.cpp:14
std::vector< TensorShape > InferOutputShapes(const std::vector< TensorShape > &inputShapes) const override
By default returns inputShapes if the number of inputs are equal to number of outputs, otherwise infers the output shapes from given input shapes and layer properties.
void QLstmInferOutputShapeTest()
std::vector< TensorShape > InferOutputShapes(const std::vector< TensorShape > &inputShapes) const override
Infers the output shape from a given input shape and axis parameter.
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
void PreluInferOutputShapeNoMatchTest()
std::vector< TensorShape > InferOutputShapes(const std::vector< TensorShape > &inputShapes) const override
By default returns inputShapes if the number of inputs are equal to number of outputs, otherwise infers the output shapes from given input shapes and layer properties.
Definition: QLstmLayer.cpp:153
std::vector< TensorShape > InferOutputShapes(const std::vector< TensorShape > &inputShapes) const override
Infers the output shapes from given input shapes and layer properties.
uint32_t m_DilationZ
Dilation along z axis.
std::vector< TensorShape > InferOutputShapes(const std::vector< TensorShape > &inputShapes) const override
By default returns inputShapes if the number of inputs are equal to number of outputs, otherwise infers the output shapes from given input shapes and layer properties.
uint32_t m_StrideZ
Stride value when proceeding through input for the depth dimension.
std::vector< TensorShape > InferOutputShapes(const std::vector< TensorShape > &inputShapes) const override
By default returns inputShapes if the number of inputs are equal to number of outputs, otherwise infers the output shapes from given input shapes and layer properties.
bool m_CifgEnabled
Enable/disable CIFG (coupled input & forget gate).
A DepthwiseConvolution2dDescriptor for the DepthwiseConvolution2dLayer.
uint32_t m_DilationY
Dilation along y axis.
uint32_t m_PadLeft
Padding left value in the width dimension.
void Convolution3dInferOutputShapeTest()
std::vector< TensorShape > InferOutputShapes(const std::vector< TensorShape > &inputShapes) const override
By default returns inputShapes if the number of inputs are equal to number of outputs, otherwise infers the output shapes from given input shapes and layer properties.
uint32_t m_PadRight
Padding right value in the width dimension.