ArmNN  NotReleased
InferOutputTests.hpp
Go to the documentation of this file.
1 //
2 // Copyright © 2017 Arm Ltd. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
6 #pragma once
7 
#include "TestUtils.hpp"

#include <Graph.hpp>
#include <layers/ArgMinMaxLayer.hpp>
#include <layers/BatchToSpaceNdLayer.hpp>
#include <layers/Convolution2dLayer.hpp>
#include <layers/DepthwiseConvolution2dLayer.hpp>
#include <layers/PreluLayer.hpp>
#include <layers/QuantizedLstmLayer.hpp>
#include <layers/SpaceToDepthLayer.hpp>
#include <layers/StackLayer.hpp>
#include <layers/TransposeConvolution2dLayer.hpp>

#include <boost/algorithm/string.hpp>
#include <boost/test/unit_test.hpp>
20 
22  const std::vector<armnn::TensorShape>& inputShapes,
23  std::vector<armnn::TensorShape>& outputShapes)
24 {
25  armnn::Graph graph;
26  auto argMinMaxLayer = graph.AddLayer<armnn::ArgMinMaxLayer>(descriptor, "argMinMax");
27  outputShapes = argMinMaxLayer->InferOutputShapes(inputShapes);
28 }
29 
31 {
32  armnn::Graph graph;
33  armnn::ArgMinMaxDescriptor descriptor;
34  descriptor.m_Axis = 2;
35 
36  const std::vector<armnn::TensorShape> inputShapes
37  {
38  { 1, 3, 2, 4 }
39  };
40 
41  std::vector<armnn::TensorShape> outputShapes;
42  BOOST_CHECK_NO_THROW(ArgMinMaxInferOutputShapeImpl(descriptor, inputShapes, outputShapes));
43 
44  armnn::TensorShape expectedOutputShape( { 1, 3, 4 } );
45  BOOST_CHECK(outputShapes.size() == 1);
46  BOOST_CHECK(outputShapes[0] == expectedOutputShape);
47 }
48 
50 {
51  armnn::Graph graph;
52  armnn::ArgMinMaxDescriptor descriptor;
53  descriptor.m_Axis = 0;
54 
55  const std::vector<armnn::TensorShape> inputShapes
56  {
57  { 1, 3, 2 }
58  };
59 
60  std::vector<armnn::TensorShape> outputShapes;
61  BOOST_CHECK_NO_THROW(ArgMinMaxInferOutputShapeImpl(descriptor, inputShapes, outputShapes));
62 
63  armnn::TensorShape expectedOutputShape( { 3, 2 } );
64  BOOST_CHECK(outputShapes.size() == 1);
65  BOOST_CHECK(outputShapes[0] == expectedOutputShape);
66 }
67 
69 {
70  armnn::Graph graph;
71  armnn::ArgMinMaxDescriptor descriptor;
72  descriptor.m_Axis = 1;
73 
74  const std::vector<armnn::TensorShape> inputShapes
75  {
76  { 3, 2 }
77  };
78 
79  std::vector<armnn::TensorShape> outputShapes;
80  BOOST_CHECK_NO_THROW(ArgMinMaxInferOutputShapeImpl(descriptor, inputShapes, outputShapes));
81 
82  armnn::TensorShape expectedOutputShape( { 3 } );
83  BOOST_CHECK(outputShapes.size() == 1);
84  BOOST_CHECK(outputShapes[0] == expectedOutputShape);
85 }
86 
88 {
89  armnn::Graph graph;
90  armnn::ArgMinMaxDescriptor descriptor;
91  descriptor.m_Axis = 0;
92 
93  const std::vector<armnn::TensorShape> inputShapes
94  {
95  { 5 }
96  };
97 
98  std::vector<armnn::TensorShape> outputShapes;
99  BOOST_CHECK_NO_THROW(ArgMinMaxInferOutputShapeImpl(descriptor, inputShapes, outputShapes));
100 
101  armnn::TensorShape expectedOutputShape( { 1 } );
102  BOOST_CHECK(outputShapes.size() == 1);
103  BOOST_CHECK(outputShapes[0] == expectedOutputShape);
104 }
105 
107 {
108  armnn::Graph graph;
109 
111  descriptor.m_BlockShape = {2, 2};
112  descriptor.m_Crops = {{0, 0}, {2, 0}};
114 
115  armnn::BatchToSpaceNdLayer* const batchToSpaceLayer =
116  graph.AddLayer<armnn::BatchToSpaceNdLayer>(descriptor, "batchToSpace");
117 
118  std::vector<armnn::TensorShape> shapes;
119  const std::vector<unsigned int> theDimSizes = {8, 1, 3, 1};
120  armnn::TensorShape shape(4, theDimSizes.data());
121  shapes.push_back(shape);
122 
123  const std::vector<unsigned int> expectedDimSizes = {2, 2, 4, 1};
124  armnn::TensorShape expectedShape(4, expectedDimSizes.data());
125 
126  BOOST_CHECK(expectedShape == batchToSpaceLayer->InferOutputShapes(shapes).at(0));
127 }
128 
130 {
131  armnn::Graph graph;
132 
134  descriptor.m_BlockSize = 2;
136 
137  armnn::SpaceToDepthLayer* const spaceToDepthLayer =
138  graph.AddLayer<armnn::SpaceToDepthLayer>(descriptor, "spaceToDepth");
139 
140  std::vector<armnn::TensorShape> shapes;
141  const std::vector<unsigned int> dimSizes{ 1, 16, 8, 3 };
142  armnn::TensorShape shape(4, dimSizes.data());
143  shapes.push_back(shape);
144 
145  const std::vector<unsigned int> expectedDimSizes{ 1, 8, 4, 12 };
146  armnn::TensorShape expectedShape(4, expectedDimSizes.data());
147 
148  BOOST_CHECK(expectedShape == spaceToDepthLayer->InferOutputShapes(shapes).at(0));
149 }
150 
151 void PreluInferOutputShapeImpl(const std::vector<armnn::TensorShape>& inputShapes,
152  std::vector<armnn::TensorShape>& outputShapes)
153 {
154  armnn::Graph graph;
155  armnn::PreluLayer* const preluLayer = graph.AddLayer<armnn::PreluLayer>("prelu");
156  outputShapes = preluLayer->InferOutputShapes(inputShapes);
157 }
158 
160 {
161  const std::vector<armnn::TensorShape> inputShapes
162  {
163  { 5, 1, 1, 7 }, // Input shape
164  { 5, 4, 3, 1 } // Alpha shape
165  };
166 
167  const std::vector<armnn::TensorShape> expectedOutputShapes
168  {
169  { 5, 4, 3, 7 } // Output shape
170  };
171 
172  std::vector<armnn::TensorShape> outputShapes;
173  BOOST_CHECK_NO_THROW(PreluInferOutputShapeImpl(inputShapes, outputShapes));
174 
175  BOOST_CHECK(outputShapes.size() == 1);
176  BOOST_CHECK(outputShapes[0] == expectedOutputShapes[0]);
177 }
178 
180 {
181  const std::vector<armnn::TensorShape> inputShapes
182  {
183  { 4, 1, 4, 8 }, // Input shape
184  { 5, 4, 1 } // Alpha shape
185  };
186 
187  const std::vector<armnn::TensorShape> expectedOutputShapes
188  {
189  { 4, 5, 4, 8 } // Output shape
190  };
191 
192  std::vector<armnn::TensorShape> outputShapes;
193  BOOST_CHECK_NO_THROW(PreluInferOutputShapeImpl(inputShapes, outputShapes));
194 
195  BOOST_CHECK(outputShapes.size() == 1);
196  BOOST_CHECK(outputShapes[0] == expectedOutputShapes[0]);
197 }
198 
200 {
201  const std::vector<armnn::TensorShape> inputShapes
202  {
203  { 4, 1, 2 }, // Input shape
204  { 5, 4, 3, 1 } // Alpha shape
205  };
206 
207  const std::vector<armnn::TensorShape> expectedOutputShapes
208  {
209  { 5, 4, 3, 2 } // Output shape
210  };
211 
212  std::vector<armnn::TensorShape> outputShapes;
213  BOOST_CHECK_NO_THROW(PreluInferOutputShapeImpl(inputShapes, outputShapes));
214 
215  BOOST_CHECK(outputShapes.size() == 1);
216  BOOST_CHECK(outputShapes[0] == expectedOutputShapes[0]);
217 }
218 
220 {
221  const std::vector<armnn::TensorShape> inputShapes
222  {
223  { 4, 1, 2 }, // Input shape
224  { 5, 4, 3, 1 } // Alpha shape
225  };
226 
227  const std::vector<armnn::TensorShape> expectedOutputShapes
228  {
229  { 5, 7, 3, 2 } // Output shape
230  };
231 
232  std::vector<armnn::TensorShape> outputShapes;
233  BOOST_CHECK_NO_THROW(PreluInferOutputShapeImpl(inputShapes, outputShapes));
234 
235  BOOST_CHECK(outputShapes.size() == 1);
236  BOOST_CHECK(outputShapes[0] != expectedOutputShapes[0]);
237 }
238 
240  const armnn::TensorShape& inputShape,
241  const armnn::TensorShape& alphaShape,
242  const armnn::TensorShape& outputShape)
243 {
244  // Creates the PReLU layer
245  armnn::Layer* const preluLayer = graph.AddLayer<armnn::PreluLayer>("prelu");
246 
247  // Creates extra layers
248  armnn::Layer* const input = graph.AddLayer<armnn::InputLayer> (0, "input");
249  armnn::Layer* const alpha = graph.AddLayer<armnn::InputLayer> (1, "alpha");
250  armnn::Layer* const output = graph.AddLayer<armnn::OutputLayer>(0, "output");
251 
252  // Connects up
253  armnn::TensorInfo inputTensorInfo (inputShape, armnn::DataType::Float32);
254  armnn::TensorInfo alphaTensorInfo (alphaShape, armnn::DataType::Float32);
255  armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Float32);
256  Connect(input, preluLayer, inputTensorInfo, 0, 0);
257  Connect(alpha, preluLayer, alphaTensorInfo, 0, 1);
258  Connect(preluLayer, output, outputTensorInfo, 0, 0);
259 }
260 
262 {
263  armnn::Graph graph;
264 
265  // Creates the PReLU layer
266  CreatePreluLayerHelper(graph, { 1, 4, 1, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 });
267 
268  // Graph::InferTensorInfos calls Layer::ValidateTensorShapesFromInputs
269  BOOST_CHECK_NO_THROW(graph.InferTensorInfos());
270 }
271 
273 {
274  armnn::Graph graph;
275 
276  // Creates the PReLU layer
277  CreatePreluLayerHelper(graph, { 1, 4, 1, 2 }, { 5, 4, 3, 1 }, { 5, 7, 3, 2 });
278 
279  // Graph::InferTensorInfos calls Layer::ValidateTensorShapesFromInputs
280  BOOST_CHECK_THROW(graph.InferTensorInfos(), armnn::LayerValidationException);
281 }
282 
284  const std::vector<armnn::TensorShape>& inputShapes,
285  std::vector<armnn::TensorShape>& outputShapes)
286 {
287  armnn::Graph graph;
288  armnn::StackLayer* const stackLayer = graph.AddLayer<armnn::StackLayer>(descriptor, "stack");
289  outputShapes = stackLayer->InferOutputShapes(inputShapes);
290 }
291 
293 {
294  armnn::Graph graph;
295 
296  armnn::StackDescriptor descriptor;
297  descriptor.m_Axis = 1;
298  descriptor.m_NumInputs = 3;
299  descriptor.m_InputShape = armnn::TensorShape
300  (
301  { 4, 2 } // Defined input shape
302  );
303 
304  const std::vector<armnn::TensorShape> inputShapes
305  {
306  { 4, 2 }, // Actual input shapes
307  { 4, 2 },
308  { 4, 2 }
309  };
310 
311  std::vector<armnn::TensorShape> outputShapes;
312  BOOST_CHECK_NO_THROW(StackInferOutputShapeImpl(descriptor, inputShapes, outputShapes));
313 
314  armnn::TensorShape expectedOutputShape
315  (
316  { 4, 3, 2 }
317  );
318  BOOST_CHECK(outputShapes.size() == 1);
319  BOOST_CHECK(outputShapes[0] == expectedOutputShape);
320 }
321 
323 {
324  armnn::Graph graph;
325 
326  armnn::StackDescriptor descriptor;
327  descriptor.m_Axis = 1;
328  descriptor.m_NumInputs = 3;
329  descriptor.m_InputShape = armnn::TensorShape
330  (
331  { 4, 2 } // Defined input shape
332  );
333 
334  const std::vector<armnn::TensorShape> inputShapes
335  {
336  { 4, 2 }, // Actual input shapes
337  { 4, 5 }, // Incorrectly shaped input tensor
338  { 4, 2 }
339  };
340 
341  // Output shape is inferred from the descriptor, so should still be correct despite mismatching input shapes
342  std::vector<armnn::TensorShape> outputShapes;
343  BOOST_CHECK_NO_THROW(StackInferOutputShapeImpl(descriptor, inputShapes, outputShapes));
344 
345  armnn::TensorShape expectedOutputShape
346  (
347  { 4, 3, 2 }
348  );
349  BOOST_CHECK(outputShapes.size() == 1);
350  BOOST_CHECK(outputShapes[0] == expectedOutputShape);
351 }
352 
354  const armnn::StackDescriptor& descriptor,
355  const std::vector<armnn::TensorShape>& inputShapes,
356  const armnn::TensorShape& outputShape)
357 {
358  // Creates the Stack layer
359  armnn::Layer* const stackLayer = graph.AddLayer<armnn::StackLayer>(descriptor, "stack");
360 
361  // Creates extra layers
362  std::vector<armnn::Layer*> inputs;
363  for (unsigned int i=0; i<inputShapes.size(); ++i)
364  {
365  inputs.push_back(graph.AddLayer<armnn::InputLayer>(static_cast<int>(i), "input"));
366  }
367  armnn::Layer* const output = graph.AddLayer<armnn::OutputLayer>(0, "output");
368 
369  // Connects up
370  std::vector<armnn::TensorInfo> inputTensorInfos;
371  for (unsigned int i=0; i<inputs.size(); ++i)
372  {
373  inputTensorInfos.push_back(armnn::TensorInfo(inputShapes[i], armnn::DataType::Float32));
374  }
375  armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Float32);
376 
377  for (unsigned int i=0; i<inputs.size(); ++i)
378  {
379  Connect(inputs[i], stackLayer, inputTensorInfos[i], 0, i);
380  }
381  Connect(stackLayer, output, outputTensorInfo, 0, 0);
382 }
383 
385 {
386  armnn::Graph graph;
387 
388  armnn::StackDescriptor descriptor;
389  descriptor.m_Axis = 0;
390  descriptor.m_NumInputs = 3;
391  descriptor.m_InputShape = armnn::TensorShape
392  (
393  { 2, 5 } // Defined input shape
394  );
395 
396  const std::vector<armnn::TensorShape> inputShapes
397  {
398  { 2, 5 }, // Actual input shapes
399  { 2, 5 },
400  { 2, 5 }
401  };
402 
403  // Creates the Stack layer
404  CreateStackLayerHelper(graph, descriptor, inputShapes, { 3, 2, 5 });
405 
406  // Graph::InferTensorInfos calls Layer::ValidateTensorShapesFromInputs
407  BOOST_CHECK_NO_THROW(graph.InferTensorInfos());
408 }
409 
411 {
412  armnn::Graph graph;
413 
414  armnn::StackDescriptor descriptor;
415  descriptor.m_Axis = 0;
416  descriptor.m_NumInputs = 3;
417  descriptor.m_InputShape = armnn::TensorShape
418  (
419  { 2, 5 } // Defined input shape
420  );
421 
422  const std::vector<armnn::TensorShape> inputShapes
423  {
424  { 2, 5 }, // Actual input shapes
425  { 2, 2 }, // Incorrectly shaped input tensor
426  { 2, 5 }
427  };
428 
429  // Creates the Stack layer
430  CreateStackLayerHelper(graph, descriptor, inputShapes, { 3, 2, 5 });
431 
432  // Graph::InferTensorInfos calls Layer::ValidateTensorShapesFromInputs
433  BOOST_CHECK_THROW(graph.InferTensorInfos(), armnn::LayerValidationException);
434 }
435 
437 {
438  armnn::Graph graph;
439 
441  descriptor.m_DilationX = 2;
442  descriptor.m_DilationY = 2;
443  descriptor.m_PadTop = 1;
444  descriptor.m_PadBottom = 1;
445  descriptor.m_PadLeft = 1;
446  descriptor.m_PadRight = 1;
447  descriptor.m_StrideX = 3;
448  descriptor.m_StrideY = 3;
450 
451  armnn::Convolution2dLayer* const convolution2dLayer =
452  graph.AddLayer<armnn::Convolution2dLayer>(descriptor, "convolution2d");
453 
454  std::vector<armnn::TensorShape> shapes;
455  const std::vector<unsigned int> inputSize = {1, 2, 10, 10};
456  armnn::TensorShape inputShape(4, inputSize.data());
457  shapes.push_back(inputShape);
458 
459  const std::vector<unsigned int> filterSize = { 1, 2, 2, 2};
460  armnn::TensorShape filterShape(4, filterSize.data());
461  shapes.push_back(filterShape);
462 
463  const std::vector<unsigned int> expectedOutputSizes = {1, 1, 4, 4};
464  armnn::TensorShape expectedOutputShape(4, expectedOutputSizes.data());
465 
466  BOOST_CHECK(expectedOutputShape == convolution2dLayer->InferOutputShapes(shapes).at(0));
467 }
468 
470 {
471  armnn::Graph graph;
472 
474  descriptor.m_PadTop = 0;
475  descriptor.m_PadBottom = 1;
476  descriptor.m_PadLeft = 0;
477  descriptor.m_PadRight = 1;
478  descriptor.m_StrideX = 2;
479  descriptor.m_StrideY = 2;
481 
482  armnn::TransposeConvolution2dLayer* const transposeConvolution2dLayer =
483  graph.AddLayer<armnn::TransposeConvolution2dLayer>(descriptor, "TransposeConvolution2d");
484 
485  std::vector<armnn::TensorShape> shapes;
486  const std::vector<unsigned int> inputSize = {1, 2, 3, 3};
487  armnn::TensorShape inputShape(4, inputSize.data());
488  shapes.push_back(inputShape);
489 
490  const std::vector<unsigned int> filterSize = { 1, 2, 3, 3};
491  armnn::TensorShape filterShape(4, filterSize.data());
492  shapes.push_back(filterShape);
493 
494  const std::vector<unsigned int> expectedOutputSizes = {1, 1, 6, 6};
495  armnn::TensorShape expectedOutputShape(4, expectedOutputSizes.data());
496 
497  BOOST_CHECK(expectedOutputShape == transposeConvolution2dLayer->InferOutputShapes(shapes).at(0));
498 }
499 
501 {
502  armnn::Graph graph;
503 
505  descriptor.m_DilationX = 3;
506  descriptor.m_DilationY = 3;
507  descriptor.m_PadTop = 1;
508  descriptor.m_PadBottom = 2;
509  descriptor.m_PadLeft = 1;
510  descriptor.m_PadRight = 2;
511  descriptor.m_StrideX = 2;
512  descriptor.m_StrideY = 2;
514 
515  armnn::DepthwiseConvolution2dLayer* const depthwiseConvolution2dLayer =
516  graph.AddLayer<armnn::DepthwiseConvolution2dLayer>(descriptor, "DepthwiseConvolution2d");
517 
518  std::vector<armnn::TensorShape> shapes;
519  const std::vector<unsigned int> inputSize = {1, 2, 10, 10};
520  armnn::TensorShape inputShape(4, inputSize.data());
521  shapes.push_back(inputShape);
522 
523  const std::vector<unsigned int> filterSize = { 1, 2, 3, 3};
524  armnn::TensorShape filterShape(4, filterSize.data());
525  shapes.push_back(filterShape);
526 
527  const std::vector<unsigned int> expectedOutputSizes = {1, 2, 4, 4};
528  armnn::TensorShape expectedOutputShape(4, expectedOutputSizes.data());
529 
530  BOOST_CHECK(expectedOutputShape == depthwiseConvolution2dLayer->InferOutputShapes(shapes).at(0));
531 }
532 
533 // QuantizedLstm
534 void QuantizedLstmInferOutputShapeImpl(const std::vector<armnn::TensorShape>& inputShapes,
535  std::vector<armnn::TensorShape>& outputShapes)
536 {
537  armnn::Graph graph;
538  armnn::QuantizedLstmLayer* const quantizedLstmLayer = graph.AddLayer<armnn::QuantizedLstmLayer>("quantizedLstm");
539  outputShapes = quantizedLstmLayer->InferOutputShapes(inputShapes);
540 }
541 
543 {
544  // Input shapes
545  const std::vector<unsigned int> inputShape{ 2, 5 };
546  const std::vector<unsigned int> previousCellStateInShape{ 2, 10 };
547  const std::vector<unsigned int> previousOutputInShape{ 2, 10 };
548  armnn::TensorShape inputTensorShape(2, inputShape.data());
549  armnn::TensorShape previousCellStateInTensorShape(2, previousCellStateInShape.data());
550  armnn::TensorShape previousOutputInTensorShape(2, previousOutputInShape.data());
551 
552  std::vector<armnn::TensorShape> inShapes
553  {
554  inputTensorShape,
555  previousCellStateInTensorShape,
556  previousOutputInTensorShape
557  };
558 
559  // Output shapes
560  const std::vector<unsigned int> cellStateOutShape{ 2, 10 };
561  const std::vector<unsigned int> outputShape{ 2, 10 };
562  armnn::TensorShape cellStateOutTensorShape(2, cellStateOutShape.data());
563  armnn::TensorShape outputTensorShape(2, outputShape.data());
564 
565  std::vector<armnn::TensorShape> expectedOutShapes
566  {
567  cellStateOutTensorShape,
568  outputTensorShape
569  };
570 
571  std::vector<armnn::TensorShape> actualOutShapes;
572  BOOST_CHECK_NO_THROW(QuantizedLstmInferOutputShapeImpl(inShapes, actualOutShapes));
573 
574  BOOST_CHECK(actualOutShapes.size() == 2);
575  BOOST_CHECK(expectedOutShapes[0] == actualOutShapes[0]);
576  BOOST_CHECK(expectedOutShapes[1] == actualOutShapes[1]);
577 }
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
void StackInferOutputShapeFromInputsMatchTest()
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
uint32_t m_PadBottom
Padding bottom value in the height dimension.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
This layer represents a BatchToSpaceNd operation.
This layer represents a 2D transpose convolution operation.
A layer user-provided data can be bound to (e.g. inputs, outputs).
Definition: InputLayer.hpp:13
uint32_t m_PadBottom
Padding bottom value in the height dimension.
void PreluInferOutputShapeInputBiggerTest()
uint32_t m_Axis
0-based axis along which to stack the input tensors.
uint32_t m_PadRight
Padding right value in the width dimension.
void QuantizedLstmInferOutputShapeTest()
A BatchToSpaceNdDescriptor for the BatchToSpaceNdLayer.
uint32_t m_DilationX
Dilation factor value for width dimension.
void StackInferOutputShapeImpl(const armnn::StackDescriptor descriptor, const std::vector< armnn::TensorShape > &inputShapes, std::vector< armnn::TensorShape > &outputShapes)
uint32_t m_PadTop
Padding top value in the height dimension.
std::vector< TensorShape > InferOutputShapes(const std::vector< TensorShape > &inputShapes) const override
void ArgMinMaxInferOutputShape3dTest()
void StackInferOutputShapeFromInputsNoMatchTest()
uint32_t m_PadLeft
Padding left value in the width dimension.
void PreluValidateTensorShapesFromInputsMatchTest()
This layer represents a depthwise convolution 2d operation.
This layer represents a convolution 2d operation.
This layer represents a stack operation.
Definition: StackLayer.hpp:13
void PreluValidateTensorShapesFromInputsNoMatchTest()
TensorShape m_InputShape
Required shape of all input tensors.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
LayerT * AddLayer(Args &&... args)
Adds a new layer, of type LayerType, to the graph constructed with the arguments passed.
Definition: Graph.hpp:397
This layer represents a SpaceToDepth operation.
This layer represents a ArgMinMax operation.
uint32_t m_PadBottom
Padding bottom value in the height dimension.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
void StackValidateTensorShapesFromInputsNoMatchTest()
void QuantizedLstmInferOutputShapeImpl(const std::vector< armnn::TensorShape > &inputShapes, std::vector< armnn::TensorShape > &outputShapes)
uint32_t m_PadTop
Padding top value in the height dimension.
void ArgMinMaxInferOutputShapeImpl(const armnn::ArgMinMaxDescriptor descriptor, const std::vector< armnn::TensorShape > &inputShapes, std::vector< armnn::TensorShape > &outputShapes)
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
A SpaceToDepthDescriptor for the SpaceToDepthLayer.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
void CreateStackLayerHelper(armnn::Graph &graph, const armnn::StackDescriptor &descriptor, const std::vector< armnn::TensorShape > &inputShapes, const armnn::TensorShape &outputShape)
A TransposeConvolution2dDescriptor for the TransposeConvolution2dLayer.
uint32_t m_PadTop
Padding top value in the height dimension.
uint32_t m_PadLeft
Padding left value in the width dimension.
int m_Axis
Axis to reduce across the input tensor.
Definition: Descriptors.hpp:58
void BatchToSpaceInferOutputShapeTest()
std::vector< TensorShape > InferOutputShapes(const std::vector< TensorShape > &inputShapes) const override
void CreatePreluLayerHelper(armnn::Graph &graph, const armnn::TensorShape &inputShape, const armnn::TensorShape &alphaShape, const armnn::TensorShape &outputShape)
uint32_t m_NumInputs
Number of input tensors.
void ArgMinMaxInferOutputShape4dTest()
void SpaceToDepthInferOutputShapeTest()
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
std::vector< TensorShape > InferOutputShapes(const std::vector< TensorShape > &inputShapes) const override
void DepthwiseConvolution2dInferOutputShapeTest()
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
void InferTensorInfos()
Definition: Graph.cpp:493
std::vector< TensorShape > InferOutputShapes(const std::vector< TensorShape > &inputShapes) const override
Definition: PreluLayer.cpp:35
void ArgMinMaxInferOutputShape2dTest()
BOOST_CHECK(profilingService.GetCurrentState()==ProfilingState::WaitingForAck)
A layer user-provided data can be bound to (e.g. inputs, outputs).
Definition: OutputLayer.hpp:13
uint32_t m_PadRight
Padding right value in the width dimension.
void ArgMinMaxInferOutputShape1dTest()
std::vector< TensorShape > InferOutputShapes(const std::vector< TensorShape > &inputShapes) const override
A StackDescriptor for the StackLayer.
std::vector< TensorShape > InferOutputShapes(const std::vector< TensorShape > &inputShapes) const override
uint32_t m_PadLeft
Padding left value in the width dimension.
void Connect(armnn::IConnectableLayer *from, armnn::IConnectableLayer *to, const armnn::TensorInfo &tensorInfo, unsigned int fromIndex, unsigned int toIndex)
Definition: TestUtils.cpp:12
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
void PreluInferOutputShapeAlphaBiggerTest()
void Convolution2dInferOutputShapeTest()
unsigned int m_BlockSize
Scalar specifying the input block size. It must be >= 1.
void TransposeConvolution2dInferOutputShapeTest()
void StackValidateTensorShapesFromInputsMatchTest()
uint32_t m_DilationY
Dilation along y axis.
std::vector< TensorShape > InferOutputShapes(const std::vector< TensorShape > &inputShapes) const override
Definition: StackLayer.cpp:33
A DepthwiseConvolution2dDescriptor for the DepthwiseConvolution2dLayer.
std::vector< TensorShape > InferOutputShapes(const std::vector< TensorShape > &inputShapes) const override
A Convolution2dDescriptor for the Convolution2dLayer.
void PreluInferOutputShapeSameDimsTest()
uint32_t m_DilationX
Dilation along x axis.
void PreluInferOutputShapeImpl(const std::vector< armnn::TensorShape > &inputShapes, std::vector< armnn::TensorShape > &outputShapes)
std::vector< std::pair< unsigned int, unsigned int > > m_Crops
The values to crop from the input dimension.
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
An ArgMinMaxDescriptor for ArgMinMaxLayer.
Definition: Descriptors.hpp:43
void PreluInferOutputShapeNoMatchTest()
std::vector< unsigned int > m_BlockShape
Block shape values.
std::vector< TensorShape > InferOutputShapes(const std::vector< TensorShape > &inputShapes) const override
uint32_t m_DilationY
Dilation factor value for height dimension.
This layer represents a QuantizedLstm operation.
uint32_t m_PadRight
Padding right value in the width dimension.