ArmNN 20.02
InferOutputTests.hpp File Reference
#include "TestUtils.hpp"
#include <Graph.hpp>
#include <layers/ArgMinMaxLayer.hpp>
#include <layers/BatchToSpaceNdLayer.hpp>
#include <layers/SpaceToDepthLayer.hpp>
#include <layers/PreluLayer.hpp>
#include <layers/StackLayer.hpp>
#include <boost/algorithm/string.hpp>
#include <boost/test/unit_test.hpp>


Functions

void ArgMinMaxInferOutputShapeImpl (const armnn::ArgMinMaxDescriptor descriptor, const std::vector< armnn::TensorShape > &inputShapes, std::vector< armnn::TensorShape > &outputShapes)
 
void ArgMinMaxInferOutputShape4dTest ()
 
void ArgMinMaxInferOutputShape3dTest ()
 
void ArgMinMaxInferOutputShape2dTest ()
 
void ArgMinMaxInferOutputShape1dTest ()
 
void BatchToSpaceInferOutputShapeTest ()
 
void SpaceToDepthInferOutputShapeTest ()
 
void PreluInferOutputShapeImpl (const std::vector< armnn::TensorShape > &inputShapes, std::vector< armnn::TensorShape > &outputShapes)
 
void PreluInferOutputShapeSameDimsTest ()
 
void PreluInferOutputShapeInputBiggerTest ()
 
void PreluInferOutputShapeAlphaBiggerTest ()
 
void PreluInferOutputShapeNoMatchTest ()
 
void CreatePreluLayerHelper (armnn::Graph &graph, const armnn::TensorShape &inputShape, const armnn::TensorShape &alphaShape, const armnn::TensorShape &outputShape)
 
void PreluValidateTensorShapesFromInputsMatchTest ()
 
void PreluValidateTensorShapesFromInputsNoMatchTest ()
 
void StackInferOutputShapeImpl (const armnn::StackDescriptor descriptor, const std::vector< armnn::TensorShape > &inputShapes, std::vector< armnn::TensorShape > &outputShapes)
 
void StackInferOutputShapeFromInputsMatchTest ()
 
void StackInferOutputShapeFromInputsNoMatchTest ()
 
void CreateStackLayerHelper (armnn::Graph &graph, const armnn::StackDescriptor &descriptor, const std::vector< armnn::TensorShape > &inputShapes, const armnn::TensorShape &outputShape)
 
void StackValidateTensorShapesFromInputsMatchTest ()
 
void StackValidateTensorShapesFromInputsNoMatchTest ()
 
void Convolution2dInferOutputShapeTest ()
 
void TransposeConvolution2dInferOutputShapeTest ()
 
void DepthwiseConvolution2dInferOutputShapeTest ()
 
void QuantizedLstmInferOutputShapeImpl (const std::vector< armnn::TensorShape > &inputShapes, std::vector< armnn::TensorShape > &outputShapes)
 
void QuantizedLstmInferOutputShapeTest ()
 

Function Documentation

◆ ArgMinMaxInferOutputShape1dTest()

void ArgMinMaxInferOutputShape1dTest ( )

Definition at line 87 of file InferOutputTests.hpp.

References ArgMinMaxInferOutputShapeImpl(), BOOST_CHECK(), and ArgMinMaxDescriptor::m_Axis.

88 {
89  armnn::Graph graph;
90  armnn::ArgMinMaxDescriptor descriptor;
91  descriptor.m_Axis = 0;
92 
93  const std::vector<armnn::TensorShape> inputShapes
94  {
95  { 5 }
96  };
97 
98  std::vector<armnn::TensorShape> outputShapes;
99  BOOST_CHECK_NO_THROW(ArgMinMaxInferOutputShapeImpl(descriptor, inputShapes, outputShapes));
100 
101  armnn::TensorShape expectedOutputShape( { 1 } );
102  BOOST_CHECK(outputShapes.size() == 1);
103  BOOST_CHECK(outputShapes[0] == expectedOutputShape);
104 }

◆ ArgMinMaxInferOutputShape2dTest()

void ArgMinMaxInferOutputShape2dTest ( )

Definition at line 68 of file InferOutputTests.hpp.

References ArgMinMaxInferOutputShapeImpl(), BOOST_CHECK(), and ArgMinMaxDescriptor::m_Axis.

69 {
70  armnn::Graph graph;
71  armnn::ArgMinMaxDescriptor descriptor;
72  descriptor.m_Axis = 1;
73 
74  const std::vector<armnn::TensorShape> inputShapes
75  {
76  { 3, 2 }
77  };
78 
79  std::vector<armnn::TensorShape> outputShapes;
80  BOOST_CHECK_NO_THROW(ArgMinMaxInferOutputShapeImpl(descriptor, inputShapes, outputShapes));
81 
82  armnn::TensorShape expectedOutputShape( { 3 } );
83  BOOST_CHECK(outputShapes.size() == 1);
84  BOOST_CHECK(outputShapes[0] == expectedOutputShape);
85 }

◆ ArgMinMaxInferOutputShape3dTest()

void ArgMinMaxInferOutputShape3dTest ( )

Definition at line 49 of file InferOutputTests.hpp.

References ArgMinMaxInferOutputShapeImpl(), BOOST_CHECK(), and ArgMinMaxDescriptor::m_Axis.

50 {
51  armnn::Graph graph;
52  armnn::ArgMinMaxDescriptor descriptor;
53  descriptor.m_Axis = 0;
54 
55  const std::vector<armnn::TensorShape> inputShapes
56  {
57  { 1, 3, 2 }
58  };
59 
60  std::vector<armnn::TensorShape> outputShapes;
61  BOOST_CHECK_NO_THROW(ArgMinMaxInferOutputShapeImpl(descriptor, inputShapes, outputShapes));
62 
63  armnn::TensorShape expectedOutputShape( { 3, 2 } );
64  BOOST_CHECK(outputShapes.size() == 1);
65  BOOST_CHECK(outputShapes[0] == expectedOutputShape);
66 }

◆ ArgMinMaxInferOutputShape4dTest()

void ArgMinMaxInferOutputShape4dTest ( )

Definition at line 30 of file InferOutputTests.hpp.

References ArgMinMaxInferOutputShapeImpl(), BOOST_CHECK(), and ArgMinMaxDescriptor::m_Axis.

31 {
32  armnn::Graph graph;
33  armnn::ArgMinMaxDescriptor descriptor;
34  descriptor.m_Axis = 2;
35 
36  const std::vector<armnn::TensorShape> inputShapes
37  {
38  { 1, 3, 2, 4 }
39  };
40 
41  std::vector<armnn::TensorShape> outputShapes;
42  BOOST_CHECK_NO_THROW(ArgMinMaxInferOutputShapeImpl(descriptor, inputShapes, outputShapes));
43 
44  armnn::TensorShape expectedOutputShape( { 1, 3, 4 } );
45  BOOST_CHECK(outputShapes.size() == 1);
46  BOOST_CHECK(outputShapes[0] == expectedOutputShape);
47 }

◆ ArgMinMaxInferOutputShapeImpl()

void ArgMinMaxInferOutputShapeImpl (const armnn::ArgMinMaxDescriptor descriptor, const std::vector< armnn::TensorShape > &inputShapes, std::vector< armnn::TensorShape > &outputShapes)

Definition at line 21 of file InferOutputTests.hpp.

References Graph::AddLayer(), and ArgMinMaxLayer::InferOutputShapes().

Referenced by ArgMinMaxInferOutputShape1dTest(), ArgMinMaxInferOutputShape2dTest(), ArgMinMaxInferOutputShape3dTest(), and ArgMinMaxInferOutputShape4dTest().

24 {
25  armnn::Graph graph;
26  auto argMinMaxLayer = graph.AddLayer<armnn::ArgMinMaxLayer>(descriptor, "argMinMax");
27  outputShapes = argMinMaxLayer->InferOutputShapes(inputShapes);
28 }
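
The rule exercised by the four tests above is that the output shape equals the input shape with the dimension at m_Axis removed, collapsing to { 1 } for a 1D input. A minimal stand-alone sketch of that rule (an illustration only, not the ArmNN implementation):

#include <cassert>
#include <cstddef>
#include <vector>

// Hypothetical helper mirroring the ArgMin/ArgMax shape rule checked above:
// drop the reduced axis; a 1D input collapses to a single-element shape.
std::vector<unsigned int> InferArgMinMaxShapeSketch(const std::vector<unsigned int>& input, int axis)
{
    assert(axis >= 0 && static_cast<std::size_t>(axis) < input.size());
    if (input.size() == 1)
    {
        return { 1 };
    }
    std::vector<unsigned int> output;
    for (std::size_t i = 0; i < input.size(); ++i)
    {
        if (i != static_cast<std::size_t>(axis))
        {
            output.push_back(input[i]);
        }
    }
    return output; // e.g. { 1, 3, 2, 4 } with axis 2 -> { 1, 3, 4 }
}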

◆ BatchToSpaceInferOutputShapeTest()

void BatchToSpaceInferOutputShapeTest ( )

Definition at line 106 of file InferOutputTests.hpp.

References Graph::AddLayer(), BOOST_CHECK(), BatchToSpaceNdLayer::InferOutputShapes(), BatchToSpaceNdDescriptor::m_BlockShape, BatchToSpaceNdDescriptor::m_Crops, BatchToSpaceNdDescriptor::m_DataLayout, and armnn::NHWC.

107 {
108  armnn::Graph graph;
109 
110  armnn::BatchToSpaceNdDescriptor descriptor;
111  descriptor.m_BlockShape = {2, 2};
112  descriptor.m_Crops = {{0, 0}, {2, 0}};
113  descriptor.m_DataLayout = armnn::DataLayout::NHWC;
114 
115  armnn::BatchToSpaceNdLayer* const batchToSpaceLayer =
116  graph.AddLayer<armnn::BatchToSpaceNdLayer>(descriptor, "batchToSpace");
117 
118  std::vector<armnn::TensorShape> shapes;
119  const std::vector<unsigned int> theDimSizes = {8, 1, 3, 1};
120  armnn::TensorShape shape(4, theDimSizes.data());
121  shapes.push_back(shape);
122 
123  const std::vector<unsigned int> expectedDimSizes = {2, 2, 4, 1};
124  armnn::TensorShape expectedShape(4, expectedDimSizes.data());
125 
126  BOOST_CHECK(expectedShape == batchToSpaceLayer->InferOutputShapes(shapes).at(0));
127 }
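
The expected { 2, 2, 4, 1 } follows from the usual BatchToSpaceNd shape arithmetic for NHWC data. A short sketch of that calculation using the test's values (illustrative only, not ArmNN code):

#include <array>

// Input {8, 1, 3, 1} (NHWC), block shape {2, 2}, crops {{0, 0}, {2, 0}}.
std::array<unsigned int, 4> BatchToSpaceNhwcShapeSketch()
{
    const unsigned int n = 8, h = 1, w = 3, c = 1;
    const unsigned int blockH = 2, blockW = 2;
    const unsigned int cropTop = 0, cropBottom = 0, cropLeft = 2, cropRight = 0;
    return {
        n / (blockH * blockW),               // 8 / 4     = 2
        h * blockH - (cropTop + cropBottom), // 1 * 2 - 0 = 2
        w * blockW - (cropLeft + cropRight), // 3 * 2 - 2 = 4
        c                                    // unchanged = 1
    };
}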

◆ Convolution2dInferOutputShapeTest()

void Convolution2dInferOutputShapeTest ( )

Definition at line 436 of file InferOutputTests.hpp.

References Graph::AddLayer(), BOOST_CHECK(), Convolution2dLayer::InferOutputShapes(), Convolution2dDescriptor::m_DataLayout, Convolution2dDescriptor::m_DilationX, Convolution2dDescriptor::m_DilationY, Convolution2dDescriptor::m_PadBottom, Convolution2dDescriptor::m_PadLeft, Convolution2dDescriptor::m_PadRight, Convolution2dDescriptor::m_PadTop, Convolution2dDescriptor::m_StrideX, Convolution2dDescriptor::m_StrideY, and armnn::NCHW.

437 {
438  armnn::Graph graph;
439 
440  armnn::Convolution2dDescriptor descriptor;
441  descriptor.m_DilationX = 2;
442  descriptor.m_DilationY = 2;
443  descriptor.m_PadTop = 1;
444  descriptor.m_PadBottom = 1;
445  descriptor.m_PadLeft = 1;
446  descriptor.m_PadRight = 1;
447  descriptor.m_StrideX = 3;
448  descriptor.m_StrideY = 3;
449  descriptor.m_DataLayout = armnn::DataLayout::NCHW;
450 
451  armnn::Convolution2dLayer* const convolution2dLayer =
452  graph.AddLayer<armnn::Convolution2dLayer>(descriptor, "convolution2d");
453 
454  std::vector<armnn::TensorShape> shapes;
455  const std::vector<unsigned int> inputSize = {1, 2, 10, 10};
456  armnn::TensorShape inputShape(4, inputSize.data());
457  shapes.push_back(inputShape);
458 
459  const std::vector<unsigned int> filterSize = { 1, 2, 2, 2};
460  armnn::TensorShape filterShape(4, filterSize.data());
461  shapes.push_back(filterShape);
462 
463  const std::vector<unsigned int> expectedOutputSizes = {1, 1, 4, 4};
464  armnn::TensorShape expectedOutputShape(4, expectedOutputSizes.data());
465 
466  BOOST_CHECK(expectedOutputShape == convolution2dLayer->InferOutputShapes(shapes).at(0));
467 }
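
The expected { 1, 1, 4, 4 } follows from the standard convolution output-size arithmetic: the 2x2 kernel dilated by 2 has an effective size of 3, so each spatial dimension gives (10 + 1 + 1 - 3) / 3 + 1 = 4, and the output channel count equals the number of filters. A sketch of that calculation (illustrative only, not ArmNN code):

#include <array>

// Input {1, 2, 10, 10} (NCHW), filter {1, 2, 2, 2}, dilation 2, padding 1 on every side, stride 3.
std::array<unsigned int, 4> Convolution2dShapeSketch()
{
    const unsigned int inH = 10, inW = 10;
    const unsigned int kernel = 2, dilation = 2, stride = 3, pad = 1 + 1; // top+bottom = left+right
    const unsigned int numFilters = 1;

    const unsigned int dilatedKernel = dilation * (kernel - 1) + 1;     // 3
    const unsigned int outH = (inH + pad - dilatedKernel) / stride + 1; // (12 - 3) / 3 + 1 = 4
    const unsigned int outW = (inW + pad - dilatedKernel) / stride + 1; // 4
    return { 1, numFilters, outH, outW };                               // {1, 1, 4, 4}
}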

◆ CreatePreluLayerHelper()

void CreatePreluLayerHelper (armnn::Graph &graph, const armnn::TensorShape &inputShape, const armnn::TensorShape &alphaShape, const armnn::TensorShape &outputShape)

Definition at line 239 of file InferOutputTests.hpp.

References Graph::AddLayer(), Connect(), and armnn::Float32.

Referenced by PreluValidateTensorShapesFromInputsMatchTest(), and PreluValidateTensorShapesFromInputsNoMatchTest().

243 {
244  // Creates the PReLU layer
245  armnn::Layer* const preluLayer = graph.AddLayer<armnn::PreluLayer>("prelu");
246 
247  // Creates extra layers
248  armnn::Layer* const input = graph.AddLayer<armnn::InputLayer> (0, "input");
249  armnn::Layer* const alpha = graph.AddLayer<armnn::InputLayer> (1, "alpha");
250  armnn::Layer* const output = graph.AddLayer<armnn::OutputLayer>(0, "output");
251 
252  // Connects up
253  armnn::TensorInfo inputTensorInfo (inputShape, armnn::DataType::Float32);
254  armnn::TensorInfo alphaTensorInfo (alphaShape, armnn::DataType::Float32);
255  armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Float32);
256  Connect(input, preluLayer, inputTensorInfo, 0, 0);
257  Connect(alpha, preluLayer, alphaTensorInfo, 0, 1);
258  Connect(preluLayer, output, outputTensorInfo, 0, 0);
259 }

◆ CreateStackLayerHelper()

void CreateStackLayerHelper (armnn::Graph &graph, const armnn::StackDescriptor &descriptor, const std::vector< armnn::TensorShape > &inputShapes, const armnn::TensorShape &outputShape)

Definition at line 353 of file InferOutputTests.hpp.

References Graph::AddLayer(), Connect(), and armnn::Float32.

Referenced by StackValidateTensorShapesFromInputsMatchTest(), and StackValidateTensorShapesFromInputsNoMatchTest().

357 {
358  // Creates the Stack layer
359  armnn::Layer* const stackLayer = graph.AddLayer<armnn::StackLayer>(descriptor, "stack");
360 
361  // Creates extra layers
362  std::vector<armnn::Layer*> inputs;
363  for (unsigned int i=0; i<inputShapes.size(); ++i)
364  {
365  inputs.push_back(graph.AddLayer<armnn::InputLayer>(static_cast<int>(i), "input"));
366  }
367  armnn::Layer* const output = graph.AddLayer<armnn::OutputLayer>(0, "output");
368 
369  // Connects up
370  std::vector<armnn::TensorInfo> inputTensorInfos;
371  for (unsigned int i=0; i<inputs.size(); ++i)
372  {
373  inputTensorInfos.push_back(armnn::TensorInfo(inputShapes[i], armnn::DataType::Float32));
374  }
375  armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Float32);
376 
377  for (unsigned int i=0; i<inputs.size(); ++i)
378  {
379  Connect(inputs[i], stackLayer, inputTensorInfos[i], 0, i);
380  }
381  Connect(stackLayer, output, outputTensorInfo, 0, 0);
382 }

◆ DepthwiseConvolution2dInferOutputShapeTest()

void DepthwiseConvolution2dInferOutputShapeTest ( )

Definition at line 500 of file InferOutputTests.hpp.

References Graph::AddLayer(), BOOST_CHECK(), DepthwiseConvolution2dLayer::InferOutputShapes(), DepthwiseConvolution2dDescriptor::m_DataLayout, DepthwiseConvolution2dDescriptor::m_DilationX, DepthwiseConvolution2dDescriptor::m_DilationY, DepthwiseConvolution2dDescriptor::m_PadBottom, DepthwiseConvolution2dDescriptor::m_PadLeft, DepthwiseConvolution2dDescriptor::m_PadRight, DepthwiseConvolution2dDescriptor::m_PadTop, DepthwiseConvolution2dDescriptor::m_StrideX, DepthwiseConvolution2dDescriptor::m_StrideY, and armnn::NCHW.

501 {
502  armnn::Graph graph;
503 
504  armnn::DepthwiseConvolution2dDescriptor descriptor;
505  descriptor.m_DilationX = 3;
506  descriptor.m_DilationY = 3;
507  descriptor.m_PadTop = 1;
508  descriptor.m_PadBottom = 2;
509  descriptor.m_PadLeft = 1;
510  descriptor.m_PadRight = 2;
511  descriptor.m_StrideX = 2;
512  descriptor.m_StrideY = 2;
513  descriptor.m_DataLayout = armnn::DataLayout::NCHW;
514 
515  armnn::DepthwiseConvolution2dLayer* const depthwiseConvolution2dLayer =
516  graph.AddLayer<armnn::DepthwiseConvolution2dLayer>(descriptor, "DepthwiseConvolution2d");
517 
518  std::vector<armnn::TensorShape> shapes;
519  const std::vector<unsigned int> inputSize = {1, 2, 10, 10};
520  armnn::TensorShape inputShape(4, inputSize.data());
521  shapes.push_back(inputShape);
522 
523  const std::vector<unsigned int> filterSize = { 1, 2, 3, 3};
524  armnn::TensorShape filterShape(4, filterSize.data());
525  shapes.push_back(filterShape);
526 
527  const std::vector<unsigned int> expectedOutputSizes = {1, 2, 4, 4};
528  armnn::TensorShape expectedOutputShape(4, expectedOutputSizes.data());
529 
530  BOOST_CHECK(expectedOutputShape == depthwiseConvolution2dLayer->InferOutputShapes(shapes).at(0));
531 }
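
The same spatial arithmetic as the Convolution2d example above applies here: the 3x3 kernel dilated by 3 has an effective size of 7, so (10 + 1 + 2 - 7) / 2 + 1 = 4 in each spatial dimension. The difference is the channel count, which for a depthwise convolution is the channel multiplier times the input channels. A sketch with the test's values (illustrative only, not ArmNN code):

#include <array>

// Input {1, 2, 10, 10} (NCHW); filter {1, 2, 3, 3}, read here as {multiplier, inChannels, kH, kW} (assumed layout).
std::array<unsigned int, 4> DepthwiseConvolution2dShapeSketch()
{
    const unsigned int dilatedKernel = 3 * (3 - 1) + 1;             // 7
    const unsigned int outH = (10 + 1 + 2 - dilatedKernel) / 2 + 1; // 4
    const unsigned int outW = (10 + 1 + 2 - dilatedKernel) / 2 + 1; // 4
    const unsigned int outC = 1 * 2;                                // multiplier * inChannels
    return { 1, outC, outH, outW };                                 // {1, 2, 4, 4}
}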

◆ PreluInferOutputShapeAlphaBiggerTest()

void PreluInferOutputShapeAlphaBiggerTest ( )

Definition at line 199 of file InferOutputTests.hpp.

References BOOST_CHECK(), and PreluInferOutputShapeImpl().

200 {
201  const std::vector<armnn::TensorShape> inputShapes
202  {
203  { 4, 1, 2 }, // Input shape
204  { 5, 4, 3, 1 } // Alpha shape
205  };
206 
207  const std::vector<armnn::TensorShape> expectedOutputShapes
208  {
209  { 5, 4, 3, 2 } // Output shape
210  };
211 
212  std::vector<armnn::TensorShape> outputShapes;
213  BOOST_CHECK_NO_THROW(PreluInferOutputShapeImpl(inputShapes, outputShapes));
214 
215  BOOST_CHECK(outputShapes.size() == 1);
216  BOOST_CHECK(outputShapes[0] == expectedOutputShapes[0]);
217 }

◆ PreluInferOutputShapeImpl()

void PreluInferOutputShapeImpl (const std::vector< armnn::TensorShape > &inputShapes, std::vector< armnn::TensorShape > &outputShapes)

Definition at line 151 of file InferOutputTests.hpp.

References Graph::AddLayer(), and PreluLayer::InferOutputShapes().

Referenced by PreluInferOutputShapeAlphaBiggerTest(), PreluInferOutputShapeInputBiggerTest(), PreluInferOutputShapeNoMatchTest(), and PreluInferOutputShapeSameDimsTest().

153 {
154  armnn::Graph graph;
155  armnn::PreluLayer* const preluLayer = graph.AddLayer<armnn::PreluLayer>("prelu");
156  outputShapes = preluLayer->InferOutputShapes(inputShapes);
157 }
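
PreluLayer::InferOutputShapes broadcasts the input and alpha shapes against each other, so each output dimension is the larger of the two corresponding (right-aligned) extents. A minimal stand-alone sketch of that broadcast rule, assuming simple max semantics (illustrative only, not the ArmNN implementation):

#include <algorithm>
#include <cstddef>
#include <vector>

// Hypothetical helper: right-align two shapes and take the larger extent per dimension.
std::vector<unsigned int> BroadcastShapesSketch(std::vector<unsigned int> a, std::vector<unsigned int> b)
{
    if (a.size() < b.size()) { std::swap(a, b); } // make 'a' the longer shape
    std::vector<unsigned int> out = a;
    const std::size_t offset = a.size() - b.size();
    for (std::size_t i = 0; i < b.size(); ++i)
    {
        out[offset + i] = std::max(a[offset + i], b[i]); // size-1 dimensions broadcast up
    }
    return out; // e.g. { 4, 1, 2 } vs { 5, 4, 3, 1 } -> { 5, 4, 3, 2 }
}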

◆ PreluInferOutputShapeInputBiggerTest()

void PreluInferOutputShapeInputBiggerTest ( )

Definition at line 179 of file InferOutputTests.hpp.

References BOOST_CHECK(), and PreluInferOutputShapeImpl().

180 {
181  const std::vector<armnn::TensorShape> inputShapes
182  {
183  { 4, 1, 4, 8 }, // Input shape
184  { 5, 4, 1 } // Alpha shape
185  };
186 
187  const std::vector<armnn::TensorShape> expectedOutputShapes
188  {
189  { 4, 5, 4, 8 } // Output shape
190  };
191 
192  std::vector<armnn::TensorShape> outputShapes;
193  BOOST_CHECK_NO_THROW(PreluInferOutputShapeImpl(inputShapes, outputShapes));
194 
195  BOOST_CHECK(outputShapes.size() == 1);
196  BOOST_CHECK(outputShapes[0] == expectedOutputShapes[0]);
197 }

◆ PreluInferOutputShapeNoMatchTest()

void PreluInferOutputShapeNoMatchTest ( )

Definition at line 219 of file InferOutputTests.hpp.

References BOOST_CHECK(), and PreluInferOutputShapeImpl().

220 {
221  const std::vector<armnn::TensorShape> inputShapes
222  {
223  { 4, 1, 2 }, // Input shape
224  { 5, 4, 3, 1 } // Alpha shape
225  };
226 
227  const std::vector<armnn::TensorShape> expectedOutputShapes
228  {
229  { 5, 7, 3, 2 } // Output shape
230  };
231 
232  std::vector<armnn::TensorShape> outputShapes;
233  BOOST_CHECK_NO_THROW(PreluInferOutputShapeImpl(inputShapes, outputShapes));
234 
235  BOOST_CHECK(outputShapes.size() == 1);
236  BOOST_CHECK(outputShapes[0] != expectedOutputShapes[0]);
237 }

◆ PreluInferOutputShapeSameDimsTest()

void PreluInferOutputShapeSameDimsTest ( )

Definition at line 159 of file InferOutputTests.hpp.

References BOOST_CHECK(), and PreluInferOutputShapeImpl().

160 {
161  const std::vector<armnn::TensorShape> inputShapes
162  {
163  { 5, 1, 1, 7 }, // Input shape
164  { 5, 4, 3, 1 } // Alpha shape
165  };
166 
167  const std::vector<armnn::TensorShape> expectedOutputShapes
168  {
169  { 5, 4, 3, 7 } // Output shape
170  };
171 
172  std::vector<armnn::TensorShape> outputShapes;
173  BOOST_CHECK_NO_THROW(PreluInferOutputShapeImpl(inputShapes, outputShapes));
174 
175  BOOST_CHECK(outputShapes.size() == 1);
176  BOOST_CHECK(outputShapes[0] == expectedOutputShapes[0]);
177 }

◆ PreluValidateTensorShapesFromInputsMatchTest()

void PreluValidateTensorShapesFromInputsMatchTest ( )

Definition at line 261 of file InferOutputTests.hpp.

References CreatePreluLayerHelper(), and Graph::InferTensorInfos().

262 {
263  armnn::Graph graph;
264 
265  // Creates the PReLU layer
266  CreatePreluLayerHelper(graph, { 1, 4, 1, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 });
267 
268  // Graph::InferTensorInfos calls Layer::ValidateTensorShapesFromInputs
269  BOOST_CHECK_NO_THROW(graph.InferTensorInfos());
270 }

◆ PreluValidateTensorShapesFromInputsNoMatchTest()

void PreluValidateTensorShapesFromInputsNoMatchTest ( )

Definition at line 272 of file InferOutputTests.hpp.

References CreatePreluLayerHelper(), and Graph::InferTensorInfos().

273 {
274  armnn::Graph graph;
275 
276  // Creates the PReLU layer
277  CreatePreluLayerHelper(graph, { 1, 4, 1, 2 }, { 5, 4, 3, 1 }, { 5, 7, 3, 2 });
278 
279  // Graph::InferTensorInfos calls Layer::ValidateTensorShapesFromInputs
280  BOOST_CHECK_THROW(graph.InferTensorInfos(), armnn::LayerValidationException);
281 }

◆ QuantizedLstmInferOutputShapeImpl()

void QuantizedLstmInferOutputShapeImpl (const std::vector< armnn::TensorShape > &inputShapes, std::vector< armnn::TensorShape > &outputShapes)

Definition at line 534 of file InferOutputTests.hpp.

References Graph::AddLayer(), and QuantizedLstmLayer::InferOutputShapes().

Referenced by QuantizedLstmInferOutputShapeTest().

536 {
537  armnn::Graph graph;
538  armnn::QuantizedLstmLayer* const quantizedLstmLayer = graph.AddLayer<armnn::QuantizedLstmLayer>("quantizedLstm");
539  outputShapes = quantizedLstmLayer->InferOutputShapes(inputShapes);
540 }

◆ QuantizedLstmInferOutputShapeTest()

void QuantizedLstmInferOutputShapeTest ( )

Definition at line 542 of file InferOutputTests.hpp.

References BOOST_CHECK(), and QuantizedLstmInferOutputShapeImpl().

543 {
544  // Input shapes
545  const std::vector<unsigned int> inputShape{ 2, 5 };
546  const std::vector<unsigned int> previousCellStateInShape{ 2, 10 };
547  const std::vector<unsigned int> previousOutputInShape{ 2, 10 };
548  armnn::TensorShape inputTensorShape(2, inputShape.data());
549  armnn::TensorShape previousCellStateInTensorShape(2, previousCellStateInShape.data());
550  armnn::TensorShape previousOutputInTensorShape(2, previousOutputInShape.data());
551 
552  std::vector<armnn::TensorShape> inShapes
553  {
554  inputTensorShape,
555  previousCellStateInTensorShape,
556  previousOutputInTensorShape
557  };
558 
559  // Output shapes
560  const std::vector<unsigned int> cellStateOutShape{ 2, 10 };
561  const std::vector<unsigned int> outputShape{ 2, 10 };
562  armnn::TensorShape cellStateOutTensorShape(2, cellStateOutShape.data());
563  armnn::TensorShape outputTensorShape(2, outputShape.data());
564 
565  std::vector<armnn::TensorShape> expectedOutShapes
566  {
567  cellStateOutTensorShape,
568  outputTensorShape
569  };
570 
571  std::vector<armnn::TensorShape> actualOutShapes;
572  BOOST_CHECK_NO_THROW(QuantizedLstmInferOutputShapeImpl(inShapes, actualOutShapes));
573 
574  BOOST_CHECK(actualOutShapes.size() == 2);
575  BOOST_CHECK(expectedOutShapes[0] == actualOutShapes[0]);
576  BOOST_CHECK(expectedOutShapes[1] == actualOutShapes[1]);
577 }

◆ SpaceToDepthInferOutputShapeTest()

void SpaceToDepthInferOutputShapeTest ( )

Definition at line 129 of file InferOutputTests.hpp.

References Graph::AddLayer(), BOOST_CHECK(), SpaceToDepthLayer::InferOutputShapes(), SpaceToDepthDescriptor::m_BlockSize, SpaceToDepthDescriptor::m_DataLayout, and armnn::NHWC.

130 {
131  armnn::Graph graph;
132 
133  armnn::SpaceToDepthDescriptor descriptor;
134  descriptor.m_BlockSize = 2;
135  descriptor.m_DataLayout = armnn::DataLayout::NHWC;
136 
137  armnn::SpaceToDepthLayer* const spaceToDepthLayer =
138  graph.AddLayer<armnn::SpaceToDepthLayer>(descriptor, "spaceToDepth");
139 
140  std::vector<armnn::TensorShape> shapes;
141  const std::vector<unsigned int> dimSizes{ 1, 16, 8, 3 };
142  armnn::TensorShape shape(4, dimSizes.data());
143  shapes.push_back(shape);
144 
145  const std::vector<unsigned int> expectedDimSizes{ 1, 8, 4, 12 };
146  armnn::TensorShape expectedShape(4, expectedDimSizes.data());
147 
148  BOOST_CHECK(expectedShape == spaceToDepthLayer->InferOutputShapes(shapes).at(0));
149 }
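
The expected { 1, 8, 4, 12 } follows directly from the SpaceToDepth shape rule for NHWC data: height and width are divided by the block size and channels are multiplied by its square. A sketch with the test's values (illustrative only, not ArmNN code):

#include <array>

// Input {1, 16, 8, 3} (NHWC), block size 2.
std::array<unsigned int, 4> SpaceToDepthShapeSketch()
{
    const unsigned int n = 1, h = 16, w = 8, c = 3;
    const unsigned int block = 2;
    return { n, h / block, w / block, c * block * block }; // {1, 8, 4, 12}
}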

◆ StackInferOutputShapeFromInputsMatchTest()

void StackInferOutputShapeFromInputsMatchTest ( )

Definition at line 292 of file InferOutputTests.hpp.

References BOOST_CHECK(), StackDescriptor::m_Axis, StackDescriptor::m_InputShape, StackDescriptor::m_NumInputs, and StackInferOutputShapeImpl().

293 {
294  armnn::Graph graph;
295 
296  armnn::StackDescriptor descriptor;
297  descriptor.m_Axis = 1;
298  descriptor.m_NumInputs = 3;
299  descriptor.m_InputShape = armnn::TensorShape
300  (
301  { 4, 2 } // Defined input shape
302  );
303 
304  const std::vector<armnn::TensorShape> inputShapes
305  {
306  { 4, 2 }, // Actual input shapes
307  { 4, 2 },
308  { 4, 2 }
309  };
310 
311  std::vector<armnn::TensorShape> outputShapes;
312  BOOST_CHECK_NO_THROW(StackInferOutputShapeImpl(descriptor, inputShapes, outputShapes));
313 
314  armnn::TensorShape expectedOutputShape
315  (
316  { 4, 3, 2 }
317  );
318  BOOST_CHECK(outputShapes.size() == 1);
319  BOOST_CHECK(outputShapes[0] == expectedOutputShape);
320 }

◆ StackInferOutputShapeFromInputsNoMatchTest()

void StackInferOutputShapeFromInputsNoMatchTest ( )

Definition at line 322 of file InferOutputTests.hpp.

References BOOST_CHECK(), StackDescriptor::m_Axis, StackDescriptor::m_InputShape, StackDescriptor::m_NumInputs, and StackInferOutputShapeImpl().

323 {
324  armnn::Graph graph;
325 
326  armnn::StackDescriptor descriptor;
327  descriptor.m_Axis = 1;
328  descriptor.m_NumInputs = 3;
329  descriptor.m_InputShape = armnn::TensorShape
330  (
331  { 4, 2 } // Defined input shape
332  );
333 
334  const std::vector<armnn::TensorShape> inputShapes
335  {
336  { 4, 2 }, // Actual input shapes
337  { 4, 5 }, // Incorrectly shaped input tensor
338  { 4, 2 }
339  };
340 
341  // Output shape is inferred from the descriptor, so should still be correct despite mismatching input shapes
342  std::vector<armnn::TensorShape> outputShapes;
343  BOOST_CHECK_NO_THROW(StackInferOutputShapeImpl(descriptor, inputShapes, outputShapes));
344 
345  armnn::TensorShape expectedOutputShape
346  (
347  { 4, 3, 2 }
348  );
349  BOOST_CHECK(outputShapes.size() == 1);
350  BOOST_CHECK(outputShapes[0] == expectedOutputShape);
351 }

◆ StackInferOutputShapeImpl()

void StackInferOutputShapeImpl (const armnn::StackDescriptor descriptor, const std::vector< armnn::TensorShape > &inputShapes, std::vector< armnn::TensorShape > &outputShapes)

Definition at line 283 of file InferOutputTests.hpp.

References Graph::AddLayer(), and StackLayer::InferOutputShapes().

Referenced by StackInferOutputShapeFromInputsMatchTest(), and StackInferOutputShapeFromInputsNoMatchTest().

286 {
287  armnn::Graph graph;
288  armnn::StackLayer* const stackLayer = graph.AddLayer<armnn::StackLayer>(descriptor, "stack");
289  outputShapes = stackLayer->InferOutputShapes(inputShapes);
290 }
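
StackLayer::InferOutputShapes derives the output purely from the descriptor: the result is m_InputShape with m_NumInputs inserted at position m_Axis, which is why the mismatching-input test above still produces { 4, 3, 2 }. A minimal sketch of that rule (illustrative only, not the ArmNN implementation):

#include <vector>

// Hypothetical helper: insert the number of stacked tensors at the stack axis.
std::vector<unsigned int> StackShapeSketch(std::vector<unsigned int> inputShape,
                                           unsigned int axis, unsigned int numInputs)
{
    inputShape.insert(inputShape.begin() + axis, numInputs);
    return inputShape; // e.g. { 4, 2 }, axis 1, 3 inputs -> { 4, 3, 2 }
}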

◆ StackValidateTensorShapesFromInputsMatchTest()

void StackValidateTensorShapesFromInputsMatchTest ( )

Definition at line 384 of file InferOutputTests.hpp.

References CreateStackLayerHelper(), Graph::InferTensorInfos(), StackDescriptor::m_Axis, StackDescriptor::m_InputShape, and StackDescriptor::m_NumInputs.

385 {
386  armnn::Graph graph;
387 
388  armnn::StackDescriptor descriptor;
389  descriptor.m_Axis = 0;
390  descriptor.m_NumInputs = 3;
391  descriptor.m_InputShape = armnn::TensorShape
392  (
393  { 2, 5 } // Defined input shape
394  );
395 
396  const std::vector<armnn::TensorShape> inputShapes
397  {
398  { 2, 5 }, // Actual input shapes
399  { 2, 5 },
400  { 2, 5 }
401  };
402 
403  // Creates the Stack layer
404  CreateStackLayerHelper(graph, descriptor, inputShapes, { 3, 2, 5 });
405 
406  // Graph::InferTensorInfos calls Layer::ValidateTensorShapesFromInputs
407  BOOST_CHECK_NO_THROW(graph.InferTensorInfos());
408 }

◆ StackValidateTensorShapesFromInputsNoMatchTest()

void StackValidateTensorShapesFromInputsNoMatchTest ( )

Definition at line 410 of file InferOutputTests.hpp.

References CreateStackLayerHelper(), Graph::InferTensorInfos(), StackDescriptor::m_Axis, StackDescriptor::m_InputShape, and StackDescriptor::m_NumInputs.

411 {
412  armnn::Graph graph;
413 
414  armnn::StackDescriptor descriptor;
415  descriptor.m_Axis = 0;
416  descriptor.m_NumInputs = 3;
417  descriptor.m_InputShape = armnn::TensorShape
418  (
419  { 2, 5 } // Defined input shape
420  );
421 
422  const std::vector<armnn::TensorShape> inputShapes
423  {
424  { 2, 5 }, // Actual input shapes
425  { 2, 2 }, // Incorrectly shaped input tensor
426  { 2, 5 }
427  };
428 
429  // Creates the Stack layer
430  CreateStackLayerHelper(graph, descriptor, inputShapes, { 3, 2, 5 });
431 
432  // Graph::InferTensorInfos calls Layer::ValidateTensorShapesFromInputs
433  BOOST_CHECK_THROW(graph.InferTensorInfos(), armnn::LayerValidationException);
434 }

◆ TransposeConvolution2dInferOutputShapeTest()

void TransposeConvolution2dInferOutputShapeTest ( )

Definition at line 469 of file InferOutputTests.hpp.

References Graph::AddLayer(), BOOST_CHECK(), TransposeConvolution2dLayer::InferOutputShapes(), TransposeConvolution2dDescriptor::m_DataLayout, TransposeConvolution2dDescriptor::m_PadBottom, TransposeConvolution2dDescriptor::m_PadLeft, TransposeConvolution2dDescriptor::m_PadRight, TransposeConvolution2dDescriptor::m_PadTop, TransposeConvolution2dDescriptor::m_StrideX, TransposeConvolution2dDescriptor::m_StrideY, and armnn::NCHW.

470 {
471  armnn::Graph graph;
472 
473  armnn::TransposeConvolution2dDescriptor descriptor;
474  descriptor.m_PadTop = 0;
475  descriptor.m_PadBottom = 1;
476  descriptor.m_PadLeft = 0;
477  descriptor.m_PadRight = 1;
478  descriptor.m_StrideX = 2;
479  descriptor.m_StrideY = 2;
480  descriptor.m_DataLayout = armnn::DataLayout::NCHW;
481 
482  armnn::TransposeConvolution2dLayer* const transposeConvolution2dLayer =
483  graph.AddLayer<armnn::TransposeConvolution2dLayer>(descriptor, "TransposeConvolution2d");
484 
485  std::vector<armnn::TensorShape> shapes;
486  const std::vector<unsigned int> inputSize = {1, 2, 3, 3};
487  armnn::TensorShape inputShape(4, inputSize.data());
488  shapes.push_back(inputShape);
489 
490  const std::vector<unsigned int> filterSize = { 1, 2, 3, 3};
491  armnn::TensorShape filterShape(4, filterSize.data());
492  shapes.push_back(filterShape);
493 
494  const std::vector<unsigned int> expectedOutputSizes = {1, 1, 6, 6};
495  armnn::TensorShape expectedOutputShape(4, expectedOutputSizes.data());
496 
497  BOOST_CHECK(expectedOutputShape == transposeConvolution2dLayer->InferOutputShapes(shapes).at(0));
498 }
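
The expected { 1, 1, 6, 6 } follows from the usual transposed-convolution output arithmetic: stride * (in - 1) + kernel - (padTop + padBottom) = 2 * 2 + 3 - 1 = 6 per spatial dimension, with the output channel count equal to the number of filters. A sketch with the test's values (illustrative only, not ArmNN code):

#include <array>

// Input {1, 2, 3, 3} (NCHW), filter {1, 2, 3, 3}, stride 2, padding {top 0, bottom 1, left 0, right 1}.
std::array<unsigned int, 4> TransposeConvolution2dShapeSketch()
{
    const unsigned int inH = 3, inW = 3, kernel = 3, stride = 2, numFilters = 1;
    const unsigned int padH = 0 + 1, padW = 0 + 1;

    const unsigned int outH = stride * (inH - 1) + kernel - padH; // 2*2 + 3 - 1 = 6
    const unsigned int outW = stride * (inW - 1) + kernel - padW; // 6
    return { 1, numFilters, outH, outW };                         // {1, 1, 6, 6}
}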