ArmNN
 21.05
OptimizerTests.cpp File Reference

Go to the source code of this file.

Functions

 BOOST_AUTO_TEST_CASE (LSTMValidateTensorShapesFromInputsCIFGDisabledTest)
 
 BOOST_AUTO_TEST_CASE (LSTMValidateTensorShapesFromInputsCIFGEnabledTest)
 
 BOOST_AUTO_TEST_CASE (InsertConvertersTest)
 
void CreateConvolution2dGraph (Graph &graph, const unsigned int *inputShape, const unsigned int *weightsShape, const unsigned int *outputShape, DataLayout dataLayout=DataLayout::NCHW)
 
 BOOST_AUTO_TEST_CASE (Conv2dValidateTensorShapesFromInputs)
 
 BOOST_AUTO_TEST_CASE (Conv2dValidateTensorShapesFromInputsNhwc)
 
void CreateDepthwiseConvolution2dGraph (Graph &graph, const unsigned int *inputShape, const unsigned int *weightsShape, const unsigned int *outputShape, DataLayout dataLayout=DataLayout::NCHW)
 
 BOOST_AUTO_TEST_CASE (DepthwiseConv2dValidateTensorShapesFromInputs)
 
 BOOST_AUTO_TEST_CASE (DepthwiseConv2dValidateTensorShapesFromInputsNhwc)
 
void CreatePooling2dGraph (Graph &graph, const unsigned int *inputShape, const unsigned int *outputShape, DataLayout dataLayout=DataLayout::NCHW)
 
 BOOST_AUTO_TEST_CASE (Pooling2dValidateTensorShapesFromInputs)
 
 BOOST_AUTO_TEST_CASE (Pooling2dValidateTensorShapesFromInputsNhwc)
 
void CreateResizeBilinearGraph (Graph &graph, const unsigned int *inputShape, const unsigned int *outputShape, DataLayout dataLayout=DataLayout::NCHW)
 
 BOOST_AUTO_TEST_CASE (ResizeBilinearValidateTensorShapesFromInputs)
 
 BOOST_AUTO_TEST_CASE (ResizeBilinearValidateTensorShapesFromInputsNhwc)
 
void CreateGatherGraph (Graph &graph, const armnn::TensorInfo &paramsInfo, const armnn::TensorInfo &indicesInfo, const armnn::TensorInfo &outputInfo)
 
 BOOST_AUTO_TEST_CASE (GatherValidateTensorShapesFromInputs)
 
 BOOST_AUTO_TEST_CASE (GatherValidateTensorShapesFromInputs1DParams)
 
 BOOST_AUTO_TEST_CASE (GatherValidateTensorShapesFromInputsMultiDimIndices)
 
 BOOST_AUTO_TEST_CASE (DetectionPostProcessValidateTensorShapes)
 
 BOOST_AUTO_TEST_CASE (FoldPadLayerIntoConvolution2dLayer)
 
 BOOST_AUTO_TEST_CASE (FoldPadLayerIntoPooling2dLayer)
 
 BOOST_AUTO_TEST_CASE (FoldPadLayerIntoPooling2d_PadWithMultipleOutputsShouldNotBeOptimized)
 
 BOOST_AUTO_TEST_CASE (FoldPadLayerIntoPooling2dLayer_PoolingLayerWithExcludePaddingShouldNotTakeMorePadding)
 
 BOOST_AUTO_TEST_CASE (FoldPadLayerIntoPooling2dLayer_MaxPoolingLayerWithLargePadValueShouldNotBeFolded)
 
 BOOST_AUTO_TEST_CASE (BackendCapabilityTest)
 
 BOOST_AUTO_TEST_CASE (BackendHintTest)
 
 BOOST_AUTO_TEST_CASE (OptimizeForExclusiveConnectionsFuseTest)
 
 BOOST_AUTO_TEST_CASE (OptimizeForExclusiveConnectionsWithoutFuseTest)
 

Function Documentation

◆ BOOST_AUTO_TEST_CASE() [1/24]

BOOST_AUTO_TEST_CASE ( LSTMValidateTensorShapesFromInputsCIFGDisabledTest  )

Definition at line 146 of file OptimizerTests.cpp.

References Graph::InferTensorInfos().

147 {
148  Graph graph;
149 
150  //Helper function creates graph containing LSTM layer with required input and output layers
151  CreateLSTMLayerHelper(graph, false);
152 
153  //This function used to call ValidateShapesFromInputs();
154  BOOST_CHECK_NO_THROW(graph.InferTensorInfos());
155 }
void InferTensorInfos()
Definition: Graph.cpp:529

◆ BOOST_AUTO_TEST_CASE() [2/24]

BOOST_AUTO_TEST_CASE ( LSTMValidateTensorShapesFromInputsCIFGEnabledTest  )

Definition at line 157 of file OptimizerTests.cpp.

References Graph::InferTensorInfos().

158 {
159  Graph graph;
160 
161  //Helper function creates graph containing LSTM layer with required input and output layers
162  CreateLSTMLayerHelper(graph, true);
163 
164  //This function used to call ValidateShapesFromInputs();
165  BOOST_CHECK_NO_THROW(graph.InferTensorInfos());
166 }
void InferTensorInfos()
Definition: Graph.cpp:529

◆ BOOST_AUTO_TEST_CASE() [3/24]

BOOST_AUTO_TEST_CASE ( InsertConvertersTest  )

Definition at line 168 of file OptimizerTests.cpp.

References armnn::Addition, ARMNN_ASSERT, CheckSequence(), armnn::ConvertFp16ToFp32, armnn::ConvertFp32ToFp16, armnn::Float16, armnn::Float32, armnn::Floor, TensorInfo::GetDataType(), Layer::GetDataType(), Layer::GetInputSlot(), Layer::GetOutputHandler(), Layer::GetOutputSlot(), OutputSlot::GetTensorInfo(), Layer::GetType(), armnn::info, armnn::InsertConvertFp16ToFp32LayersBefore(), armnn::InsertConvertFp32ToFp16LayersAfter(), and OutputHandler::SetTensorInfo().

169 {
170  const armnn::TensorInfo info({ 1, 5, 2, 3 }, armnn::DataType::Float16);
171 
172  armnn::Graph graph;
173 
174  armnn::LayerBindingId inputId = 0;
175 
176  armnn::Layer* head = graph.AddLayer<armnn::OutputLayer>(0, "output");
177 
178  head = graph.InsertNewLayer<armnn::AdditionLayer>(head->GetInputSlot(0), "");
179  head->GetOutputHandler().SetTensorInfo(info);
180 
181  graph.InsertNewLayer<armnn::InputLayer>(head->GetInputSlot(1), inputId++, "")
182  ->GetOutputHandler().SetTensorInfo(info);
183 
184  head = graph.InsertNewLayer<armnn::FloorLayer>(head->GetInputSlot(0), "");
185  head->GetOutputHandler().SetTensorInfo(info);
186 
187  head = graph.InsertNewLayer<armnn::MemCopyLayer>(head->GetInputSlot(0), "");
188  head->GetOutputHandler().SetTensorInfo(info);
189 
190  graph.InsertNewLayer<armnn::InputLayer>(head->GetInputSlot(0), inputId++, "")
191  ->GetOutputHandler().SetTensorInfo(info);
192 
193  // Check graph layer sequence before inserting convert layers
194  BOOST_TEST(CheckSequence(graph.cbegin(),
195  graph.cend(),
196  &IsLayerOfType<armnn::InputLayer>,
197  &IsLayerOfType<armnn::InputLayer>,
198  &IsLayerOfType<armnn::MemCopyLayer>,
199  &IsLayerOfType<armnn::FloorLayer>,
200  &IsLayerOfType<armnn::AdditionLayer>,
201  &IsLayerOfType<armnn::OutputLayer>));
202 
203  // Check layers have Float16 DataType
204  for (auto& layer : graph)
205  {
206  if(layer->GetType()==LayerType::Floor || layer->GetType() == LayerType::Addition)
207  {
208  ARMNN_ASSERT(layer->GetOutputSlot(0).GetTensorInfo().GetDataType() == DataType::Float16);
209  ARMNN_ASSERT(layer->GetDataType() == DataType::Float16);
210  }
211  }
212 
213  // Insert convert layers either side of unsupported layer
214  for (auto& layer : graph)
215  {
216  if(layer->GetType()==LayerType::Floor || layer->GetType() == LayerType::Addition)
217  {
 218  InsertConvertFp16ToFp32LayersBefore(graph, *layer);
 219  InsertConvertFp32ToFp16LayersAfter(graph, *layer);
220  }
221  }
222 
223  // Check layers have correct DataType after inserting convert layers
224  for (auto& layer : graph)
225  {
226  if (layer->GetType()==LayerType::Floor || layer->GetType() == LayerType::Addition)
227  {
228  ARMNN_ASSERT(layer->GetOutputSlot(0).GetTensorInfo().GetDataType() == DataType::Float32);
229  ARMNN_ASSERT(layer->GetDataType() == DataType::Float32);
230  }
231  else if (layer->GetType() == LayerType::ConvertFp16ToFp32)
232  {
233  ARMNN_ASSERT(layer->GetOutputSlot(0).GetTensorInfo().GetDataType() == DataType::Float32);
234  ARMNN_ASSERT(layer->GetDataType() == DataType::Float16);
235  }
236  else if (layer->GetType() == LayerType::ConvertFp32ToFp16)
237  {
238  ARMNN_ASSERT(layer->GetOutputSlot(0).GetTensorInfo().GetDataType() == DataType::Float16);
239  ARMNN_ASSERT(layer->GetDataType() == DataType::Float32);
240  }
241  }
242 
243  // Check sequence of layers after inserting convert layers
244  BOOST_TEST(CheckSequence(graph.cbegin(),
245  graph.cend(),
246  &IsLayerOfType<armnn::InputLayer>,
247  &IsLayerOfType<armnn::InputLayer>,
248  &IsLayerOfType<armnn::ConvertFp16ToFp32Layer>,
249  &IsLayerOfType<armnn::MemCopyLayer>,
250  &IsLayerOfType<armnn::ConvertFp16ToFp32Layer>,
251  &IsLayerOfType<armnn::FloorLayer>,
252  &IsLayerOfType<armnn::ConvertFp32ToFp16Layer>,
253  &IsLayerOfType<armnn::ConvertFp16ToFp32Layer>,
254  &IsLayerOfType<armnn::AdditionLayer>,
255  &IsLayerOfType<armnn::ConvertFp32ToFp16Layer>,
256  &IsLayerOfType<armnn::OutputLayer>));
257 }
std::vector< ConvertFp32ToFp16Layer * > InsertConvertFp32ToFp16LayersAfter(Graph &graph, Layer &layer)
std::vector< ConvertFp16ToFp32Layer * > InsertConvertFp16ToFp32LayersBefore(Graph &graph, Layer &layer, bool expectCorrectInputType)
int LayerBindingId
Type of identifiers for bindable layers (inputs, outputs).
Definition: Types.hpp:243
const InputSlot & GetInputSlot(unsigned int index) const override
Get a const input slot handle by slot index.
Definition: Layer.hpp:316
A layer user-provided data can be bound to (e.g. inputs, outputs).
Definition: OutputLayer.hpp:13
This layer represents a memory copy operation.
#define ARMNN_ASSERT(COND)
Definition: Assert.hpp:14
This layer represents a floor operation.
Definition: FloorLayer.hpp:13
This layer represents an addition operation.
void SetTensorInfo(const TensorInfo &tensorInfo)
Sets the TensorInfo used by this output handler.
const OutputHandler & GetOutputHandler(unsigned int i=0) const
Definition: Layer.hpp:225
A layer user-provided data can be bound to (e.g. inputs, outputs).
Definition: InputLayer.hpp:13
bool CheckSequence(const armnn::Graph::ConstIterator first, const armnn::Graph::ConstIterator last)
Definition: TestUtils.hpp:21

◆ BOOST_AUTO_TEST_CASE() [4/24]

BOOST_AUTO_TEST_CASE ( Conv2dValidateTensorShapesFromInputs  )

Definition at line 287 of file OptimizerTests.cpp.

References CreateConvolution2dGraph(), and Graph::InferTensorInfos().

288 {
289  Graph graph;
290  const unsigned int inputShape[] = { 1, 3, 8, 16 };
291  const unsigned int weightsShape[] = { 2, 3, 5, 3 };
292  const unsigned int outputShape[] = { 1, 2, 4, 14 };
293  CreateConvolution2dGraph(graph, inputShape, weightsShape, outputShape);
294 
295  BOOST_CHECK_NO_THROW(graph.InferTensorInfos());
296 }
void InferTensorInfos()
Definition: Graph.cpp:529
void CreateConvolution2dGraph(Graph &graph, const unsigned int *inputShape, const unsigned int *weightsShape, const unsigned int *outputShape, DataLayout dataLayout=DataLayout::NCHW)

◆ BOOST_AUTO_TEST_CASE() [5/24]

BOOST_AUTO_TEST_CASE ( Conv2dValidateTensorShapesFromInputsNhwc  )

Definition at line 298 of file OptimizerTests.cpp.

References CreateConvolution2dGraph(), Graph::InferTensorInfos(), and armnn::NHWC.

299 {
300  Graph graph;
301  const unsigned int inputShape[] = { 1, 8, 16, 3 };
302  const unsigned int weightsShape[] = { 2, 5, 3, 3 };
303  const unsigned int outputShape[] = { 1, 4, 14, 2 };
304  CreateConvolution2dGraph(graph, inputShape, weightsShape, outputShape, DataLayout::NHWC);
305 
306  BOOST_CHECK_NO_THROW(graph.InferTensorInfos());
307 }
void InferTensorInfos()
Definition: Graph.cpp:529
void CreateConvolution2dGraph(Graph &graph, const unsigned int *inputShape, const unsigned int *weightsShape, const unsigned int *outputShape, DataLayout dataLayout=DataLayout::NCHW)

◆ BOOST_AUTO_TEST_CASE() [6/24]

BOOST_AUTO_TEST_CASE ( DepthwiseConv2dValidateTensorShapesFromInputs  )

Definition at line 337 of file OptimizerTests.cpp.

References CreateDepthwiseConvolution2dGraph(), and Graph::InferTensorInfos().

338 {
339  Graph graph;
340  const unsigned int inputShape[] = { 1, 2, 3, 3 };
341  const unsigned int weightsShape[] = { 1, 2, 3, 3 };
342  const unsigned int outputShape[] = { 1, 2, 1, 1 };
343  CreateDepthwiseConvolution2dGraph(graph, inputShape, weightsShape, outputShape);
344 
345  BOOST_CHECK_NO_THROW(graph.InferTensorInfos());
346 }
void InferTensorInfos()
Definition: Graph.cpp:529
void CreateDepthwiseConvolution2dGraph(Graph &graph, const unsigned int *inputShape, const unsigned int *weightsShape, const unsigned int *outputShape, DataLayout dataLayout=DataLayout::NCHW)

◆ BOOST_AUTO_TEST_CASE() [7/24]

BOOST_AUTO_TEST_CASE ( DepthwiseConv2dValidateTensorShapesFromInputsNhwc  )

Definition at line 348 of file OptimizerTests.cpp.

References CreateDepthwiseConvolution2dGraph(), Graph::InferTensorInfos(), and armnn::NHWC.

349 {
350  Graph graph;
351  const unsigned int inputShape[] = { 1, 3, 3, 2 };
352  const unsigned int weightsShape[] = { 1, 2, 3, 3 };
353  const unsigned int outputShape[] = { 1, 1, 1, 2 };
354  CreateDepthwiseConvolution2dGraph(graph, inputShape, weightsShape, outputShape, DataLayout::NHWC);
355 
356  BOOST_CHECK_NO_THROW(graph.InferTensorInfos());
357 }
void InferTensorInfos()
Definition: Graph.cpp:529
void CreateDepthwiseConvolution2dGraph(Graph &graph, const unsigned int *inputShape, const unsigned int *weightsShape, const unsigned int *outputShape, DataLayout dataLayout=DataLayout::NCHW)

◆ BOOST_AUTO_TEST_CASE() [8/24]

BOOST_AUTO_TEST_CASE ( Pooling2dValidateTensorShapesFromInputs  )

Definition at line 387 of file OptimizerTests.cpp.

References CreatePooling2dGraph(), Graph::InferTensorInfos(), and armnn::NCHW.

388 {
389  Graph graph;
390  const unsigned int inputShape[] = { 5, 3, 52, 60 };
391  const unsigned int outputShape[] = { 5, 3, 11, 13 };
392  CreatePooling2dGraph(graph, inputShape, outputShape, DataLayout::NCHW);
393 
394  BOOST_CHECK_NO_THROW(graph.InferTensorInfos());
395 }
void CreatePooling2dGraph(Graph &graph, const unsigned int *inputShape, const unsigned int *outputShape, DataLayout dataLayout=DataLayout::NCHW)
void InferTensorInfos()
Definition: Graph.cpp:529

◆ BOOST_AUTO_TEST_CASE() [9/24]

BOOST_AUTO_TEST_CASE ( Pooling2dValidateTensorShapesFromInputsNhwc  )

Definition at line 397 of file OptimizerTests.cpp.

References CreatePooling2dGraph(), Graph::InferTensorInfos(), and armnn::NHWC.

398 {
399  Graph graph;
400  const unsigned int inputShape[] = { 5, 52, 60, 3 };
401  const unsigned int outputShape[] = { 5, 11, 13, 3 };
402  CreatePooling2dGraph(graph, inputShape, outputShape, DataLayout::NHWC);
403 
404  BOOST_CHECK_NO_THROW(graph.InferTensorInfos());
405 }
void CreatePooling2dGraph(Graph &graph, const unsigned int *inputShape, const unsigned int *outputShape, DataLayout dataLayout=DataLayout::NCHW)
void InferTensorInfos()
Definition: Graph.cpp:529

◆ BOOST_AUTO_TEST_CASE() [10/24]

BOOST_AUTO_TEST_CASE ( ResizeBilinearValidateTensorShapesFromInputs  )

Definition at line 432 of file OptimizerTests.cpp.

References CreateResizeBilinearGraph(), and Graph::InferTensorInfos().

433 {
434  Graph graph;
435  const unsigned int inputShape[] = { 1, 2, 4, 5 };
436  const unsigned int outputShape[] = { 1, 2, 3, 4 };
437  CreateResizeBilinearGraph(graph, inputShape, outputShape);
438 
439  BOOST_CHECK_NO_THROW(graph.InferTensorInfos());
440 }
void CreateResizeBilinearGraph(Graph &graph, const unsigned int *inputShape, const unsigned int *outputShape, DataLayout dataLayout=DataLayout::NCHW)
void InferTensorInfos()
Definition: Graph.cpp:529

◆ BOOST_AUTO_TEST_CASE() [11/24]

BOOST_AUTO_TEST_CASE ( ResizeBilinearValidateTensorShapesFromInputsNhwc  )

Definition at line 442 of file OptimizerTests.cpp.

References CreateResizeBilinearGraph(), Graph::InferTensorInfos(), and armnn::NHWC.

443 {
444  Graph graph;
445  const unsigned int inputShape[] = { 1, 4, 5, 2 };
446  const unsigned int outputShape[] = { 1, 3, 4, 2 };
447  CreateResizeBilinearGraph(graph, inputShape, outputShape, DataLayout::NHWC);
448 
449  BOOST_CHECK_NO_THROW(graph.InferTensorInfos());
450 }
void CreateResizeBilinearGraph(Graph &graph, const unsigned int *inputShape, const unsigned int *outputShape, DataLayout dataLayout=DataLayout::NCHW)
void InferTensorInfos()
Definition: Graph.cpp:529

◆ BOOST_AUTO_TEST_CASE() [12/24]

BOOST_AUTO_TEST_CASE ( GatherValidateTensorShapesFromInputs  )

Definition at line 473 of file OptimizerTests.cpp.

References CreateGatherGraph(), armnn::Float32, Graph::InferTensorInfos(), and armnn::Signed32.

474 {
475  Graph graph;
476  armnn::TensorInfo paramsInfo({10, 5}, DataType::Float32);
477  armnn::TensorInfo indicesInfo({3}, DataType::Signed32);
478  armnn::TensorInfo outputInfo({3, 5}, DataType::Float32);
479 
480  CreateGatherGraph(graph, paramsInfo, indicesInfo, outputInfo);
481 
482  BOOST_CHECK_NO_THROW(graph.InferTensorInfos());
483 }
void CreateGatherGraph(Graph &graph, const armnn::TensorInfo &paramsInfo, const armnn::TensorInfo &indicesInfo, const armnn::TensorInfo &outputInfo)
void InferTensorInfos()
Definition: Graph.cpp:529

◆ BOOST_AUTO_TEST_CASE() [13/24]

BOOST_AUTO_TEST_CASE ( GatherValidateTensorShapesFromInputs1DParams  )

Definition at line 485 of file OptimizerTests.cpp.

References CreateGatherGraph(), armnn::Float32, Graph::InferTensorInfos(), and armnn::Signed32.

486 {
487  Graph graph;
488  armnn::TensorInfo paramsInfo({8}, DataType::Float32);
489  armnn::TensorInfo indicesInfo({5}, DataType::Signed32);
490  armnn::TensorInfo outputInfo( {5}, DataType::Float32);
491 
492  CreateGatherGraph(graph, paramsInfo, indicesInfo, outputInfo);
493 
494  BOOST_CHECK_NO_THROW(graph.InferTensorInfos());
495 }
void CreateGatherGraph(Graph &graph, const armnn::TensorInfo &paramsInfo, const armnn::TensorInfo &indicesInfo, const armnn::TensorInfo &outputInfo)
void InferTensorInfos()
Definition: Graph.cpp:529

◆ BOOST_AUTO_TEST_CASE() [14/24]

BOOST_AUTO_TEST_CASE ( GatherValidateTensorShapesFromInputsMultiDimIndices  )

Definition at line 497 of file OptimizerTests.cpp.

References CreateGatherGraph(), armnn::Float32, Graph::InferTensorInfos(), and armnn::Signed32.

498 {
499  Graph graph;
500  armnn::TensorInfo paramsInfo({3, 2, 5}, DataType::Float32);
501  armnn::TensorInfo indicesInfo({2, 2}, DataType::Signed32);
502  armnn::TensorInfo outputInfo({2, 2, 2, 5}, DataType::Float32);
503 
504  CreateGatherGraph(graph, paramsInfo, indicesInfo, outputInfo);
505 
506  BOOST_CHECK_NO_THROW(graph.InferTensorInfos());
507 }
void CreateGatherGraph(Graph &graph, const armnn::TensorInfo &paramsInfo, const armnn::TensorInfo &indicesInfo, const armnn::TensorInfo &outputInfo)
void InferTensorInfos()
Definition: Graph.cpp:529

◆ BOOST_AUTO_TEST_CASE() [15/24]

BOOST_AUTO_TEST_CASE ( DetectionPostProcessValidateTensorShapes  )

Definition at line 509 of file OptimizerTests.cpp.

References Graph::AddLayer(), anchors(), OutputSlot::Connect(), Layer::GetInputSlot(), Layer::GetOutputSlot(), Graph::InferTensorInfos(), DetectionPostProcessLayer::m_Anchors, DetectionPostProcessDescriptor::m_MaxDetections, armnn::QAsymmU8, scoresInfo, and OutputSlot::SetTensorInfo().

510 {
511  Graph graph;
512  armnn::TensorInfo boxEncodingsInfo({1, 10, 4}, DataType::QAsymmU8);
513  armnn::TensorInfo scoresInfo({1, 10, 4}, DataType::QAsymmU8);
514  std::vector<uint8_t> anchorsVector(40);
 515  armnn::ConstTensor anchors(armnn::TensorInfo({10, 4}, armnn::DataType::QAsymmU8), anchorsVector);
 516 
517  armnn::TensorInfo detectionBoxesInfo({1, 3, 4}, DataType::QAsymmU8);
518  armnn::TensorInfo detectionScoresInfo({1, 3}, DataType::QAsymmU8);
519  armnn::TensorInfo detectionClassesInfo({1, 3}, DataType::QAsymmU8);
520  armnn::TensorInfo numDetectionInfo({1}, DataType::QAsymmU8);
521 
522  Layer* input0 = graph.AddLayer<InputLayer>(0, "boxEncodings");
523  input0->GetOutputSlot().SetTensorInfo(boxEncodingsInfo);
524 
525  Layer* input1 = graph.AddLayer<InputLayer>(1, "score");
 526  input1->GetOutputSlot().SetTensorInfo(scoresInfo);
 527 
 528  DetectionPostProcessDescriptor descriptor;
529  descriptor.m_MaxDetections = 3;
530 
531  DetectionPostProcessLayer* layer = graph.AddLayer<DetectionPostProcessLayer>(descriptor, "detectionPostProcess");
532  layer->m_Anchors = std::make_unique<armnn::ScopedTensorHandle>(anchors);
533  layer->GetOutputSlot(0).SetTensorInfo(detectionBoxesInfo);
534  layer->GetOutputSlot(1).SetTensorInfo(detectionScoresInfo);
535  layer->GetOutputSlot(2).SetTensorInfo(detectionClassesInfo);
536  layer->GetOutputSlot(3).SetTensorInfo(numDetectionInfo);
537 
538  input0->GetOutputSlot().Connect(layer->GetInputSlot(0));
539  input1->GetOutputSlot().Connect(layer->GetInputSlot(1));
540 
541  BOOST_CHECK_NO_THROW(graph.InferTensorInfos());
542 }
LayerT * AddLayer(Args &&... args)
Adds a new layer, of type LayerType, to the graph constructed with the arguments passed.
Definition: Graph.hpp:402
int Connect(InputSlot &destination)
Definition: Layer.cpp:83
This layer represents a detection postprocess operator.
uint32_t m_MaxDetections
Maximum numbers of detections.
const InputSlot & GetInputSlot(unsigned int index) const override
Get a const input slot handle by slot index.
Definition: Layer.hpp:316
A tensor defined by a TensorInfo (shape and data type) and an immutable backing store.
Definition: Tensor.hpp:314
void InferTensorInfos()
Definition: Graph.cpp:529
A layer user-provided data can be bound to (e.g. inputs, outputs).
Definition: InputLayer.hpp:13
void SetTensorInfo(const TensorInfo &tensorInfo) override
Definition: Layer.cpp:58
armnn::TensorInfo scoresInfo({ 1, 6, 3 }, armnn::DataType::Float32)
const OutputSlot & GetOutputSlot(unsigned int index=0) const override
Get the const output slot handle by slot index.
Definition: Layer.hpp:318
std::shared_ptr< ConstTensorHandle > m_Anchors
A unique pointer to store Anchor values.
std::vector< float > anchors({ 0.5f, 0.5f, 1.0f, 1.0f, 0.5f, 0.5f, 1.0f, 1.0f, 0.5f, 0.5f, 1.0f, 1.0f, 0.5f, 10.5f, 1.0f, 1.0f, 0.5f, 10.5f, 1.0f, 1.0f, 0.5f, 100.5f, 1.0f, 1.0f })

◆ BOOST_AUTO_TEST_CASE() [16/24]

BOOST_AUTO_TEST_CASE ( FoldPadLayerIntoConvolution2dLayer  )

Definition at line 544 of file OptimizerTests.cpp.

References Graph::AddLayer(), Graph::cbegin(), Graph::cend(), CheckSequence(), OutputSlot::Connect(), armnn::Float32, Layer::GetInputSlot(), Layer::GetNameStr(), Layer::GetOutputSlot(), LayerWithParameters< Parameters >::GetParameters(), Convolution2dDescriptor::m_BiasEnabled, Convolution2dDescriptor::m_DataLayout, Convolution2dDescriptor::m_StrideX, Convolution2dDescriptor::m_StrideY, Convolution2dLayer::m_Weight, armnn::MakeOptimizations(), armnn::NHWC, Optimizer::Pass(), and OutputSlot::SetTensorInfo().

545 {
546  Graph graph;
547  const unsigned int inputShape[] = { 1, 2, 2, 3 };
548  const unsigned int paddedShape[] = { 1, 6, 6, 3 };
549  const unsigned int weightsShape[] = { 1, 2, 3, 3 };
550  const unsigned int outputShape[] = { 1, 2, 1, 1 };
551 
552  armnn::TensorInfo inputInfo(4, inputShape, DataType::Float32);
553  armnn::TensorInfo paddedInfo(4, paddedShape, DataType::Float32);
554  armnn::TensorInfo outputInfo(4, outputShape, DataType::Float32);
555 
556  Layer* input = graph.AddLayer<InputLayer>(0, "input");
557  input->GetOutputSlot().SetTensorInfo(inputInfo);
558 
559  PadDescriptor padDescriptor({ { 0, 0 }, { 2, 2 }, { 2, 2 }, { 0, 0 } });
560 
561  PadLayer* padLayer = graph.AddLayer<PadLayer>(padDescriptor, "pad");
562  padLayer->GetOutputSlot().SetTensorInfo(paddedInfo);
563 
564  Convolution2dDescriptor convolution2dDescriptor;
565  convolution2dDescriptor.m_BiasEnabled = false;
566  convolution2dDescriptor.m_StrideX = 1;
567  convolution2dDescriptor.m_StrideY = 1;
568  convolution2dDescriptor.m_DataLayout = DataLayout::NHWC;
569 
570  std::vector<float> weightsVector(18);
571  armnn::ConstTensor weights(armnn::TensorInfo(4, weightsShape, armnn::DataType::Float32), weightsVector);
572 
573  Convolution2dLayer* conv2dLayer = graph.AddLayer<Convolution2dLayer>(convolution2dDescriptor, "conv2d");
574  conv2dLayer->m_Weight = std::make_unique<armnn::ScopedTensorHandle>(weights);
575  conv2dLayer->GetOutputSlot().SetTensorInfo(outputInfo);
576 
577  Layer* output = graph.AddLayer<OutputLayer>(0, "output");
578 
579  // Connect up layers - input -> pad -> conv2d -> output
580  input->GetOutputSlot().Connect(padLayer->GetInputSlot(0));
581  padLayer->GetOutputSlot().Connect(conv2dLayer->GetInputSlot(0));
582  conv2dLayer->GetOutputSlot().Connect(output->GetInputSlot(0));
583 
584  auto checkSimpleConv2d = [](const armnn::Layer* const layer) -> bool {
585  const auto conv2dLayer = static_cast<const armnn::Convolution2dLayer*>(layer);
586  const auto conv2dLayerParams = conv2dLayer->GetParameters();
587  return IsLayerOfType<armnn::Convolution2dLayer>(layer) && (layer->GetNameStr() == "conv2d") &&
588  (conv2dLayerParams.m_PadLeft == 0) && (conv2dLayerParams.m_PadRight == 0) &&
589  (conv2dLayerParams.m_PadTop == 0) && (conv2dLayerParams.m_PadBottom == 0) &&
590  (conv2dLayerParams.m_BiasEnabled == false) && (conv2dLayerParams.m_StrideX == 1) &&
591  (conv2dLayerParams.m_StrideY == 1) && (conv2dLayerParams.m_DataLayout == DataLayout::NHWC);
592  };
593 
594  BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(),
595  &IsLayerOfType<armnn::InputLayer>,
596  &IsLayerOfType<armnn::PadLayer>,
597  checkSimpleConv2d,
598  &IsLayerOfType<armnn::OutputLayer>));
599 
 600  armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(FoldPadIntoConvolution2d()));
 601 
602  auto checkPadFoldedIntoConv2d = [](const armnn::Layer* const layer) -> bool {
603  const auto conv2dLayer = static_cast<const armnn::Convolution2dLayer*>(layer);
604  const auto conv2dLayerParams = conv2dLayer->GetParameters();
605  return IsLayerOfType<armnn::Convolution2dLayer>(layer) && (layer->GetNameStr() == "folded-pad-into-conv2d") &&
606  (conv2dLayerParams.m_PadLeft == 2) && (conv2dLayerParams.m_PadRight == 2) &&
607  (conv2dLayerParams.m_PadTop == 2) && (conv2dLayerParams.m_PadBottom == 2) &&
608  (conv2dLayerParams.m_BiasEnabled == false) && (conv2dLayerParams.m_StrideX == 1) &&
609  (conv2dLayerParams.m_StrideY == 1) && (conv2dLayerParams.m_DataLayout == DataLayout::NHWC);
610  };
611 
612  BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(),
613  &IsLayerOfType<armnn::InputLayer>,
614  checkPadFoldedIntoConv2d,
615  &IsLayerOfType<armnn::OutputLayer>));
616 }
bool m_BiasEnabled
Enable/disable bias.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
const Parameters & GetParameters() const
OptimizeForExclusiveConnection< PadLayer, Convolution2dLayer, pad_fold::FoldPadIntoConvolution2dImpl > FoldPadIntoConvolution2d
Optimizer::Optimizations MakeOptimizations(Args &&... args)
Definition: Optimizer.hpp:43
LayerT * AddLayer(Args &&... args)
Adds a new layer, of type LayerType, to the graph constructed with the arguments passed.
Definition: Graph.hpp:402
ConstIterator cbegin() const
Returns const iterator pointing to the beginning of the list. Lowercase for range-based for loops...
Definition: Graph.hpp:172
A Convolution2dDescriptor for the Convolution2dLayer.
int Connect(InputSlot &destination)
Definition: Layer.cpp:83
static void Pass(Graph &graph, const Optimizations &optimizations)
Definition: Optimizer.cpp:16
std::shared_ptr< ConstTensorHandle > m_Weight
A unique pointer to store Weight values.
This layer represents a pad operation.
Definition: PadLayer.hpp:14
A PadDescriptor for the PadLayer.
const InputSlot & GetInputSlot(unsigned int index) const override
Get a const input slot handle by slot index.
Definition: Layer.hpp:316
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
A layer user-provided data can be bound to (e.g. inputs, outputs).
Definition: OutputLayer.hpp:13
A tensor defined by a TensorInfo (shape and data type) and an immutable backing store.
Definition: Tensor.hpp:314
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
A layer user-provided data can be bound to (e.g. inputs, outputs).
Definition: InputLayer.hpp:13
bool CheckSequence(const armnn::Graph::ConstIterator first, const armnn::Graph::ConstIterator last)
Definition: TestUtils.hpp:21
void SetTensorInfo(const TensorInfo &tensorInfo) override
Definition: Layer.cpp:58
const OutputSlot & GetOutputSlot(unsigned int index=0) const override
Get the const output slot handle by slot index.
Definition: Layer.hpp:318
ConstIterator cend() const
Returns const iterator pointing to the end of the list. Lowercase for range-based for loops...
Definition: Graph.hpp:174
This layer represents a convolution 2d operation.

◆ BOOST_AUTO_TEST_CASE() [17/24]

BOOST_AUTO_TEST_CASE ( FoldPadLayerIntoPooling2dLayer  )

Definition at line 618 of file OptimizerTests.cpp.

References Graph::AddLayer(), armnn::Average, Graph::cbegin(), Graph::cend(), CheckSequence(), OutputSlot::Connect(), armnn::Exclude, armnn::Float32, Layer::GetInputSlot(), Layer::GetNameStr(), Layer::GetOutputSlot(), LayerWithParameters< Parameters >::GetParameters(), armnn::IgnoreValue, Pooling2dDescriptor::m_DataLayout, Pooling2dDescriptor::m_PadBottom, Pooling2dDescriptor::m_PaddingMethod, Pooling2dDescriptor::m_PadLeft, Pooling2dDescriptor::m_PadRight, Pooling2dDescriptor::m_PadTop, Pooling2dDescriptor::m_PoolHeight, Pooling2dDescriptor::m_PoolType, Pooling2dDescriptor::m_PoolWidth, Pooling2dDescriptor::m_StrideX, Pooling2dDescriptor::m_StrideY, armnn::MakeOptimizations(), armnn::NHWC, Optimizer::Pass(), and OutputSlot::SetTensorInfo().

619 {
620  Graph graph;
621  const unsigned int inputShape[] = { 1, 2, 2, 3 };
622  const unsigned int paddedShape[] = { 1, 4, 4, 3 };
623  const unsigned int outputShape[] = { 1, 2, 2, 3 };
624 
625  armnn::TensorInfo inputInfo(4, inputShape, DataType::Float32);
626  armnn::TensorInfo paddedInfo(4, paddedShape, DataType::Float32);
627  armnn::TensorInfo outputInfo(4, outputShape, DataType::Float32);
628 
629  Layer* input = graph.AddLayer<InputLayer>(0, "input");
630  input->GetOutputSlot().SetTensorInfo(inputInfo);
631 
632  PadDescriptor padDescriptor({ { 0, 0 }, { 1, 1 }, { 1, 1 }, { 0, 0 } });
633 
634  PadLayer* padLayer = graph.AddLayer<PadLayer>(padDescriptor, "pad");
635  padLayer->GetOutputSlot().SetTensorInfo(paddedInfo);
636 
637  Pooling2dDescriptor pooling2dDescriptor;
638  pooling2dDescriptor.m_PoolType = PoolingAlgorithm::Average;
639  pooling2dDescriptor.m_PoolWidth = 3;
640  pooling2dDescriptor.m_PoolHeight = 3;
641  pooling2dDescriptor.m_StrideX = 1;
642  pooling2dDescriptor.m_StrideY = 1;
643  pooling2dDescriptor.m_DataLayout = DataLayout::NHWC;
644 
645  Pooling2dLayer* pool2dLayer = graph.AddLayer<Pooling2dLayer>(pooling2dDescriptor, "pool2d");
646  pool2dLayer->GetOutputSlot().SetTensorInfo(outputInfo);
647 
648  Layer* output = graph.AddLayer<OutputLayer>(0, "output");
649 
650  // Connect up layers - input -> pad -> pool2d -> output
651  input->GetOutputSlot().Connect(padLayer->GetInputSlot(0));
652  padLayer->GetOutputSlot().Connect(pool2dLayer->GetInputSlot(0));
653  pool2dLayer->GetOutputSlot().Connect(output->GetInputSlot(0));
654 
655  auto checkSimplePool2d = [&](const armnn::Layer* const layer) {
656  const auto pool2dLayer = static_cast<const armnn::Pooling2dLayer*>(layer);
657  return IsLayerOfType<armnn::Pooling2dLayer>(layer) && (layer->GetNameStr() == "pool2d") &&
658  (pool2dLayer->GetParameters() == pooling2dDescriptor);
659  };
660 
661  BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(),
662  &IsLayerOfType<armnn::InputLayer>,
663  &IsLayerOfType<armnn::PadLayer>,
664  checkSimplePool2d,
665  &IsLayerOfType<armnn::OutputLayer>));
666 
 667  armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(FoldPadIntoPooling2d()));
 668 
669  auto checkPadFoldedIntoPool2d = [&](const armnn::Layer* const layer) {
670  if (!IsLayerOfType<armnn::Pooling2dLayer>(layer) || (layer->GetNameStr() != "folded-pad-into-pool2d"))
671  {
672  return false;
673  }
674 
675  const auto pool2dLayer = static_cast<const armnn::Pooling2dLayer*>(layer);
676  const Pooling2dDescriptor pool2dLayerParams = pool2dLayer->GetParameters();
677 
678  Pooling2dDescriptor pool2dLayerParamsNoPad = pool2dLayerParams;
679  pool2dLayerParamsNoPad.m_PadLeft = 0;
680  pool2dLayerParamsNoPad.m_PadRight = 0;
681  pool2dLayerParamsNoPad.m_PadTop = 0;
682  pool2dLayerParamsNoPad.m_PadBottom = 0;
683  // If we fold then PaddingMethod will be set to Ignore. The original will be Exclude.
684  pool2dLayerParamsNoPad.m_PaddingMethod = PaddingMethod::Exclude;
685 
686  return (pool2dLayerParamsNoPad == pooling2dDescriptor) && (pool2dLayerParams.m_PadLeft == 1) &&
687  (pool2dLayerParams.m_PadRight == 1) && (pool2dLayerParams.m_PadTop == 1) &&
688  (pool2dLayerParams.m_PadBottom == 1) &&
689  (pool2dLayerParams.m_PaddingMethod == PaddingMethod::IgnoreValue);
690  };
691 
692  BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(),
693  &IsLayerOfType<armnn::InputLayer>,
694  checkPadFoldedIntoPool2d,
695  &IsLayerOfType<armnn::OutputLayer>));
696 }
uint32_t m_PadBottom
Padding bottom value in the height dimension.
const Parameters & GetParameters() const
uint32_t m_PadLeft
Padding left value in the width dimension.
Optimizer::Optimizations MakeOptimizations(Args &&... args)
Definition: Optimizer.hpp:43
LayerT * AddLayer(Args &&... args)
Adds a new layer, of type LayerType, to the graph constructed with the arguments passed.
Definition: Graph.hpp:402
uint32_t m_PoolWidth
Pooling width value.
ConstIterator cbegin() const
Returns const iterator pointing to the beginning of the list. Lowercase for range-based for loops...
Definition: Graph.hpp:172
int Connect(InputSlot &destination)
Definition: Layer.cpp:83
static void Pass(Graph &graph, const Optimizations &optimizations)
Definition: Optimizer.cpp:16
PaddingMethod m_PaddingMethod
The padding method to be used. (Exclude, IgnoreValue).
uint32_t m_PadTop
Padding top value in the height dimension.
This layer represents a pad operation.
Definition: PadLayer.hpp:14
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
uint32_t m_PoolHeight
Pooling height value.
A PadDescriptor for the PadLayer.
const InputSlot & GetInputSlot(unsigned int index) const override
Get a const input slot handle by slot index.
Definition: Layer.hpp:316
A layer user-provided data can be bound to (e.g. inputs, outputs).
Definition: OutputLayer.hpp:13
uint32_t m_PadRight
Padding right value in the width dimension.
This layer represents a pooling 2d operation.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
PoolingAlgorithm m_PoolType
The pooling algorithm to use (Max, Average, L2).
OptimizeForExclusiveConnection< PadLayer, Pooling2dLayer, pad_fold::FoldPadIntoPooling2dImpl > FoldPadIntoPooling2d
A layer user-provided data can be bound to (e.g. inputs, outputs).
Definition: InputLayer.hpp:13
bool CheckSequence(const armnn::Graph::ConstIterator first, const armnn::Graph::ConstIterator last)
Definition: TestUtils.hpp:21
void SetTensorInfo(const TensorInfo &tensorInfo) override
Definition: Layer.cpp:58
const OutputSlot & GetOutputSlot(unsigned int index=0) const override
Get the const output slot handle by slot index.
Definition: Layer.hpp:318
ConstIterator cend() const
Returns const iterator pointing to the end of the list. Lowercase for range-based for loops...
Definition: Graph.hpp:174
A Pooling2dDescriptor for the Pooling2dLayer.
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.

◆ BOOST_AUTO_TEST_CASE() [18/24]

BOOST_AUTO_TEST_CASE ( FoldPadLayerIntoPooling2d_PadWithMultipleOutputsShouldNotBeOptimized  )

Definition at line 698 of file OptimizerTests.cpp.

References Graph::AddLayer(), armnn::Average, Graph::cbegin(), Graph::cend(), CheckSequence(), OutputSlot::Connect(), armnn::Float32, Layer::GetInputSlot(), Layer::GetNameStr(), Layer::GetOutputSlot(), LayerWithParameters< Parameters >::GetParameters(), Pooling2dDescriptor::m_DataLayout, Pooling2dDescriptor::m_PoolHeight, Pooling2dDescriptor::m_PoolType, Pooling2dDescriptor::m_PoolWidth, Pooling2dDescriptor::m_StrideX, Pooling2dDescriptor::m_StrideY, armnn::MakeOptimizations(), armnn::NHWC, Optimizer::Pass(), and OutputSlot::SetTensorInfo().

699 {
700  // In this test case we'll set up a pad layer with two outputs. One goes to a pooling layer and the other
701  // goes to an output layer. FoldPadLayerIntoPooling2d should not optimize this graph as it uses the
702  // OptimizeForExclusiveConnection method.
703  Graph graph;
704  const unsigned int inputShape[] = { 1, 2, 2, 3 };
705  const unsigned int paddedShape[] = { 1, 4, 4, 3 };
706  const unsigned int outputShape[] = { 1, 2, 2, 3 };
707 
708  armnn::TensorInfo inputInfo(4, inputShape, DataType::Float32);
709  armnn::TensorInfo paddedInfo(4, paddedShape, DataType::Float32);
710  armnn::TensorInfo outputInfo(4, outputShape, DataType::Float32);
711 
712  Layer* input = graph.AddLayer<InputLayer>(0, "input");
713  input->GetOutputSlot().SetTensorInfo(inputInfo);
714 
715  PadDescriptor padDescriptor({ { 0, 0 }, { 1, 1 }, { 1, 1 }, { 0, 0 } });
716 
717  PadLayer* padLayer = graph.AddLayer<PadLayer>(padDescriptor, "pad");
718  padLayer->GetOutputSlot().SetTensorInfo(paddedInfo);
719 
720  Pooling2dDescriptor pooling2dDescriptor;
721  pooling2dDescriptor.m_PoolType = PoolingAlgorithm::Average;
722  pooling2dDescriptor.m_PoolWidth = 3;
723  pooling2dDescriptor.m_PoolHeight = 3;
724  pooling2dDescriptor.m_StrideX = 1;
725  pooling2dDescriptor.m_StrideY = 1;
726  pooling2dDescriptor.m_DataLayout = DataLayout::NHWC;
727 
728  Pooling2dLayer* pool2dLayer = graph.AddLayer<Pooling2dLayer>(pooling2dDescriptor, "pool2d");
729  pool2dLayer->GetOutputSlot().SetTensorInfo(outputInfo);
730 
731  Layer* output = graph.AddLayer<OutputLayer>(0, "output");
732 
733  // Connect up layers - input -> pad -> pool2d -> output
734  input->GetOutputSlot().Connect(padLayer->GetInputSlot(0));
735  padLayer->GetOutputSlot().Connect(pool2dLayer->GetInputSlot(0));
736  pool2dLayer->GetOutputSlot().Connect(output->GetInputSlot(0));
737 
738  // Add the alternative branch from the pad layer to an output layer.
739  Layer* secondOutput = graph.AddLayer<OutputLayer>(1, "dummy output");
740  padLayer->GetOutputSlot().Connect(secondOutput->GetInputSlot(0));
741 
742  auto checkSimplePool2d = [&](const armnn::Layer* const layer) {
743  const auto pool2dLayer = static_cast<const armnn::Pooling2dLayer*>(layer);
744  return IsLayerOfType<armnn::Pooling2dLayer>(layer) && (layer->GetNameStr() == "pool2d") &&
745  (pool2dLayer->GetParameters() == pooling2dDescriptor);
746  };
747 
748  // Initial sequence.
749  BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(),
750  &IsLayerOfType<armnn::InputLayer>,
751  &IsLayerOfType<armnn::PadLayer>,
752  checkSimplePool2d,
753  &IsLayerOfType<armnn::OutputLayer>,
754  &IsLayerOfType<armnn::OutputLayer>));
755 
757 
758  // The network should not change.
759  BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(),
760  &IsLayerOfType<armnn::InputLayer>,
761  &IsLayerOfType<armnn::PadLayer>,
762  checkSimplePool2d,
763  &IsLayerOfType<armnn::OutputLayer>,
764  &IsLayerOfType<armnn::OutputLayer>));
765 }
const Parameters & GetParameters() const
Optimizer::Optimizations MakeOptimizations(Args &&... args)
Definition: Optimizer.hpp:43
LayerT * AddLayer(Args &&... args)
Adds a new layer, of type LayerType, to the graph constructed with the arguments passed.
Definition: Graph.hpp:402
uint32_t m_PoolWidth
Pooling width value.
ConstIterator cbegin() const
Returns const iterator pointing to the beginning of the list. Lowercase for range-based for loops...
Definition: Graph.hpp:172
int Connect(InputSlot &destination)
Definition: Layer.cpp:83
static void Pass(Graph &graph, const Optimizations &optimizations)
Definition: Optimizer.cpp:16
This layer represents a pad operation.
Definition: PadLayer.hpp:14
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
uint32_t m_PoolHeight
Pooling height value.
A PadDescriptor for the PadLayer.
const InputSlot & GetInputSlot(unsigned int index) const override
Get a const input slot handle by slot index.
Definition: Layer.hpp:316
A layer user-provided data can be bound to (e.g. inputs, outputs).
Definition: OutputLayer.hpp:13
This layer represents a pooling 2d operation.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
PoolingAlgorithm m_PoolType
The pooling algorithm to use (Max, Average, L2).
OptimizeForExclusiveConnection< PadLayer, Pooling2dLayer, pad_fold::FoldPadIntoPooling2dImpl > FoldPadIntoPooling2d
A layer user-provided data can be bound to (e.g. inputs, outputs).
Definition: InputLayer.hpp:13
bool CheckSequence(const armnn::Graph::ConstIterator first, const armnn::Graph::ConstIterator last)
Definition: TestUtils.hpp:21
void SetTensorInfo(const TensorInfo &tensorInfo) override
Definition: Layer.cpp:58
const OutputSlot & GetOutputSlot(unsigned int index=0) const override
Get the const output slot handle by slot index.
Definition: Layer.hpp:318
ConstIterator cend() const
Returns const iterator pointing to the end of the list. Lowercase for range-based for loops...
Definition: Graph.hpp:174
A Pooling2dDescriptor for the Pooling2dLayer.
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.

◆ BOOST_AUTO_TEST_CASE() [19/24]

BOOST_AUTO_TEST_CASE ( FoldPadLayerIntoPooling2dLayer_PoolingLayerWithExcludePaddingShouldNotTakeMorePadding  )

Definition at line 767 of file OptimizerTests.cpp.

References Graph::AddLayer(), armnn::Average, Graph::cbegin(), Graph::cend(), CheckSequence(), OutputSlot::Connect(), armnn::Exclude, armnn::Float32, Layer::GetInputSlot(), Layer::GetNameStr(), Layer::GetOutputSlot(), LayerWithParameters< Parameters >::GetParameters(), Pooling2dDescriptor::m_DataLayout, Pooling2dDescriptor::m_PadBottom, Pooling2dDescriptor::m_PaddingMethod, Pooling2dDescriptor::m_PadLeft, Pooling2dDescriptor::m_PadRight, Pooling2dDescriptor::m_PadTop, Pooling2dDescriptor::m_PoolHeight, Pooling2dDescriptor::m_PoolType, Pooling2dDescriptor::m_PoolWidth, Pooling2dDescriptor::m_StrideX, Pooling2dDescriptor::m_StrideY, armnn::MakeOptimizations(), armnn::NHWC, Optimizer::Pass(), and OutputSlot::SetTensorInfo().

768 {
769  // In this test we set up an input, a Pad layer, a Pooling layer that includes padding, and an output layer. The optimization
770  // should not work as the pooling layer already includes an existing pad and specifies PaddingMethod::Exclude.
771  Graph graph;
772  const unsigned int inputShape[] = { 1, 2, 2, 3 };
773  const unsigned int paddedShape[] = { 1, 4, 4, 3 };
774  const unsigned int outputShape[] = { 1, 2, 2, 3 };
775 
776  armnn::TensorInfo inputInfo(4, inputShape, DataType::Float32);
777  armnn::TensorInfo paddedInfo(4, paddedShape, DataType::Float32);
778  armnn::TensorInfo outputInfo(4, outputShape, DataType::Float32);
779 
780  Layer* input = graph.AddLayer<InputLayer>(0, "input");
781  input->GetOutputSlot().SetTensorInfo(inputInfo);
782 
783  PadDescriptor padDescriptor({ { 0, 0 }, { 1, 1 }, { 1, 1 }, { 0, 0 } });
784 
785  PadLayer* padLayer = graph.AddLayer<PadLayer>(padDescriptor, "pad");
786  padLayer->GetOutputSlot().SetTensorInfo(paddedInfo);
787 
788  Pooling2dDescriptor pooling2dDescriptor;
789  pooling2dDescriptor.m_PoolType = PoolingAlgorithm::Average;
790  pooling2dDescriptor.m_PoolWidth = 3;
791  pooling2dDescriptor.m_PoolHeight = 3;
792  pooling2dDescriptor.m_StrideX = 1;
793  pooling2dDescriptor.m_StrideY = 1;
794  pooling2dDescriptor.m_DataLayout = DataLayout::NHWC;
795  // Include a pad with the pooling layer. This should prevent the optimization working.
796  pooling2dDescriptor.m_PadLeft = 1;
797  pooling2dDescriptor.m_PadRight = 1;
798  pooling2dDescriptor.m_PadTop = 1;
799  pooling2dDescriptor.m_PadBottom = 1;
800  pooling2dDescriptor.m_PaddingMethod = PaddingMethod::Exclude;
801 
802  Pooling2dLayer* pool2dLayer = graph.AddLayer<Pooling2dLayer>(pooling2dDescriptor, "pool2d");
803  pool2dLayer->GetOutputSlot().SetTensorInfo(outputInfo);
804 
805  Layer* output = graph.AddLayer<OutputLayer>(0, "output");
806 
807  // Connect up layers - input -> pad -> pool2d -> output
808  input->GetOutputSlot().Connect(padLayer->GetInputSlot(0));
809  padLayer->GetOutputSlot().Connect(pool2dLayer->GetInputSlot(0));
810  pool2dLayer->GetOutputSlot().Connect(output->GetInputSlot(0));
811 
812  auto checkSimplePool2d = [&](const armnn::Layer* const layer) {
813  const auto pool2dLayer = static_cast<const armnn::Pooling2dLayer*>(layer);
814  return IsLayerOfType<armnn::Pooling2dLayer>(layer) && (layer->GetNameStr() == "pool2d") &&
815  (pool2dLayer->GetParameters() == pooling2dDescriptor);
816  };
817 
818  BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(),
819  &IsLayerOfType<armnn::InputLayer>,
820  &IsLayerOfType<armnn::PadLayer>,
821  checkSimplePool2d,
822  &IsLayerOfType<armnn::OutputLayer>));
823 
825 
826  // The optimization should not have modified the graph.
827  BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(),
828  &IsLayerOfType<armnn::InputLayer>,
829  &IsLayerOfType<armnn::PadLayer>,
830  checkSimplePool2d,
831  &IsLayerOfType<armnn::OutputLayer>));
832 }
uint32_t m_PadBottom
Padding bottom value in the height dimension.
const Parameters & GetParameters() const
uint32_t m_PadLeft
Padding left value in the width dimension.
Optimizer::Optimizations MakeOptimizations(Args &&... args)
Definition: Optimizer.hpp:43
LayerT * AddLayer(Args &&... args)
Adds a new layer, of type LayerType, to the graph constructed with the arguments passed.
Definition: Graph.hpp:402
uint32_t m_PoolWidth
Pooling width value.
ConstIterator cbegin() const
Returns const iterator pointing to the beginning of the list. Lowercase for range-based for loops...
Definition: Graph.hpp:172
int Connect(InputSlot &destination)
Definition: Layer.cpp:83
static void Pass(Graph &graph, const Optimizations &optimizations)
Definition: Optimizer.cpp:16
PaddingMethod m_PaddingMethod
The padding method to be used. (Exclude, IgnoreValue).
uint32_t m_PadTop
Padding top value in the height dimension.
This layer represents a pad operation.
Definition: PadLayer.hpp:14
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
uint32_t m_PoolHeight
Pooling height value.
A PadDescriptor for the PadLayer.
const InputSlot & GetInputSlot(unsigned int index) const override
Get a const input slot handle by slot index.
Definition: Layer.hpp:316
A layer user-provided data can be bound to (e.g. inputs, outputs).
Definition: OutputLayer.hpp:13
uint32_t m_PadRight
Padding right value in the width dimension.
This layer represents a pooling 2d operation.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
PoolingAlgorithm m_PoolType
The pooling algorithm to use (Max, Average, L2).
OptimizeForExclusiveConnection< PadLayer, Pooling2dLayer, pad_fold::FoldPadIntoPooling2dImpl > FoldPadIntoPooling2d
A layer user-provided data can be bound to (e.g. inputs, outputs).
Definition: InputLayer.hpp:13
bool CheckSequence(const armnn::Graph::ConstIterator first, const armnn::Graph::ConstIterator last)
Definition: TestUtils.hpp:21
void SetTensorInfo(const TensorInfo &tensorInfo) override
Definition: Layer.cpp:58
const OutputSlot & GetOutputSlot(unsigned int index=0) const override
Get the const output slot handle by slot index.
Definition: Layer.hpp:318
ConstIterator cend() const
Returns const iterator pointing to the end of the list. Lowercase for range-based for loops...
Definition: Graph.hpp:174
A Pooling2dDescriptor for the Pooling2dLayer.
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.

◆ BOOST_AUTO_TEST_CASE() [20/24]

BOOST_AUTO_TEST_CASE ( FoldPadLayerIntoPooling2dLayer_MaxPoolingLayerWithLargePadValueShouldNotBeFolded  )

Definition at line 834 of file OptimizerTests.cpp.

References Graph::AddLayer(), armnn::Average, armnn::BOOST_AUTO_TEST_CASE(), Graph::cbegin(), Graph::cend(), CheckSequence(), IOutputSlot::Connect(), OutputSlot::Connect(), INetwork::Create(), armnn::Float32, IConnectableLayer::GetInputSlot(), Layer::GetInputSlot(), Layer::GetNameStr(), IConnectableLayer::GetOutputSlot(), Layer::GetOutputSlot(), LayerWithParameters< Parameters >::GetParameters(), Pooling2dDescriptor::m_DataLayout, PadDescriptor::m_PadValue, Pooling2dDescriptor::m_PoolHeight, Pooling2dDescriptor::m_PoolType, Pooling2dDescriptor::m_PoolWidth, Pooling2dDescriptor::m_StrideX, Pooling2dDescriptor::m_StrideY, armnn::MakeOptimizations(), armnn::Max, armnn::NHWC, Optimizer::Pass(), IOutputSlot::SetTensorInfo(), and OutputSlot::SetTensorInfo().

835 {
836  // In this test we set up an input, a Pad layer with a large pad value, a Max Pooling layer, and an output layer. The optimization
837  // should not work as the pad value will modify the result of the max pooling layer.
838  Graph graph;
839  const unsigned int inputShape[] = { 1, 2, 2, 3 };
840  const unsigned int paddedShape[] = { 1, 4, 4, 3 };
841  const unsigned int outputShape[] = { 1, 2, 2, 3 };
842 
843  armnn::TensorInfo inputInfo(4, inputShape, DataType::Float32);
844  armnn::TensorInfo paddedInfo(4, paddedShape, DataType::Float32);
845  armnn::TensorInfo outputInfo(4, outputShape, DataType::Float32);
846 
847  Layer* input = graph.AddLayer<InputLayer>(0, "input");
848  input->GetOutputSlot().SetTensorInfo(inputInfo);
849 
850  PadDescriptor padDescriptor({ { 0, 0 }, { 1, 1 }, { 1, 1 }, { 0, 0 } });
851  // For Max pooling of a float a pad value of 0 is more than enough to stop the fold happening.
852  // Set this to -std::numeric_limits<float>::infinity() to make the fold happen.
853  padDescriptor.m_PadValue = 0;
854 
855  PadLayer* padLayer = graph.AddLayer<PadLayer>(padDescriptor, "pad");
856  padLayer->GetOutputSlot().SetTensorInfo(paddedInfo);
857 
858  Pooling2dDescriptor pooling2dDescriptor;
859  pooling2dDescriptor.m_PoolType = PoolingAlgorithm::Max;
860  pooling2dDescriptor.m_PoolWidth = 3;
861  pooling2dDescriptor.m_PoolHeight = 3;
862  pooling2dDescriptor.m_StrideX = 1;
863  pooling2dDescriptor.m_StrideY = 1;
864  pooling2dDescriptor.m_DataLayout = DataLayout::NHWC;
865 
866  Pooling2dLayer* pool2dLayer = graph.AddLayer<Pooling2dLayer>(pooling2dDescriptor, "pool2d");
867  pool2dLayer->GetOutputSlot().SetTensorInfo(outputInfo);
868 
869  Layer* output = graph.AddLayer<OutputLayer>(0, "output");
870 
871  // Connect up layers - input -> pad -> pool2d -> output
872  input->GetOutputSlot().Connect(padLayer->GetInputSlot(0));
873  padLayer->GetOutputSlot().Connect(pool2dLayer->GetInputSlot(0));
874  pool2dLayer->GetOutputSlot().Connect(output->GetInputSlot(0));
875 
876  auto checkSimplePool2d = [&](const armnn::Layer* const layer) {
877  const auto pool2dLayer = static_cast<const armnn::Pooling2dLayer*>(layer);
878  return IsLayerOfType<armnn::Pooling2dLayer>(layer) && (layer->GetNameStr() == "pool2d") &&
879  (pool2dLayer->GetParameters() == pooling2dDescriptor);
880  };
881 
882  BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(),
883  &IsLayerOfType<armnn::InputLayer>,
884  &IsLayerOfType<armnn::PadLayer>,
885  checkSimplePool2d,
886  &IsLayerOfType<armnn::OutputLayer>));
887 
889 
890  // The optimization should not have modified the graph.
891  BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(),
892  &IsLayerOfType<armnn::InputLayer>,
893  &IsLayerOfType<armnn::PadLayer>,
894  checkSimplePool2d,
895  &IsLayerOfType<armnn::OutputLayer>));
896 }
const Parameters & GetParameters() const
Optimizer::Optimizations MakeOptimizations(Args &&... args)
Definition: Optimizer.hpp:43
float m_PadValue
Optional value to use for padding, defaults to 0.
LayerT * AddLayer(Args &&... args)
Adds a new layer, of type LayerType, to the graph constructed with the arguments passed.
Definition: Graph.hpp:402
uint32_t m_PoolWidth
Pooling width value.
ConstIterator cbegin() const
Returns const iterator pointing to the beginning of the list. Lowercase for range-based for loops...
Definition: Graph.hpp:172
int Connect(InputSlot &destination)
Definition: Layer.cpp:83
static void Pass(Graph &graph, const Optimizations &optimizations)
Definition: Optimizer.cpp:16
This layer represents a pad operation.
Definition: PadLayer.hpp:14
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
uint32_t m_PoolHeight
Pooling height value.
A PadDescriptor for the PadLayer.
const InputSlot & GetInputSlot(unsigned int index) const override
Get a const input slot handle by slot index.
Definition: Layer.hpp:316
A layer user-provided data can be bound to (e.g. inputs, outputs).
Definition: OutputLayer.hpp:13
This layer represents a pooling 2d operation.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
PoolingAlgorithm m_PoolType
The pooling algorithm to use (Max, Average, L2).
OptimizeForExclusiveConnection< PadLayer, Pooling2dLayer, pad_fold::FoldPadIntoPooling2dImpl > FoldPadIntoPooling2d
A layer user-provided data can be bound to (e.g. inputs, outputs).
Definition: InputLayer.hpp:13
bool CheckSequence(const armnn::Graph::ConstIterator first, const armnn::Graph::ConstIterator last)
Definition: TestUtils.hpp:21
void SetTensorInfo(const TensorInfo &tensorInfo) override
Definition: Layer.cpp:58
const OutputSlot & GetOutputSlot(unsigned int index=0) const override
Get the const output slot handle by slot index.
Definition: Layer.hpp:318
ConstIterator cend() const
Returns const iterator pointing to the end of the list. Lowercase for range-based for loops...
Definition: Graph.hpp:174
A Pooling2dDescriptor for the Pooling2dLayer.
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.

◆ BOOST_AUTO_TEST_CASE() [21/24]

BOOST_AUTO_TEST_CASE ( BackendCapabilityTest  )

Definition at line 1061 of file OptimizerTests.cpp.

References armnn::AsyncExecution, armnn::IsCapabilitySupported(), and armnn::NonConstWeights.

1062 {
1063  BackendId backendId = "MockBackend";
1064  // MockBackend does not support the NonConstWeights capability
1066 
1067  // MockBackend does not support the AsyncExecution capability
1069 }
Constant weights can be accessed through the descriptors, On the other hand, non-const weights can be...
bool IsCapabilitySupported(const armnn::BackendId &backend, armnn::BackendCapability capability)
Convenience function to check a capability on a backend.

◆ BOOST_AUTO_TEST_CASE() [22/24]

BOOST_AUTO_TEST_CASE ( BackendHintTest  )

Definition at line 1071 of file OptimizerTests.cpp.

References armnn::AssignBackends(), armnn::BackendRegistryInstance(), Layer::BackendSelectionHint(), Graph::begin(), OutputSlot::Connect(), INetwork::Create(), Graph::end(), OptimizedNetworkImpl::GetGraph(), Layer::GetInputSlot(), Layer::GetOutputSlot(), armnn::IgnoreUnused(), OptimizationResult::IsOk(), armnn::Linear, and ActivationDescriptor::m_Function.

1072 {
1073  class TestBackendAssignment : public LayerVisitorBase<VisitorNoThrowPolicy>
1074  {
1075  public:
1076  void VisitInputLayer(const IConnectableLayer* layer, LayerBindingId id, const char* name = nullptr) override
1077  {
1078  IgnoreUnused(id, name);
1079  auto inputLayer = PolymorphicDowncast<const InputLayer*>(layer);
1080  BOOST_TEST((inputLayer->GetBackendId() == "MockBackend"));
1081  }
1082 
1083  void VisitOutputLayer(const IConnectableLayer* layer, LayerBindingId id, const char* name = nullptr) override
1084  {
1085  IgnoreUnused(id, name);
1086  auto outputLayer = PolymorphicDowncast<const OutputLayer*>(layer);
1087  BOOST_TEST((outputLayer->GetBackendId() == "MockBackend"));
1088  }
1089 
1090  void VisitActivationLayer(const IConnectableLayer* layer,
1091  const ActivationDescriptor& activationDescriptor,
1092  const char* name = nullptr) override
1093  {
1094  IgnoreUnused(activationDescriptor, name);
1095  auto activation = PolymorphicDowncast<const ActivationLayer*>(layer);
1096  BOOST_TEST((activation->GetBackendId() == "CustomBackend"));
1097  }
1098  };
1099 
1100  struct CustomPolicy
1101  {
1102  static const BackendId& GetIdStatic()
1103  {
1104  static BackendId id = "CustomBackend";
1105  return id;
1106  }
1107  };
1108 
1109  struct MockPolicy
1110  {
1111  static const BackendId& GetIdStatic()
1112  {
1113  static BackendId id = "MockBackend";
1114  return id;
1115  }
1116  };
1117 
1118  auto& backendRegistry = BackendRegistryInstance();
1119 
1120  backendRegistry.Register("MockBackend", []() { return std::make_unique<MockBackend<MockPolicy>>(); });
1121 
1122  backendRegistry.Register("CustomBackend", []() { return std::make_unique<MockBackend<CustomPolicy>>(); });
1123 
1124  // Define the network
1125  auto network = INetwork::Create();
1126  ActivationDescriptor desc;
1127  desc.m_Function = ActivationFunction::Linear;
1128 
1129  std::unique_ptr<Graph> graph = std::make_unique<Graph>();
1130  auto input = graph->AddLayer<InputLayer>(0, "input");
1131  auto act = graph->AddLayer<ActivationLayer>(desc, "activation");
1132  auto output = graph->AddLayer<OutputLayer>(0, "output");
1133 
1134  BackendId customBackendId("CustomBackend");
1135  act->BackendSelectionHint(customBackendId);
1136 
1137  input->GetOutputSlot(0).Connect(act->GetInputSlot(0));
1138  act->GetOutputSlot(0).Connect(output->GetInputSlot(0));
1139 
1140  OptimizedNetworkImpl optNet(std::move(graph));
1141 
1142  // Get the optimized graph
1143  Graph& optGraph = optNet.GetGraph();
1144 
1145  std::vector<BackendId> prefs{ "MockBackend", "CustomBackend" };
1146 
1147  BackendIdSet availableBackends = { "CustomBackend", "MockBackend" };
1148  DeviceSpec spec(availableBackends);
1149 
1150  BackendSettings backendSettings(prefs, spec);
1151 
1152  // Assign an available backend to each layer
1153  Graph::Iterator firstLayer = optGraph.begin();
1154  Graph::Iterator lastLayer = optGraph.end();
1155 
1156  OptimizedNetworkImpl* optNetObjPtr = &optNet;
1157  OptimizationResult res = AssignBackends(optNetObjPtr,
1158  backendSettings,
1159  firstLayer,
1160  lastLayer,
1161  EmptyOptional());
1162 
1163  BOOST_TEST(res.IsOk());
1164 
1165  TestBackendAssignment visitor;
1166  for (auto it = firstLayer; it != lastLayer; ++it)
1167  {
1168  (*it)->Accept(visitor);
1169  }
1170 }
Iterator begin()
Returns iterator pointing to the beginning of the list. Lowercase for range-based for loops...
Definition: Graph.hpp:162
OptimizationResult AssignBackends(OptimizedNetworkImpl *optNetObjPtr, BackendSettings &backendSettings, Graph::Iterator &firstLayer, Graph::Iterator &lastLayer, Optional< std::vector< std::string > &> errMessages)
Definition: Network.cpp:888
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
Definition: INetwork.hpp:62
void VisitInputLayer(const IConnectableLayer *, LayerBindingId, const char *) override
Function that an InputLayer should call back to when its Accept(ILayerVisitor&) function is invoked...
std::unordered_set< BackendId > BackendIdSet
Definition: BackendId.hpp:191
void BackendSelectionHint(Optional< BackendId > backend) final
Provide a hint for the optimizer as to which backend to prefer for this layer.
Definition: Layer.hpp:330
int Connect(InputSlot &destination)
Definition: Layer.cpp:83
This layer represents an activation operation with the specified activation function.
BackendRegistry & BackendRegistryInstance()
void IgnoreUnused(Ts &&...)
LayerList::const_iterator Iterator
Definition: Graph.hpp:50
int LayerBindingId
Type of identifiers for bindable layers (inputs, outputs).
Definition: Types.hpp:243
const InputSlot & GetInputSlot(unsigned int index) const override
Get a const input slot handle by slot index.
Definition: Layer.hpp:316
A layer user-provided data can be bound to (e.g. inputs, outputs).
Definition: OutputLayer.hpp:13
An ActivationDescriptor for the ActivationLayer.
Definition: Descriptors.hpp:25
Visitor base class with empty implementations.
EmptyOptional is used to initialize the Optional class in case we want to have default value for an O...
Definition: Optional.hpp:32
A layer user-provided data can be bound to (e.g. inputs, outputs).
Definition: InputLayer.hpp:13
Iterator end()
Returns iterator pointing to the end of the list. Lowercase for range-based for loops.
Definition: Graph.hpp:164
void VisitOutputLayer(const IConnectableLayer *, LayerBindingId, const char *) override
Function an output layer should call back to when its Accept(ILayerVisitor&) function is invoked...
const OutputSlot & GetOutputSlot(unsigned int index=0) const override
Get the const output slot handle by slot index.
Definition: Layer.hpp:318
ActivationFunction m_Function
The activation function to use (Sigmoid, TanH, Linear, ReLu, BoundedReLu, SoftReLu, LeakyReLu, Abs, Sqrt, Square, Elu).
Definition: Descriptors.hpp:48
void VisitActivationLayer(const IConnectableLayer *, const ActivationDescriptor &, const char *) override
Function that an activation layer should call back to when its Accept(ILayerVisitor&) function is inv...

◆ BOOST_AUTO_TEST_CASE() [23/24]

BOOST_AUTO_TEST_CASE ( OptimizeForExclusiveConnectionsFuseTest  )

Definition at line 1173 of file OptimizerTests.cpp.

References Graph::AddLayer(), Graph::cbegin(), Graph::cend(), CheckSequence(), OutputSlot::Connect(), armnn::Float32, Layer::GetInputSlot(), Layer::GetNameStr(), Graph::GetNumLayers(), Layer::GetOutputSlot(), Convolution2dDescriptor::m_BiasEnabled, Convolution2dDescriptor::m_DataLayout, BatchNormalizationDescriptor::m_DataLayout, armnn::MakeOptimizations(), armnn::NHWC, Optimizer::Pass(), and OutputSlot::SetTensorInfo().

1174 {
1175  using namespace armnn;
1176  // Define layers information
1177  Convolution2dDescriptor convolution2dDescriptor;
1178  convolution2dDescriptor.m_BiasEnabled = false;
1179  convolution2dDescriptor.m_DataLayout = DataLayout::NHWC;
1180  BatchNormalizationDescriptor batchNormDescriptor;
1181  batchNormDescriptor.m_DataLayout = DataLayout::NHWC;
1182 
1183  const unsigned int inputDimensionSizes[] = { 1, 4, 4, 3 }; // NHWCin
1184  const unsigned int weightsDimensionSizes[] = { 1, 2, 2, 3 }; // CoutHWCin
1185  const unsigned int outputDimensionSizes[] = { 1, 3, 3, 1 }; // NHWCout
1186  const unsigned int outputChannelSize[] = { outputDimensionSizes[3] }; // Cout
1187 
1188  TensorInfo inputInfo(4, inputDimensionSizes, DataType::Float32);
1189  TensorInfo outputInfo(4, outputDimensionSizes, DataType::Float32);
1190 
1191  std::vector<float> weightsVector = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12 };
1192  ConstTensor weights(TensorInfo(4, weightsDimensionSizes, DataType::Float32), weightsVector);
1193 
1194  std::vector<float> betaVector = { 0.1f };
1195  std::vector<float> gammaVector = { 0.5f };
1196  std::vector<float> meanVector = { 0 };
1197  std::vector<float> varianceVector = { 1 };
1198  ConstTensor beta(TensorInfo(1, outputChannelSize, DataType::Float32), betaVector);
1199  ConstTensor gamma(TensorInfo(1, outputChannelSize, DataType::Float32), gammaVector);
1200  ConstTensor mean(TensorInfo(1, outputChannelSize, DataType::Float32), meanVector);
1201  ConstTensor variance(TensorInfo(1, outputChannelSize, DataType::Float32), varianceVector);
1202 
1203  // Define the network
1204  Graph graph;
1205  auto input = graph.AddLayer<InputLayer>(0, "input");
1206  auto conv = graph.AddLayer<Convolution2dLayer>(convolution2dDescriptor, "convolution");
1207  auto batchNorm = graph.AddLayer<BatchNormalizationLayer>(batchNormDescriptor, "batchNorm");
1208  auto output = graph.AddLayer<OutputLayer>(0, "output");
1209 
1210  // Set layer information
1211  input->GetOutputSlot().SetTensorInfo(inputInfo);
1212  conv->GetOutputSlot().SetTensorInfo(outputInfo);
1213  batchNorm->GetOutputSlot().SetTensorInfo(outputInfo);
1214  conv->m_Weight = std::make_unique<ScopedTensorHandle>(weights);
1215  batchNorm->m_Beta = std::make_unique<ScopedTensorHandle>(beta);
1216  batchNorm->m_Gamma = std::make_unique<ScopedTensorHandle>(gamma);
1217  batchNorm->m_Mean = std::make_unique<ScopedTensorHandle>(mean);
1218  batchNorm->m_Variance = std::make_unique<ScopedTensorHandle>(variance);
1219  if (convolution2dDescriptor.m_BiasEnabled)
1220  {
1221  std::vector<float> biasVector = { 11 };
1222  ConstTensor bias(TensorInfo(1, outputChannelSize, DataType::Float32), biasVector);
1223  conv->m_Bias = std::make_unique<ScopedTensorHandle>(bias);
1224  }
1225 
1226  // Connect layers
1227  input->GetOutputSlot(0).Connect(conv->GetInputSlot(0));
1228  conv->GetOutputSlot(0).Connect(batchNorm->GetInputSlot(0));
1229  batchNorm->GetOutputSlot(0).Connect(output->GetInputSlot(0));
1230 
1231  BOOST_CHECK(4 == graph.GetNumLayers());
1232  BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(),
1233  &IsLayerOfType<InputLayer>,
1234  &IsLayerOfType<Convolution2dLayer>,
1235  &IsLayerOfType<BatchNormalizationLayer>,
1236  &IsLayerOfType<OutputLayer>));
1237 
1238  // Optimize graph
1240 
1241  auto checkFusedConv2d = [](const armnn::Layer* const layer) -> bool {
1242  return IsLayerOfType<armnn::Convolution2dLayer>(layer) &&
1243  (layer->GetNameStr() == "fused-batchNorm-into-convolution");
1244  };
1245 
1246  BOOST_CHECK(3 == graph.GetNumLayers());
1247  BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(),
1248  &IsLayerOfType<InputLayer>,
1249  checkFusedConv2d,
1250  &IsLayerOfType<OutputLayer>));
1251 }
bool m_BiasEnabled
Enable/disable bias.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
This layer represents a batch normalization operation.
Optimizer::Optimizations MakeOptimizations(Args &&... args)
Definition: Optimizer.hpp:43
LayerT * AddLayer(Args &&... args)
Adds a new layer, of type LayerType, to the graph constructed with the arguments passed.
Definition: Graph.hpp:402
ConstIterator cbegin() const
Returns const iterator pointing to the beginning of the list. Lowercase for range-based for loops...
Definition: Graph.hpp:172
A Convolution2dDescriptor for the Convolution2dLayer.
int Connect(InputSlot &destination)
Definition: Layer.cpp:83
static void Pass(Graph &graph, const Optimizations &optimizations)
Definition: Optimizer.cpp:16
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
OptimizeForExclusiveConnection< Convolution2dLayer, BatchNormalizationLayer, FuseBatchNorm< Convolution2dLayer, armnn::DataType::Float32 > > FuseBatchNormIntoConvolution2DFloat32
Copyright (c) 2021 ARM Limited and Contributors.
const InputSlot & GetInputSlot(unsigned int index) const override
Get a const input slot handle by slot index.
Definition: Layer.hpp:316
A layer user-provided data can be bound to (e.g. inputs, outputs).
Definition: OutputLayer.hpp:13
A tensor defined by a TensorInfo (shape and data type) and an immutable backing store.
Definition: Tensor.hpp:314
A layer user-provided data can be bound to (e.g. inputs, outputs).
Definition: InputLayer.hpp:13
bool CheckSequence(const armnn::Graph::ConstIterator first, const armnn::Graph::ConstIterator last)
Definition: TestUtils.hpp:21
void SetTensorInfo(const TensorInfo &tensorInfo) override
Definition: Layer.cpp:58
const OutputSlot & GetOutputSlot(unsigned int index=0) const override
Get the const output slot handle by slot index.
Definition: Layer.hpp:318
ConstIterator cend() const
Returns const iterator pointing to the end of the list. Lowercase for range-based for loops...
Definition: Graph.hpp:174
This layer represents a convolution 2d operation.
size_t GetNumLayers() const
Definition: Graph.hpp:191
A BatchNormalizationDescriptor for the BatchNormalizationLayer.

◆ BOOST_AUTO_TEST_CASE() [24/24]

BOOST_AUTO_TEST_CASE ( OptimizeForExclusiveConnectionsWithoutFuseTest  )

Definition at line 1254 of file OptimizerTests.cpp.

References Graph::AddLayer(), BOOST_AUTO_TEST_SUITE_END(), Graph::cbegin(), Graph::cend(), CheckSequence(), OutputSlot::Connect(), Layer::GetInputSlot(), Graph::GetNumLayers(), Layer::GetOutputSlot(), armnn::MakeOptimizations(), and Optimizer::Pass().

1255 {
1256  // Define the network
1257  Graph graph;
1258  Convolution2dDescriptor convolution2dDescriptor;
1259  BatchNormalizationDescriptor batchNormDescriptor;
1260 
1261  auto input = graph.AddLayer<InputLayer>(0, "input");
1262  auto conv = graph.AddLayer<Convolution2dLayer>(convolution2dDescriptor, "convolution");
1263  auto batchNorm = graph.AddLayer<BatchNormalizationLayer>(batchNormDescriptor, "batchNorm");
1264  auto output = graph.AddLayer<OutputLayer>(0, "output");
1265  auto output2 = graph.AddLayer<OutputLayer>(1, "output2");
1266 
1267  // Connect layers
1268  input->GetOutputSlot(0).Connect(conv->GetInputSlot(0));
1269  conv->GetOutputSlot(0).Connect(batchNorm->GetInputSlot(0));
1270  batchNorm->GetOutputSlot(0).Connect(output->GetInputSlot(0));
1271  conv->GetOutputSlot(0).Connect(output2->GetInputSlot(0));
1272 
1273  BOOST_CHECK(5 == graph.GetNumLayers());
1274  BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(),
1275  &IsLayerOfType<armnn::InputLayer>,
1276  &IsLayerOfType<armnn::Convolution2dLayer>,
1277  &IsLayerOfType<armnn::BatchNormalizationLayer>,
1278  &IsLayerOfType<armnn::OutputLayer>,
1279  &IsLayerOfType<armnn::OutputLayer>));
1280  // Optimize graph
1282 
1283  BOOST_CHECK(5 == graph.GetNumLayers());
1284  BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(),
1285  &IsLayerOfType<armnn::InputLayer>,
1286  &IsLayerOfType<armnn::Convolution2dLayer>,
1287  &IsLayerOfType<armnn::BatchNormalizationLayer>,
1288  &IsLayerOfType<armnn::OutputLayer>,
1289  &IsLayerOfType<armnn::OutputLayer>));
1290 }
This layer represents a batch normalization operation.
Optimizer::Optimizations MakeOptimizations(Args &&... args)
Definition: Optimizer.hpp:43
LayerT * AddLayer(Args &&... args)
Adds a new layer, of type LayerType, to the graph constructed with the arguments passed.
Definition: Graph.hpp:402
ConstIterator cbegin() const
Returns const iterator pointing to the beginning of the list. Lowercase for range-based for loops...
Definition: Graph.hpp:172
A Convolution2dDescriptor for the Convolution2dLayer.
int Connect(InputSlot &destination)
Definition: Layer.cpp:83
static void Pass(Graph &graph, const Optimizations &optimizations)
Definition: Optimizer.cpp:16
OptimizeForExclusiveConnection< Convolution2dLayer, BatchNormalizationLayer, FuseBatchNorm< Convolution2dLayer, armnn::DataType::Float32 > > FuseBatchNormIntoConvolution2DFloat32
const InputSlot & GetInputSlot(unsigned int index) const override
Get a const input slot handle by slot index.
Definition: Layer.hpp:316
A layer user-provided data can be bound to (e.g. inputs, outputs).
Definition: OutputLayer.hpp:13
A layer user-provided data can be bound to (e.g. inputs, outputs).
Definition: InputLayer.hpp:13
bool CheckSequence(const armnn::Graph::ConstIterator first, const armnn::Graph::ConstIterator last)
Definition: TestUtils.hpp:21
const OutputSlot & GetOutputSlot(unsigned int index=0) const override
Get the const output slot handle by slot index.
Definition: Layer.hpp:318
ConstIterator cend() const
Returns const iterator pointing to the end of the list. Lowercase for range-based for loops...
Definition: Graph.hpp:174
This layer represents a convolution 2d operation.
size_t GetNumLayers() const
Definition: Graph.hpp:191
A BatchNormalizationDescriptor for the BatchNormalizationLayer.

◆ CreateConvolution2dGraph()

void CreateConvolution2dGraph ( Graph graph,
const unsigned int *  inputShape,
const unsigned int *  weightsShape,
const unsigned int *  outputShape,
DataLayout  dataLayout = DataLayout::NCHW 
)

Definition at line 259 of file OptimizerTests.cpp.

References Graph::AddLayer(), OutputSlot::Connect(), armnn::Float32, Layer::GetInputSlot(), Layer::GetOutputSlot(), Convolution2dDescriptor::m_BiasEnabled, Convolution2dDescriptor::m_DataLayout, Convolution2dDescriptor::m_StrideX, Convolution2dDescriptor::m_StrideY, Convolution2dLayer::m_Weight, and OutputSlot::SetTensorInfo().

Referenced by BOOST_AUTO_TEST_CASE().

262 {
263  armnn::TensorInfo inputInfo(4, inputShape, DataType::Float32);
264  armnn::TensorInfo outputInfo(4, outputShape, DataType::Float32);
265 
266  std::vector<float> weightsVector(90);
267  armnn::ConstTensor weights(armnn::TensorInfo(4, weightsShape, armnn::DataType::Float32), weightsVector);
268 
270  desc.m_BiasEnabled = false;
271  desc.m_StrideX = 1;
272  desc.m_StrideY = 1;
273  desc.m_DataLayout = dataLayout;
274 
275  Layer* input = graph.AddLayer<InputLayer>(0, "input");
276  input->GetOutputSlot().SetTensorInfo(inputInfo);
277 
278  Convolution2dLayer* layer = graph.AddLayer<Convolution2dLayer>(desc, "conv2d");
279  layer->m_Weight = std::make_unique<armnn::ScopedTensorHandle>(weights);
280  layer->GetOutputSlot().SetTensorInfo(outputInfo);
281 
282  Layer* output = graph.AddLayer<OutputLayer>(0, "output");
283  input->GetOutputSlot().Connect(layer->GetInputSlot(0));
284  layer->GetOutputSlot().Connect(output->GetInputSlot(0));
285 }
bool m_BiasEnabled
Enable/disable bias.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
LayerT * AddLayer(Args &&... args)
Adds a new layer, of type LayerType, to the graph constructed with the arguments passed.
Definition: Graph.hpp:402
A Convolution2dDescriptor for the Convolution2dLayer.
int Connect(InputSlot &destination)
Definition: Layer.cpp:83
std::shared_ptr< ConstTensorHandle > m_Weight
A shared pointer to store Weight values.
const InputSlot & GetInputSlot(unsigned int index) const override
Get a const input slot handle by slot index.
Definition: Layer.hpp:316
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
A layer user-provided data can be bound to (e.g. inputs, outputs).
Definition: OutputLayer.hpp:13
A tensor defined by a TensorInfo (shape and data type) and an immutable backing store.
Definition: Tensor.hpp:314
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
A layer user-provided data can be bound to (e.g. inputs, outputs).
Definition: InputLayer.hpp:13
void SetTensorInfo(const TensorInfo &tensorInfo) override
Definition: Layer.cpp:58
const OutputSlot & GetOutputSlot(unsigned int index=0) const override
Get the const output slot handle by slot index.
Definition: Layer.hpp:318
This layer represents a convolution 2d operation.

◆ CreateDepthwiseConvolution2dGraph()

void CreateDepthwiseConvolution2dGraph ( Graph graph,
const unsigned int *  inputShape,
const unsigned int *  weightsShape,
const unsigned int *  outputShape,
DataLayout  dataLayout = DataLayout::NCHW 
)

Definition at line 309 of file OptimizerTests.cpp.

References Graph::AddLayer(), OutputSlot::Connect(), armnn::Float32, Layer::GetInputSlot(), Layer::GetOutputSlot(), DepthwiseConvolution2dDescriptor::m_BiasEnabled, DepthwiseConvolution2dDescriptor::m_DataLayout, DepthwiseConvolution2dDescriptor::m_StrideX, DepthwiseConvolution2dDescriptor::m_StrideY, DepthwiseConvolution2dLayer::m_Weight, and OutputSlot::SetTensorInfo().

Referenced by BOOST_AUTO_TEST_CASE().

312 {
313  armnn::TensorInfo inputInfo(4, inputShape, DataType::Float32);
314  armnn::TensorInfo outputInfo(4, outputShape, DataType::Float32);
315 
316  std::vector<float> weightsVector(18);
317  armnn::ConstTensor weights(armnn::TensorInfo(4, weightsShape, armnn::DataType::Float32), weightsVector);
318 
320  desc.m_BiasEnabled = false;
321  desc.m_StrideX = 1;
322  desc.m_StrideY = 1;
323  desc.m_DataLayout = dataLayout;
324 
325  Layer* input = graph.AddLayer<InputLayer>(0, "input");
326  input->GetOutputSlot().SetTensorInfo(inputInfo);
327 
328  DepthwiseConvolution2dLayer* layer = graph.AddLayer<DepthwiseConvolution2dLayer>(desc, "depthwiseConv2d");
329  layer->m_Weight = std::make_unique<armnn::ScopedTensorHandle>(weights);
330  layer->GetOutputSlot().SetTensorInfo(outputInfo);
331 
332  Layer* output = graph.AddLayer<OutputLayer>(0, "output");
333  input->GetOutputSlot().Connect(layer->GetInputSlot(0));
334  layer->GetOutputSlot().Connect(output->GetInputSlot(0));
335 }
bool m_BiasEnabled
Enable/disable bias.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
This layer represents a depthwise convolution 2d operation.
LayerT * AddLayer(Args &&... args)
Adds a new layer, of type LayerType, to the graph constructed with the arguments passed.
Definition: Graph.hpp:402
int Connect(InputSlot &destination)
Definition: Layer.cpp:83
const InputSlot & GetInputSlot(unsigned int index) const override
Get a const input slot handle by slot index.
Definition: Layer.hpp:316
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
A layer user-provided data can be bound to (e.g. inputs, outputs).
Definition: OutputLayer.hpp:13
A tensor defined by a TensorInfo (shape and data type) and an immutable backing store.
Definition: Tensor.hpp:314
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
std::shared_ptr< ConstTensorHandle > m_Weight
A shared pointer to store Weight values.
A layer user-provided data can be bound to (e.g. inputs, outputs).
Definition: InputLayer.hpp:13
void SetTensorInfo(const TensorInfo &tensorInfo) override
Definition: Layer.cpp:58
const OutputSlot & GetOutputSlot(unsigned int index=0) const override
Get the const output slot handle by slot index.
Definition: Layer.hpp:318
A DepthwiseConvolution2dDescriptor for the DepthwiseConvolution2dLayer.

◆ CreateGatherGraph()

void CreateGatherGraph ( Graph graph,
const armnn::TensorInfo paramsInfo,
const armnn::TensorInfo indicesInfo,
const armnn::TensorInfo outputInfo 
)

Definition at line 452 of file OptimizerTests.cpp.

References Graph::AddLayer(), OutputSlot::Connect(), Layer::GetInputSlot(), Layer::GetOutputSlot(), and OutputSlot::SetTensorInfo().

Referenced by BOOST_AUTO_TEST_CASE().

456 {
457  Layer* input0 = graph.AddLayer<InputLayer>(0, "params");
458  input0->GetOutputSlot().SetTensorInfo(paramsInfo);
459 
460  Layer* input1 = graph.AddLayer<InputLayer>(1, "indices");
461  input1->GetOutputSlot().SetTensorInfo(indicesInfo);
462 
463  GatherDescriptor descriptor;
464  GatherLayer* layer = graph.AddLayer<GatherLayer>(descriptor, "gather");
465  layer->GetOutputSlot().SetTensorInfo(outputInfo);
466 
467  Layer* output = graph.AddLayer<OutputLayer>(0, "output");
468  input0->GetOutputSlot().Connect(layer->GetInputSlot(0));
469  input1->GetOutputSlot().Connect(layer->GetInputSlot(1));
470  layer->GetOutputSlot().Connect(output->GetInputSlot(0));
471 }
LayerT * AddLayer(Args &&... args)
Adds a new layer, of type LayerType, to the graph constructed with the arguments passed.
Definition: Graph.hpp:402
int Connect(InputSlot &destination)
Definition: Layer.cpp:83
const InputSlot & GetInputSlot(unsigned int index) const override
Get a const input slot handle by slot index.
Definition: Layer.hpp:316
A layer user-provided data can be bound to (e.g. inputs, outputs).
Definition: OutputLayer.hpp:13
This layer represents a Gather operator.
Definition: GatherLayer.hpp:14
A GatherDescriptor for the GatherLayer.
A layer user-provided data can be bound to (e.g. inputs, outputs).
Definition: InputLayer.hpp:13
void SetTensorInfo(const TensorInfo &tensorInfo) override
Definition: Layer.cpp:58
const OutputSlot & GetOutputSlot(unsigned int index=0) const override
Get the const output slot handle by slot index.
Definition: Layer.hpp:318

◆ CreatePooling2dGraph()

void CreatePooling2dGraph ( Graph graph,
const unsigned int *  inputShape,
const unsigned int *  outputShape,
DataLayout  dataLayout = DataLayout::NCHW 
)

Definition at line 359 of file OptimizerTests.cpp.

References Graph::AddLayer(), armnn::Average, OutputSlot::Connect(), armnn::Exclude, armnn::Float32, Layer::GetInputSlot(), Layer::GetOutputSlot(), Pooling2dDescriptor::m_DataLayout, Pooling2dDescriptor::m_PadBottom, Pooling2dDescriptor::m_PaddingMethod, Pooling2dDescriptor::m_PadLeft, Pooling2dDescriptor::m_PadRight, Pooling2dDescriptor::m_PadTop, Pooling2dDescriptor::m_PoolHeight, Pooling2dDescriptor::m_PoolType, Pooling2dDescriptor::m_PoolWidth, Pooling2dDescriptor::m_StrideX, Pooling2dDescriptor::m_StrideY, and OutputSlot::SetTensorInfo().

Referenced by BOOST_AUTO_TEST_CASE().

361 {
362  armnn::TensorInfo inputInfo(4, inputShape, DataType::Float32);
363  armnn::TensorInfo outputInfo(4, outputShape, DataType::Float32);
364 
365  Pooling2dDescriptor desc;
367  desc.m_PoolWidth = desc.m_PoolHeight = 100;
368  desc.m_StrideX = desc.m_StrideY = 5;
369  desc.m_PadLeft = 50;
370  desc.m_PadRight = 50;
371  desc.m_PadTop = 50;
372  desc.m_PadBottom = 50;
374  desc.m_DataLayout = dataLayout;
375 
376  Layer* input = graph.AddLayer<InputLayer>(0, "input");
377  input->GetOutputSlot().SetTensorInfo(inputInfo);
378 
379  Pooling2dLayer* layer = graph.AddLayer<Pooling2dLayer>(desc, "pooling2d");
380  layer->GetOutputSlot().SetTensorInfo(outputInfo);
381 
382  Layer* output = graph.AddLayer<OutputLayer>(0, "output");
383  input->GetOutputSlot().Connect(layer->GetInputSlot(0));
384  layer->GetOutputSlot().Connect(output->GetInputSlot(0));
385 }
uint32_t m_PadBottom
Padding bottom value in the height dimension.
uint32_t m_PadLeft
Padding left value in the width dimension.
LayerT * AddLayer(Args &&... args)
Adds a new layer, of type LayerType, to the graph constructed with the arguments passed.
Definition: Graph.hpp:402
uint32_t m_PoolWidth
Pooling width value.
int Connect(InputSlot &destination)
Definition: Layer.cpp:83
The padding fields don't count and are ignored.
PaddingMethod m_PaddingMethod
The padding method to be used. (Exclude, IgnoreValue).
uint32_t m_PadTop
Padding top value in the height dimension.
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
uint32_t m_PoolHeight
Pooling height value.
const InputSlot & GetInputSlot(unsigned int index) const override
Get a const input slot handle by slot index.
Definition: Layer.hpp:316
A layer user-provided data can be bound to (e.g. inputs, outputs).
Definition: OutputLayer.hpp:13
uint32_t m_PadRight
Padding right value in the width dimension.
This layer represents a pooling 2d operation.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
PoolingAlgorithm m_PoolType
The pooling algorithm to use (Max, Average, L2).
A layer user-provided data can be bound to (e.g. inputs, outputs).
Definition: InputLayer.hpp:13
void SetTensorInfo(const TensorInfo &tensorInfo) override
Definition: Layer.cpp:58
const OutputSlot & GetOutputSlot(unsigned int index=0) const override
Get the const output slot handle by slot index.
Definition: Layer.hpp:318
A Pooling2dDescriptor for the Pooling2dLayer.
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.

◆ CreateResizeBilinearGraph()

void CreateResizeBilinearGraph ( Graph graph,
const unsigned int *  inputShape,
const unsigned int *  outputShape,
DataLayout  dataLayout = DataLayout::NCHW 
)

Definition at line 407 of file OptimizerTests.cpp.

References Graph::AddLayer(), armnn::Bilinear, OutputSlot::Connect(), armnn::Float32, Layer::GetInputSlot(), Layer::GetOutputSlot(), ResizeDescriptor::m_DataLayout, ResizeDescriptor::m_Method, ResizeDescriptor::m_TargetHeight, ResizeDescriptor::m_TargetWidth, and OutputSlot::SetTensorInfo().

Referenced by BOOST_AUTO_TEST_CASE().

411 {
412  TensorInfo inputInfo(4, inputShape, DataType::Float32);
413  TensorInfo outputInfo(4, outputShape, DataType::Float32);
414 
415  ResizeDescriptor desc;
416  desc.m_Method = ResizeMethod::Bilinear;
417  desc.m_TargetHeight = 3;
418  desc.m_TargetWidth = 4;
419  desc.m_DataLayout = dataLayout;
420 
421  Layer* input = graph.AddLayer<InputLayer>(0, "input");
422  input->GetOutputSlot().SetTensorInfo(inputInfo);
423 
424  ResizeLayer* layer = graph.AddLayer<ResizeLayer>(desc, "resizeBilinear");
425  layer->GetOutputSlot().SetTensorInfo(outputInfo);
426 
427  Layer* output = graph.AddLayer<OutputLayer>(0, "output");
428  input->GetOutputSlot().Connect(layer->GetInputSlot(0));
429  layer->GetOutputSlot().Connect(output->GetInputSlot(0));
430 }
LayerT * AddLayer(Args &&... args)
Adds a new layer, of type LayerType, to the graph constructed with the arguments passed.
Definition: Graph.hpp:402
int Connect(InputSlot &destination)
Definition: Layer.cpp:83
ResizeMethod m_Method
The Interpolation method to use (Bilinear, NearestNeighbor).
A ResizeDescriptor for the ResizeLayer.
const InputSlot & GetInputSlot(unsigned int index) const override
Get a const input slot handle by slot index.
Definition: Layer.hpp:316
A layer user-provided data can be bound to (e.g. inputs, outputs).
Definition: OutputLayer.hpp:13
uint32_t m_TargetWidth
Target width value.
uint32_t m_TargetHeight
Target height value.
A layer user-provided data can be bound to (e.g. inputs, outputs).
Definition: InputLayer.hpp:13
void SetTensorInfo(const TensorInfo &tensorInfo) override
Definition: Layer.cpp:58
const OutputSlot & GetOutputSlot(unsigned int index=0) const override
Get the const output slot handle by slot index.
Definition: Layer.hpp:318
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
This layer represents a resize operation.
Definition: ResizeLayer.hpp:13