ArmNN
 20.08
OptimizerTests.cpp File Reference

Go to the source code of this file.

Functions

 BOOST_AUTO_TEST_CASE (LSTMValidateTensorShapesFromInputsCIFGDisabledTest)
 
 BOOST_AUTO_TEST_CASE (LSTMValidateTensorShapesFromInputsCIFGEnabledTest)
 
 BOOST_AUTO_TEST_CASE (InsertConvertersTest)
 
void CreateConvolution2dGraph (Graph &graph, const unsigned int *inputShape, const unsigned int *weightsShape, const unsigned int *outputShape, DataLayout dataLayout=DataLayout::NCHW)
 
 BOOST_AUTO_TEST_CASE (Conv2dValidateTensorShapesFromInputs)
 
 BOOST_AUTO_TEST_CASE (Conv2dValidateTensorShapesFromInputsNhwc)
 
void CreateDepthwiseConvolution2dGraph (Graph &graph, const unsigned int *inputShape, const unsigned int *weightsShape, const unsigned int *outputShape, DataLayout dataLayout=DataLayout::NCHW)
 
 BOOST_AUTO_TEST_CASE (DepthwiseConv2dValidateTensorShapesFromInputs)
 
 BOOST_AUTO_TEST_CASE (DepthwiseConv2dValidateTensorShapesFromInputsNhwc)
 
void CreatePooling2dGraph (Graph &graph, const unsigned int *inputShape, const unsigned int *outputShape, DataLayout dataLayout=DataLayout::NCHW)
 
 BOOST_AUTO_TEST_CASE (Pooling2dValidateTensorShapesFromInputs)
 
 BOOST_AUTO_TEST_CASE (Pooling2dValidateTensorShapesFromInputsNhwc)
 
void CreateResizeBilinearGraph (Graph &graph, const unsigned int *inputShape, const unsigned int *outputShape, DataLayout dataLayout=DataLayout::NCHW)
 
 BOOST_AUTO_TEST_CASE (ResizeBilinearValidateTensorShapesFromInputs)
 
 BOOST_AUTO_TEST_CASE (ResizeBilinearValidateTensorShapesFromInputsNhwc)
 
void CreateGatherGraph (Graph &graph, const armnn::TensorInfo &paramsInfo, const armnn::TensorInfo &indicesInfo, const armnn::TensorInfo &outputInfo)
 
 BOOST_AUTO_TEST_CASE (GatherValidateTensorShapesFromInputs)
 
 BOOST_AUTO_TEST_CASE (GatherValidateTensorShapesFromInputs1DParams)
 
 BOOST_AUTO_TEST_CASE (GatherValidateTensorShapesFromInputsMultiDimIndices)
 
 BOOST_AUTO_TEST_CASE (DetectionPostProcessValidateTensorShapes)
 
 BOOST_AUTO_TEST_CASE (FoldPadLayerIntoConvolution2dLayer)
 
 BOOST_AUTO_TEST_CASE (BackendHintTest)
 

Function Documentation

◆ BOOST_AUTO_TEST_CASE() [1/17]

BOOST_AUTO_TEST_CASE ( LSTMValidateTensorShapesFromInputsCIFGDisabledTest  )

Definition at line 145 of file OptimizerTests.cpp.

References Graph::InferTensorInfos().

146 {
147  Graph graph;
148 
149  //Helper function creates graph containing LSTM layer with required input and output layers
150  CreateLSTMLayerHelper(graph, false);
151 
152  //This function used to call ValidateShapesFromInputs();
153  BOOST_CHECK_NO_THROW(graph.InferTensorInfos());
154 }
void InferTensorInfos()
Definition: Graph.cpp:492

◆ BOOST_AUTO_TEST_CASE() [2/17]

BOOST_AUTO_TEST_CASE ( LSTMValidateTensorShapesFromInputsCIFGEnabledTest  )

Definition at line 156 of file OptimizerTests.cpp.

References Graph::InferTensorInfos().

157 {
158  Graph graph;
159 
160  //Helper function creates graph containing LSTM layer with required input and output layers
161  CreateLSTMLayerHelper(graph, true);
162 
163  //This function used to call ValidateShapesFromInputs();
164  BOOST_CHECK_NO_THROW(graph.InferTensorInfos());
165 }
void InferTensorInfos()
Definition: Graph.cpp:492

◆ BOOST_AUTO_TEST_CASE() [3/17]

BOOST_AUTO_TEST_CASE ( InsertConvertersTest  )

Definition at line 167 of file OptimizerTests.cpp.

References armnn::Addition, ARMNN_ASSERT, CheckSequence(), armnn::ConvertFp16ToFp32, armnn::ConvertFp32ToFp16, armnn::Float16, armnn::Float32, armnn::Floor, TensorInfo::GetDataType(), Layer::GetDataType(), Layer::GetInputSlot(), Layer::GetOutputHandler(), Layer::GetOutputSlot(), OutputSlot::GetTensorInfo(), Layer::GetType(), armnn::info, armnn::InsertConvertFp16ToFp32LayersBefore(), armnn::InsertConvertFp32ToFp16LayersAfter(), and OutputHandler::SetTensorInfo().

168 {
169  const armnn::TensorInfo info({ 1, 5, 2, 3 }, armnn::DataType::Float16);
170 
171  armnn::Graph graph;
172 
173  armnn::LayerBindingId inputId = 0;
174 
175  armnn::Layer* head = graph.AddLayer<armnn::OutputLayer>(0, "output");
176 
177  head = graph.InsertNewLayer<armnn::AdditionLayer>(head->GetInputSlot(0), "");
178  head->GetOutputHandler().SetTensorInfo(info);
179 
180  graph.InsertNewLayer<armnn::InputLayer>(head->GetInputSlot(1), inputId++, "")
181  ->GetOutputHandler().SetTensorInfo(info);
182 
183  head = graph.InsertNewLayer<armnn::FloorLayer>(head->GetInputSlot(0), "");
184  head->GetOutputHandler().SetTensorInfo(info);
185 
186  head = graph.InsertNewLayer<armnn::MemCopyLayer>(head->GetInputSlot(0), "");
187  head->GetOutputHandler().SetTensorInfo(info);
188 
189  graph.InsertNewLayer<armnn::InputLayer>(head->GetInputSlot(0), inputId++, "")
190  ->GetOutputHandler().SetTensorInfo(info);
191 
192  // Check graph layer sequence before inserting convert layers
193  BOOST_TEST(CheckSequence(graph.cbegin(),
194  graph.cend(),
195  &IsLayerOfType<armnn::InputLayer>,
196  &IsLayerOfType<armnn::InputLayer>,
197  &IsLayerOfType<armnn::MemCopyLayer>,
198  &IsLayerOfType<armnn::FloorLayer>,
199  &IsLayerOfType<armnn::AdditionLayer>,
200  &IsLayerOfType<armnn::OutputLayer>));
201 
202  // Check layers have Float16 DataType
203  for (auto& layer : graph)
204  {
205  if(layer->GetType()==LayerType::Floor || layer->GetType() == LayerType::Addition)
206  {
207  ARMNN_ASSERT(layer->GetOutputSlot(0).GetTensorInfo().GetDataType() == DataType::Float16);
208  ARMNN_ASSERT(layer->GetDataType() == DataType::Float16);
209  }
210  }
211 
212  // Insert convert layers either side of unsupported layer
213  for (auto& layer : graph)
214  {
215  if(layer->GetType()==LayerType::Floor || layer->GetType() == LayerType::Addition)
216  {
217  InsertConvertFp16ToFp32LayersBefore(graph, *layer);
218  InsertConvertFp32ToFp16LayersAfter(graph, *layer);
219  }
220  }
221 
222  // Check layers have correct DataType after inserting convert layers
223  for (auto& layer : graph)
224  {
225  if (layer->GetType()==LayerType::Floor || layer->GetType() == LayerType::Addition)
226  {
227  ARMNN_ASSERT(layer->GetOutputSlot(0).GetTensorInfo().GetDataType() == DataType::Float32);
228  ARMNN_ASSERT(layer->GetDataType() == DataType::Float32);
229  }
230  else if (layer->GetType() == LayerType::ConvertFp16ToFp32)
231  {
232  ARMNN_ASSERT(layer->GetOutputSlot(0).GetTensorInfo().GetDataType() == DataType::Float32);
233  ARMNN_ASSERT(layer->GetDataType() == DataType::Float16);
234  }
235  else if (layer->GetType() == LayerType::ConvertFp32ToFp16)
236  {
237  ARMNN_ASSERT(layer->GetOutputSlot(0).GetTensorInfo().GetDataType() == DataType::Float16);
238  ARMNN_ASSERT(layer->GetDataType() == DataType::Float32);
239  }
240  }
241 
242  // Check sequence of layers after inserting convert layers
243  BOOST_TEST(CheckSequence(graph.cbegin(),
244  graph.cend(),
245  &IsLayerOfType<armnn::InputLayer>,
246  &IsLayerOfType<armnn::InputLayer>,
247  &IsLayerOfType<armnn::ConvertFp16ToFp32Layer>,
248  &IsLayerOfType<armnn::MemCopyLayer>,
249  &IsLayerOfType<armnn::ConvertFp16ToFp32Layer>,
250  &IsLayerOfType<armnn::FloorLayer>,
251  &IsLayerOfType<armnn::ConvertFp32ToFp16Layer>,
252  &IsLayerOfType<armnn::ConvertFp16ToFp32Layer>,
253  &IsLayerOfType<armnn::AdditionLayer>,
254  &IsLayerOfType<armnn::ConvertFp32ToFp16Layer>,
255  &IsLayerOfType<armnn::OutputLayer>));
256 }
std::vector< ConvertFp32ToFp16Layer * > InsertConvertFp32ToFp16LayersAfter(Graph &graph, Layer &layer)
std::vector< ConvertFp16ToFp32Layer * > InsertConvertFp16ToFp32LayersBefore(Graph &graph, Layer &layer, bool expectCorrectInputType)
int LayerBindingId
Type of identifiers for bindable layers (inputs, outputs).
Definition: Types.hpp:194
const InputSlot & GetInputSlot(unsigned int index) const override
Get a const input slot handle by slot index.
Definition: Layer.hpp:312
A layer user-provided data can be bound to (e.g. inputs, outputs).
Definition: OutputLayer.hpp:13
This layer represents a memory copy operation.
#define ARMNN_ASSERT(COND)
Definition: Assert.hpp:14
This layer represents a floor operation.
Definition: FloorLayer.hpp:13
This layer represents an addition operation.
void SetTensorInfo(const TensorInfo &tensorInfo)
Sets the TensorInfo used by this output handler.
const OutputHandler & GetOutputHandler(unsigned int i=0) const
Definition: Layer.hpp:221
A layer user-provided data can be bound to (e.g. inputs, outputs).
Definition: InputLayer.hpp:13
bool CheckSequence(const armnn::Graph::ConstIterator first, const armnn::Graph::ConstIterator last)
Definition: TestUtils.hpp:21

◆ BOOST_AUTO_TEST_CASE() [4/17]

BOOST_AUTO_TEST_CASE ( Conv2dValidateTensorShapesFromInputs  )

Definition at line 288 of file OptimizerTests.cpp.

References CreateConvolution2dGraph(), and Graph::InferTensorInfos().

289 {
290  Graph graph;
291  const unsigned int inputShape[] = { 1, 3, 8, 16 };
292  const unsigned int weightsShape[] = { 2, 3, 5, 3 };
293  const unsigned int outputShape[] = { 1, 2, 4, 14 };
294  CreateConvolution2dGraph(graph, inputShape, weightsShape, outputShape);
295 
296  BOOST_CHECK_NO_THROW(graph.InferTensorInfos());
297 }
void InferTensorInfos()
Definition: Graph.cpp:492
void CreateConvolution2dGraph(Graph &graph, const unsigned int *inputShape, const unsigned int *weightsShape, const unsigned int *outputShape, DataLayout dataLayout=DataLayout::NCHW)

◆ BOOST_AUTO_TEST_CASE() [5/17]

BOOST_AUTO_TEST_CASE ( Conv2dValidateTensorShapesFromInputsNhwc  )

Definition at line 299 of file OptimizerTests.cpp.

References CreateConvolution2dGraph(), Graph::InferTensorInfos(), and armnn::NHWC.

300 {
301  Graph graph;
302  const unsigned int inputShape[] = { 1, 8, 16, 3 };
303  const unsigned int weightsShape[] = { 2, 5, 3, 3 };
304  const unsigned int outputShape[] = { 1, 4, 14, 2 };
305  CreateConvolution2dGraph(graph, inputShape, weightsShape, outputShape, DataLayout::NHWC);
306 
307  BOOST_CHECK_NO_THROW(graph.InferTensorInfos());
308 }
void InferTensorInfos()
Definition: Graph.cpp:492
void CreateConvolution2dGraph(Graph &graph, const unsigned int *inputShape, const unsigned int *weightsShape, const unsigned int *outputShape, DataLayout dataLayout=DataLayout::NCHW)

◆ BOOST_AUTO_TEST_CASE() [6/17]

BOOST_AUTO_TEST_CASE ( DepthwiseConv2dValidateTensorShapesFromInputs  )

Definition at line 338 of file OptimizerTests.cpp.

References CreateDepthwiseConvolution2dGraph(), and Graph::InferTensorInfos().

339 {
340  Graph graph;
341  const unsigned int inputShape[] = { 1, 2, 3, 3 };
342  const unsigned int weightsShape[] = { 1, 2, 3, 3 };
343  const unsigned int outputShape[] = { 1, 2, 1, 1 };
344  CreateDepthwiseConvolution2dGraph(graph, inputShape, weightsShape, outputShape);
345 
346  BOOST_CHECK_NO_THROW(graph.InferTensorInfos());
347 }
void InferTensorInfos()
Definition: Graph.cpp:492
void CreateDepthwiseConvolution2dGraph(Graph &graph, const unsigned int *inputShape, const unsigned int *weightsShape, const unsigned int *outputShape, DataLayout dataLayout=DataLayout::NCHW)

◆ BOOST_AUTO_TEST_CASE() [7/17]

BOOST_AUTO_TEST_CASE ( DepthwiseConv2dValidateTensorShapesFromInputsNhwc  )

Definition at line 349 of file OptimizerTests.cpp.

References CreateDepthwiseConvolution2dGraph(), Graph::InferTensorInfos(), and armnn::NHWC.

350 {
351  Graph graph;
352  const unsigned int inputShape[] = { 1, 3, 3, 2 };
353  const unsigned int weightsShape[] = { 1, 2, 3, 3 };
354  const unsigned int outputShape[] = { 1, 1, 1, 2 };
355  CreateDepthwiseConvolution2dGraph(graph, inputShape, weightsShape, outputShape, DataLayout::NHWC);
356 
357  BOOST_CHECK_NO_THROW(graph.InferTensorInfos());
358 }
void InferTensorInfos()
Definition: Graph.cpp:492
void CreateDepthwiseConvolution2dGraph(Graph &graph, const unsigned int *inputShape, const unsigned int *weightsShape, const unsigned int *outputShape, DataLayout dataLayout=DataLayout::NCHW)

◆ BOOST_AUTO_TEST_CASE() [8/17]

BOOST_AUTO_TEST_CASE ( Pooling2dValidateTensorShapesFromInputs  )

Definition at line 388 of file OptimizerTests.cpp.

References CreatePooling2dGraph(), Graph::InferTensorInfos(), and armnn::NCHW.

389 {
390  Graph graph;
391  const unsigned int inputShape[] = { 5, 3, 52, 60 };
392  const unsigned int outputShape[] = { 5, 3, 11, 13 };
393  CreatePooling2dGraph(graph, inputShape, outputShape, DataLayout::NCHW);
394 
395  BOOST_CHECK_NO_THROW(graph.InferTensorInfos());
396 }
void CreatePooling2dGraph(Graph &graph, const unsigned int *inputShape, const unsigned int *outputShape, DataLayout dataLayout=DataLayout::NCHW)
void InferTensorInfos()
Definition: Graph.cpp:492

◆ BOOST_AUTO_TEST_CASE() [9/17]

BOOST_AUTO_TEST_CASE ( Pooling2dValidateTensorShapesFromInputsNhwc  )

Definition at line 398 of file OptimizerTests.cpp.

References CreatePooling2dGraph(), Graph::InferTensorInfos(), and armnn::NHWC.

399 {
400  Graph graph;
401  const unsigned int inputShape[] = { 5, 52, 60, 3 };
402  const unsigned int outputShape[] = { 5, 11, 13, 3 };
403  CreatePooling2dGraph(graph, inputShape, outputShape, DataLayout::NHWC);
404 
405  BOOST_CHECK_NO_THROW(graph.InferTensorInfos());
406 }
void CreatePooling2dGraph(Graph &graph, const unsigned int *inputShape, const unsigned int *outputShape, DataLayout dataLayout=DataLayout::NCHW)
void InferTensorInfos()
Definition: Graph.cpp:492

◆ BOOST_AUTO_TEST_CASE() [10/17]

BOOST_AUTO_TEST_CASE ( ResizeBilinearValidateTensorShapesFromInputs  )

Definition at line 431 of file OptimizerTests.cpp.

References CreateResizeBilinearGraph(), and Graph::InferTensorInfos().

432 {
433  Graph graph;
434  const unsigned int inputShape[] = { 1, 2, 4, 5 };
435  const unsigned int outputShape[] = { 1, 2, 3, 4 };
436  CreateResizeBilinearGraph(graph, inputShape, outputShape);
437 
438  BOOST_CHECK_NO_THROW(graph.InferTensorInfos());
439 }
void CreateResizeBilinearGraph(Graph &graph, const unsigned int *inputShape, const unsigned int *outputShape, DataLayout dataLayout=DataLayout::NCHW)
void InferTensorInfos()
Definition: Graph.cpp:492

◆ BOOST_AUTO_TEST_CASE() [11/17]

BOOST_AUTO_TEST_CASE ( ResizeBilinearValidateTensorShapesFromInputsNhwc  )

Definition at line 441 of file OptimizerTests.cpp.

References CreateResizeBilinearGraph(), Graph::InferTensorInfos(), and armnn::NHWC.

442 {
443  Graph graph;
444  const unsigned int inputShape[] = { 1, 4, 5, 2 };
445  const unsigned int outputShape[] = { 1, 3, 4, 2 };
446  CreateResizeBilinearGraph(graph, inputShape, outputShape, DataLayout::NHWC);
447 
448  BOOST_CHECK_NO_THROW(graph.InferTensorInfos());
449 }
void CreateResizeBilinearGraph(Graph &graph, const unsigned int *inputShape, const unsigned int *outputShape, DataLayout dataLayout=DataLayout::NCHW)
void InferTensorInfos()
Definition: Graph.cpp:492

◆ BOOST_AUTO_TEST_CASE() [12/17]

BOOST_AUTO_TEST_CASE ( GatherValidateTensorShapesFromInputs  )

Definition at line 471 of file OptimizerTests.cpp.

References CreateGatherGraph(), armnn::Float32, Graph::InferTensorInfos(), and armnn::Signed32.

472 {
473  Graph graph;
474  armnn::TensorInfo paramsInfo({10, 5}, DataType::Float32);
475  armnn::TensorInfo indicesInfo({3}, DataType::Signed32);
476  armnn::TensorInfo outputInfo({3, 5}, DataType::Float32);
477 
478  CreateGatherGraph(graph, paramsInfo, indicesInfo, outputInfo);
479 
480  BOOST_CHECK_NO_THROW(graph.InferTensorInfos());
481 }
void CreateGatherGraph(Graph &graph, const armnn::TensorInfo &paramsInfo, const armnn::TensorInfo &indicesInfo, const armnn::TensorInfo &outputInfo)
void InferTensorInfos()
Definition: Graph.cpp:492

◆ BOOST_AUTO_TEST_CASE() [13/17]

BOOST_AUTO_TEST_CASE ( GatherValidateTensorShapesFromInputs1DParams  )

Definition at line 483 of file OptimizerTests.cpp.

References CreateGatherGraph(), armnn::Float32, Graph::InferTensorInfos(), and armnn::Signed32.

484 {
485  Graph graph;
486  armnn::TensorInfo paramsInfo({8}, DataType::Float32);
487  armnn::TensorInfo indicesInfo({5}, DataType::Signed32);
488  armnn::TensorInfo outputInfo( {5}, DataType::Float32);
489 
490  CreateGatherGraph(graph, paramsInfo, indicesInfo, outputInfo);
491 
492  BOOST_CHECK_NO_THROW(graph.InferTensorInfos());
493 }
void CreateGatherGraph(Graph &graph, const armnn::TensorInfo &paramsInfo, const armnn::TensorInfo &indicesInfo, const armnn::TensorInfo &outputInfo)
void InferTensorInfos()
Definition: Graph.cpp:492

◆ BOOST_AUTO_TEST_CASE() [14/17]

BOOST_AUTO_TEST_CASE ( GatherValidateTensorShapesFromInputsMultiDimIndices  )

Definition at line 495 of file OptimizerTests.cpp.

References CreateGatherGraph(), armnn::Float32, Graph::InferTensorInfos(), and armnn::Signed32.

496 {
497  Graph graph;
498  armnn::TensorInfo paramsInfo({3, 2, 5}, DataType::Float32);
499  armnn::TensorInfo indicesInfo({2, 2}, DataType::Signed32);
500  armnn::TensorInfo outputInfo({2, 2, 2, 5}, DataType::Float32);
501 
502  CreateGatherGraph(graph, paramsInfo, indicesInfo, outputInfo);
503 
504  BOOST_CHECK_NO_THROW(graph.InferTensorInfos());
505 }
void CreateGatherGraph(Graph &graph, const armnn::TensorInfo &paramsInfo, const armnn::TensorInfo &indicesInfo, const armnn::TensorInfo &outputInfo)
void InferTensorInfos()
Definition: Graph.cpp:492

◆ BOOST_AUTO_TEST_CASE() [15/17]

BOOST_AUTO_TEST_CASE ( DetectionPostProcessValidateTensorShapes  )

Definition at line 507 of file OptimizerTests.cpp.

References Graph::AddLayer(), anchors(), OutputSlot::Connect(), Layer::GetInputSlot(), Layer::GetOutputSlot(), Graph::InferTensorInfos(), DetectionPostProcessLayer::m_Anchors, DetectionPostProcessDescriptor::m_MaxDetections, armnn::QAsymmU8, scoresInfo, and OutputSlot::SetTensorInfo().

508 {
509  Graph graph;
510  armnn::TensorInfo boxEncodingsInfo({1, 10, 4}, DataType::QAsymmU8);
511  armnn::TensorInfo scoresInfo({1, 10, 4}, DataType::QAsymmU8);
512  std::vector<uint8_t> anchorsVector(40);
513  armnn::ConstTensor anchors(armnn::TensorInfo({10, 4}, DataType::QAsymmU8), anchorsVector);
514 
515  armnn::TensorInfo detectionBoxesInfo({1, 3, 4}, DataType::QAsymmU8);
516  armnn::TensorInfo detectionScoresInfo({1, 3}, DataType::QAsymmU8);
517  armnn::TensorInfo detectionClassesInfo({1, 3}, DataType::QAsymmU8);
518  armnn::TensorInfo numDetectionInfo({1}, DataType::QAsymmU8);
519 
520  Layer* input0 = graph.AddLayer<InputLayer>(0, "boxEncodings");
521  input0->GetOutputSlot().SetTensorInfo(boxEncodingsInfo);
522 
523  Layer* input1 = graph.AddLayer<InputLayer>(1, "score");
524  input1->GetOutputSlot().SetTensorInfo(scoresInfo);
525 
526  DetectionPostProcessDescriptor descriptor;
527  descriptor.m_MaxDetections = 3;
528 
529  DetectionPostProcessLayer* layer = graph.AddLayer<DetectionPostProcessLayer>(descriptor, "detectionPostProcess");
530  layer->m_Anchors = std::make_unique<armnn::ScopedCpuTensorHandle>(anchors);
531  layer->GetOutputSlot(0).SetTensorInfo(detectionBoxesInfo);
532  layer->GetOutputSlot(1).SetTensorInfo(detectionScoresInfo);
533  layer->GetOutputSlot(2).SetTensorInfo(detectionClassesInfo);
534  layer->GetOutputSlot(3).SetTensorInfo(numDetectionInfo);
535 
536  input0->GetOutputSlot().Connect(layer->GetInputSlot(0));
537  input1->GetOutputSlot().Connect(layer->GetInputSlot(1));
538 
539  BOOST_CHECK_NO_THROW(graph.InferTensorInfos());
540 }
LayerT * AddLayer(Args &&... args)
Adds a new layer, of type LayerType, to the graph constructed with the arguments passed.
Definition: Graph.hpp:403
int Connect(InputSlot &destination)
Definition: Layer.cpp:83
This layer represents a detection postprocess operator.
uint32_t m_MaxDetections
Maximum numbers of detections.
const InputSlot & GetInputSlot(unsigned int index) const override
Get a const input slot handle by slot index.
Definition: Layer.hpp:312
std::unique_ptr< ScopedCpuTensorHandle > m_Anchors
A unique pointer to store Anchor values.
A tensor defined by a TensorInfo (shape and data type) and an immutable backing store.
Definition: Tensor.hpp:298
void InferTensorInfos()
Definition: Graph.cpp:492
A layer user-provided data can be bound to (e.g. inputs, outputs).
Definition: InputLayer.hpp:13
void SetTensorInfo(const TensorInfo &tensorInfo) override
Definition: Layer.cpp:58
armnn::TensorInfo scoresInfo({ 1, 6, 3 }, armnn::DataType::Float32)
const OutputSlot & GetOutputSlot(unsigned int index=0) const override
Get the const output slot handle by slot index.
Definition: Layer.hpp:314
std::vector< float > anchors({ 0.5f, 0.5f, 1.0f, 1.0f, 0.5f, 0.5f, 1.0f, 1.0f, 0.5f, 0.5f, 1.0f, 1.0f, 0.5f, 10.5f, 1.0f, 1.0f, 0.5f, 10.5f, 1.0f, 1.0f, 0.5f, 100.5f, 1.0f, 1.0f })

◆ BOOST_AUTO_TEST_CASE() [16/17]

BOOST_AUTO_TEST_CASE ( FoldPadLayerIntoConvolution2dLayer  )

Definition at line 542 of file OptimizerTests.cpp.

References Graph::AddLayer(), Graph::cbegin(), Graph::cend(), CheckSequence(), OutputSlot::Connect(), armnn::Float32, Layer::GetInputSlot(), Layer::GetNameStr(), Layer::GetOutputSlot(), LayerWithParameters< Parameters >::GetParameters(), armnn::IsActivationSupported(), armnn::IsInputSupported(), armnn::IsOutputSupported(), Convolution2dDescriptor::m_BiasEnabled, Convolution2dDescriptor::m_DataLayout, Convolution2dDescriptor::m_StrideX, Convolution2dDescriptor::m_StrideY, Convolution2dLayer::m_Weight, armnn::MakeOptimizations(), armnn::NHWC, Optimizer::Pass(), and OutputSlot::SetTensorInfo().

543 {
544  Graph graph;
545  const unsigned int inputShape[] = { 1, 2, 2, 3 };
546  const unsigned int paddedShape[] = { 1, 6, 6, 3 };
547  const unsigned int weightsShape[] = { 1, 2, 3, 3 };
548  const unsigned int outputShape[] = { 1, 2, 1, 1 };
549 
550 
551  armnn::TensorInfo inputInfo(4, inputShape, DataType::Float32);
552  armnn::TensorInfo paddedInfo(4, paddedShape, DataType::Float32);
553  armnn::TensorInfo outputInfo(4, outputShape, DataType::Float32);
554 
555  Layer* input = graph.AddLayer<InputLayer>(0, "input");
556  input->GetOutputSlot().SetTensorInfo(inputInfo);
557 
558  PadDescriptor padDescriptor({{ 0, 0 }, { 2, 2 }, { 2, 2 }, { 0, 0 }});
559 
560  PadLayer* padLayer = graph.AddLayer<PadLayer>(padDescriptor, "pad");
561  padLayer->GetOutputSlot().SetTensorInfo(paddedInfo);
562 
563  Convolution2dDescriptor convolution2dDescriptor;
564  convolution2dDescriptor.m_BiasEnabled = false;
565  convolution2dDescriptor.m_StrideX = 1;
566  convolution2dDescriptor.m_StrideY = 1;
567  convolution2dDescriptor.m_DataLayout = DataLayout::NHWC;
568 
569  std::vector<float> weightsVector(18);
570  armnn::ConstTensor weights(armnn::TensorInfo(4, weightsShape, armnn::DataType::Float32), weightsVector);
571 
572  Convolution2dLayer* conv2dLayer = graph.AddLayer<Convolution2dLayer>(convolution2dDescriptor,"conv2d");
573  conv2dLayer->m_Weight = std::make_unique<armnn::ScopedCpuTensorHandle>(weights);
574  conv2dLayer->GetOutputSlot().SetTensorInfo(outputInfo);
575 
576  Layer* output = graph.AddLayer<OutputLayer>(0, "output");
577 
578  // Connect up layers - input -> pad -> conv2d -> output
579  input->GetOutputSlot().Connect(padLayer->GetInputSlot(0));
580  padLayer->GetOutputSlot().Connect(conv2dLayer->GetInputSlot(0));
581  conv2dLayer->GetOutputSlot().Connect(output->GetInputSlot(0));
582 
583  auto checkSimpleConv2d = [ ](const armnn::Layer* const layer) -> bool
584  {
585  const auto conv2dLayer = static_cast<const armnn::Convolution2dLayer*>(layer);
586  const auto conv2dLayerParams = conv2dLayer->GetParameters();
587  return IsLayerOfType<armnn::Convolution2dLayer>(layer) &&
588  (layer->GetNameStr() == "conv2d") &&
589  (conv2dLayerParams.m_PadLeft == 0) &&
590  (conv2dLayerParams.m_PadRight == 0) &&
591  (conv2dLayerParams.m_PadTop == 0) &&
592  (conv2dLayerParams.m_PadBottom == 0) &&
593  (conv2dLayerParams.m_BiasEnabled == false) &&
594  (conv2dLayerParams.m_StrideX == 1) &&
595  (conv2dLayerParams.m_StrideY == 1) &&
596  (conv2dLayerParams.m_DataLayout == DataLayout::NHWC);
597  };
598 
599  BOOST_TEST(CheckSequence(graph.cbegin(),
600  graph.cend(),
601  &IsLayerOfType<armnn::InputLayer>,
602  &IsLayerOfType<armnn::PadLayer>,
603  checkSimpleConv2d,
604  &IsLayerOfType<armnn::OutputLayer>));
605 
606  armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(FoldPadIntoConvolution2d()));
607 
608  auto checkPadFoldedIntoConv2d = [ ](const armnn::Layer* const layer) -> bool
609  {
610  const auto conv2dLayer = static_cast<const armnn::Convolution2dLayer*>(layer);
611  const auto conv2dLayerParams = conv2dLayer->GetParameters();
612  return IsLayerOfType<armnn::Convolution2dLayer>(layer) &&
613  (layer->GetNameStr() == "folded-pad-into-conv2d") &&
614  (conv2dLayerParams.m_PadLeft == 2) &&
615  (conv2dLayerParams.m_PadRight == 2) &&
616  (conv2dLayerParams.m_PadTop == 2) &&
617  (conv2dLayerParams.m_PadBottom == 2) &&
618  (conv2dLayerParams.m_BiasEnabled == false) &&
619  (conv2dLayerParams.m_StrideX == 1) &&
620  (conv2dLayerParams.m_StrideY == 1) &&
621  (conv2dLayerParams.m_DataLayout == DataLayout::NHWC);
622  };
623 
624  BOOST_TEST(CheckSequence(graph.cbegin(),
625  graph.cend(),
626  &IsLayerOfType<armnn::InputLayer>,
627  checkPadFoldedIntoConv2d,
628  &IsLayerOfType<armnn::OutputLayer>));
629 }
bool m_BiasEnabled
Enable/disable bias.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
const Parameters & GetParameters() const
Optimizer::Optimizations MakeOptimizations(Args &&... args)
Definition: Optimizer.hpp:43
LayerT * AddLayer(Args &&... args)
Adds a new layer, of type LayerType, to the graph constructed with the arguments passed.
Definition: Graph.hpp:403
ConstIterator cbegin() const
Returns const iterator pointing to the beginning of the list. Lowercase for range-based for loops...
Definition: Graph.hpp:173
A Convolution2dDescriptor for the Convolution2dLayer.
int Connect(InputSlot &destination)
Definition: Layer.cpp:83
static void Pass(Graph &graph, const Optimizations &optimizations)
Definition: Optimizer.cpp:16
This layer represents a pad operation.
Definition: PadLayer.hpp:14
OptimizeForConnection< PadLayer, Convolution2dLayer, FoldPadIntoConvolution2dImpl > FoldPadIntoConvolution2d
A PadDescriptor for the PadLayer.
const InputSlot & GetInputSlot(unsigned int index) const override
Get a const input slot handle by slot index.
Definition: Layer.hpp:312
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
A layer user-provided data can be bound to (e.g. inputs, outputs).
Definition: OutputLayer.hpp:13
std::unique_ptr< ScopedCpuTensorHandle > m_Weight
A unique pointer to store Weight values.
A tensor defined by a TensorInfo (shape and data type) and an immutable backing store.
Definition: Tensor.hpp:298
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
A layer user-provided data can be bound to (e.g. inputs, outputs).
Definition: InputLayer.hpp:13
bool CheckSequence(const armnn::Graph::ConstIterator first, const armnn::Graph::ConstIterator last)
Definition: TestUtils.hpp:21
void SetTensorInfo(const TensorInfo &tensorInfo) override
Definition: Layer.cpp:58
const OutputSlot & GetOutputSlot(unsigned int index=0) const override
Get the const output slot handle by slot index.
Definition: Layer.hpp:314
ConstIterator cend() const
Returns const iterator pointing to the end of the list. Lowercase for range-based for loops...
Definition: Graph.hpp:175
This layer represents a convolution 2d operation.

◆ BOOST_AUTO_TEST_CASE() [17/17]

BOOST_AUTO_TEST_CASE ( BackendHintTest  )

Definition at line 690 of file OptimizerTests.cpp.

References armnn::AssignBackends(), armnn::BackendRegistryInstance(), Layer::BackendSelectionHint(), BOOST_AUTO_TEST_SUITE_END(), OutputSlot::Connect(), INetwork::Create(), IOptimizedNetwork::Destroy(), OptimizedNetwork::GetGraph(), Layer::GetInputSlot(), Layer::GetOutputSlot(), armnn::IgnoreUnused(), OptimizationResult::IsOk(), armnn::Linear, and ActivationDescriptor::m_Function.

691 {
692  class TestBackendAssignment : public LayerVisitorBase<VisitorNoThrowPolicy>
693  {
694  public:
695  void VisitInputLayer(const IConnectableLayer* layer,
696  LayerBindingId id,
697  const char* name = nullptr) override
698  {
699  IgnoreUnused(id, name);
700  auto inputLayer = PolymorphicDowncast<const InputLayer*>(layer);
701  BOOST_TEST((inputLayer->GetBackendId() == "MockBackend"));
702  }
703 
704  void VisitOutputLayer(const IConnectableLayer* layer,
705  LayerBindingId id,
706  const char* name = nullptr) override
707  {
708  IgnoreUnused(id, name);
709  auto outputLayer = PolymorphicDowncast<const OutputLayer*>(layer);
710  BOOST_TEST((outputLayer->GetBackendId() == "MockBackend"));
711  }
712 
713  void VisitActivationLayer(const IConnectableLayer* layer,
714  const ActivationDescriptor& activationDescriptor,
715  const char* name = nullptr) override
716  {
717  IgnoreUnused(activationDescriptor, name);
718  auto activation = PolymorphicDowncast<const ActivationLayer*>(layer);
719  BOOST_TEST((activation->GetBackendId() == "CustomBackend"));
720  }
721  };
722 
723  struct CustomPolicy
724  {
725  static const BackendId& GetIdStatic()
726  {
727  static BackendId id="CustomBackend";
728  return id;
729  }
730  };
731 
732  struct MockPolicy
733  {
734  static const BackendId& GetIdStatic()
735  {
736  static BackendId id="MockBackend";
737  return id;
738  }
739  };
740 
741  auto& backendRegistry = BackendRegistryInstance();
742 
743  backendRegistry.Register("MockBackend", [](){
744  return std::make_unique<MockBackend<MockPolicy>>();
745  });
746 
747  backendRegistry.Register("CustomBackend", [](){
748  return std::make_unique<MockBackend<CustomPolicy>>();
749  });
750 
751  // Define the network
752  auto network = INetwork::Create();
753  ActivationDescriptor desc;
754  desc.m_Function = ActivationFunction::Linear;
755 
756  std::unique_ptr<Graph> graph = std::make_unique<Graph>();
757  auto input = graph->AddLayer<InputLayer>(0, "input");
758  auto act = graph->AddLayer<ActivationLayer>(desc, "activation");
759  auto output = graph->AddLayer<OutputLayer>(0, "output");
760 
761  BackendId customBackendId("CustomBackend");
762  act->BackendSelectionHint(customBackendId);
763 
764  input->GetOutputSlot(0).Connect(act->GetInputSlot(0));
765  act->GetOutputSlot(0).Connect(output->GetInputSlot(0));
766 
767 
768  auto optNet = IOptimizedNetworkPtr(new OptimizedNetwork(std::move(graph)), &IOptimizedNetwork::Destroy);
769 
770  OptimizedNetwork* optNetObjPtr = PolymorphicDowncast<OptimizedNetwork*>(optNet.get());
771 
772  // Get the optimized graph
773  Graph& optGraph = optNetObjPtr->GetGraph();
774 
775 
776  std::vector<BackendId> prefs{"MockBackend", "CustomBackend"};
777 
778  BackendIdSet availableBackends = {"CustomBackend", "MockBackend"};
779  DeviceSpec spec(availableBackends);
780 
781  BackendSettings backendSettings(prefs, spec);
782 
783  // Assign an available backend to each layer
784  Graph::Iterator firstLayer = optGraph.begin();
785  Graph::Iterator lastLayer = optGraph.end();
786  OptimizationResult res = AssignBackends(optNetObjPtr,
787  backendSettings,
788  firstLayer,
789  lastLayer,
790  EmptyOptional());
791 
792  BOOST_TEST(res.IsOk());
793 
794  TestBackendAssignment visitor;
795  for (auto it =firstLayer; it != lastLayer; ++it)
796  {
797  (*it)->Accept(visitor);
798  }
799 }
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
Definition: INetwork.hpp:61
void VisitInputLayer(const IConnectableLayer *, LayerBindingId, const char *) override
Function that an InputLayer should call back to when its Accept(ILayerVisitor&) function is invoked...
std::unordered_set< BackendId > BackendIdSet
Definition: BackendId.hpp:191
void BackendSelectionHint(Optional< BackendId > backend) final
Provide a hint for the optimizer as to which backend to prefer for this layer.
Definition: Layer.hpp:326
int Connect(InputSlot &destination)
Definition: Layer.cpp:83
This layer represents an activation operation with the specified activation function.
BackendRegistry & BackendRegistryInstance()
void IgnoreUnused(Ts &&...)
LayerList::const_iterator Iterator
Definition: Graph.hpp:51
int LayerBindingId
Type of identifiers for bindable layers (inputs, outputs).
Definition: Types.hpp:194
const InputSlot & GetInputSlot(unsigned int index) const override
Get a const input slot handle by slot index.
Definition: Layer.hpp:312
A layer user-provided data can be bound to (e.g. inputs, outputs).
Definition: OutputLayer.hpp:13
std::unique_ptr< IOptimizedNetwork, void(*)(IOptimizedNetwork *network)> IOptimizedNetworkPtr
Definition: INetwork.hpp:593
An ActivationDescriptor for the ActivationLayer.
Definition: Descriptors.hpp:20
Visitor base class with empty implementations.
OptimizationResult AssignBackends(OptimizedNetwork *optNetObjPtr, BackendSettings &backendSettings, Graph::Iterator &firstLayer, Graph::Iterator &lastLayer, Optional< std::vector< std::string > &> errMessages)
Definition: Network.cpp:382
EmptyOptional is used to initialize the Optional class in case we want to have default value for an O...
Definition: Optional.hpp:32
A layer user-provided data can be bound to (e.g. inputs, outputs).
Definition: InputLayer.hpp:13
void VisitOutputLayer(const IConnectableLayer *, LayerBindingId, const char *) override
Function an output layer should call back to when its Accept(ILayerVisitor&) function is invoked...
const OutputSlot & GetOutputSlot(unsigned int index=0) const override
Get the const output slot handle by slot index.
Definition: Layer.hpp:314
ActivationFunction m_Function
The activation function to use (Sigmoid, TanH, Linear, ReLu, BoundedReLu, SoftReLu, LeakyReLu, Abs, Sqrt, Square, Elu).
Definition: Descriptors.hpp:43
void VisitActivationLayer(const IConnectableLayer *, const ActivationDescriptor &, const char *) override
Function that an activation layer should call back to when its Accept(ILayerVisitor&) function is inv...

◆ CreateConvolution2dGraph()

void CreateConvolution2dGraph ( Graph &  graph,
const unsigned int *  inputShape,
const unsigned int *  weightsShape,
const unsigned int *  outputShape,
DataLayout  dataLayout = DataLayout::NCHW 
)

Definition at line 260 of file OptimizerTests.cpp.

References Graph::AddLayer(), OutputSlot::Connect(), armnn::Float32, Layer::GetInputSlot(), Layer::GetOutputSlot(), Convolution2dDescriptor::m_BiasEnabled, Convolution2dDescriptor::m_DataLayout, Convolution2dDescriptor::m_StrideX, Convolution2dDescriptor::m_StrideY, Convolution2dLayer::m_Weight, and OutputSlot::SetTensorInfo().

Referenced by BOOST_AUTO_TEST_CASE().

// Builds a minimal three-layer graph: Input -> Convolution2d -> Output.
// inputShape/weightsShape/outputShape are 4-D shapes; dataLayout selects
// NCHW (default) or NHWC. Used by the Conv2dValidateTensorShapesFromInputs
// test cases to exercise shape inference through the optimizer.
263 {
264  armnn::TensorInfo inputInfo(4, inputShape, DataType::Float32);
265  armnn::TensorInfo outputInfo(4, outputShape, DataType::Float32);
266 
267  std::vector<float> weightsVector(90);  // zero-initialised dummy weights; only the shape matters here
268  armnn::ConstTensor weights(armnn::TensorInfo(4, weightsShape, armnn::DataType::Float32), weightsVector);
269 
// NOTE(review): the declaration "Convolution2dDescriptor desc;" (original line
// 270) is rendered as a hyperlink in this Doxygen extract and is missing above.
271  desc.m_BiasEnabled = false;
272  desc.m_StrideX = 1;
273  desc.m_StrideY = 1;
274  desc.m_DataLayout = dataLayout;
275 
276  Layer* input = graph.AddLayer<InputLayer>(0, "input");  // binding id 0
277  input->GetOutputSlot().SetTensorInfo(inputInfo);
278 
279  Convolution2dLayer* layer = graph.AddLayer<Convolution2dLayer>(desc, "conv2d");
280  layer->m_Weight = std::make_unique<armnn::ScopedCpuTensorHandle>(weights);  // layer takes ownership of the weight tensor
281  layer->GetOutputSlot().SetTensorInfo(outputInfo);
282 
283  Layer* output = graph.AddLayer<OutputLayer>(0, "output");
// Wire the graph: input feeds the convolution, convolution feeds the output.
284  input->GetOutputSlot().Connect(layer->GetInputSlot(0));
285  layer->GetOutputSlot().Connect(output->GetInputSlot(0));
286 }
bool m_BiasEnabled
Enable/disable bias.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
LayerT * AddLayer(Args &&... args)
Adds a new layer, of type LayerType, to the graph constructed with the arguments passed.
Definition: Graph.hpp:403
A Convolution2dDescriptor for the Convolution2dLayer.
int Connect(InputSlot &destination)
Definition: Layer.cpp:83
const InputSlot & GetInputSlot(unsigned int index) const override
Get a const input slot handle by slot index.
Definition: Layer.hpp:312
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
A layer user-provided data can be bound to (e.g. inputs, outputs).
Definition: OutputLayer.hpp:13
std::unique_ptr< ScopedCpuTensorHandle > m_Weight
A unique pointer to store Weight values.
A tensor defined by a TensorInfo (shape and data type) and an immutable backing store.
Definition: Tensor.hpp:298
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
A layer user-provided data can be bound to (e.g. inputs, outputs).
Definition: InputLayer.hpp:13
void SetTensorInfo(const TensorInfo &tensorInfo) override
Definition: Layer.cpp:58
const OutputSlot & GetOutputSlot(unsigned int index=0) const override
Get the const output slot handle by slot index.
Definition: Layer.hpp:314
This layer represents a convolution 2d operation.

◆ CreateDepthwiseConvolution2dGraph()

void CreateDepthwiseConvolution2dGraph ( Graph &  graph,
const unsigned int *  inputShape,
const unsigned int *  weightsShape,
const unsigned int *  outputShape,
DataLayout  dataLayout = DataLayout::NCHW 
)

Definition at line 310 of file OptimizerTests.cpp.

References Graph::AddLayer(), OutputSlot::Connect(), armnn::Float32, Layer::GetInputSlot(), Layer::GetOutputSlot(), DepthwiseConvolution2dDescriptor::m_BiasEnabled, DepthwiseConvolution2dDescriptor::m_DataLayout, DepthwiseConvolution2dDescriptor::m_StrideX, DepthwiseConvolution2dDescriptor::m_StrideY, DepthwiseConvolution2dLayer::m_Weight, and OutputSlot::SetTensorInfo().

Referenced by BOOST_AUTO_TEST_CASE().

// Builds a minimal three-layer graph: Input -> DepthwiseConvolution2d -> Output.
// Mirrors CreateConvolution2dGraph but with a depthwise convolution layer and a
// smaller dummy weight buffer. dataLayout selects NCHW (default) or NHWC.
313 {
314  armnn::TensorInfo inputInfo(4, inputShape, DataType::Float32);
315  armnn::TensorInfo outputInfo(4, outputShape, DataType::Float32);
316 
317  std::vector<float> weightsVector(18);  // zero-initialised dummy weights; only the shape matters here
318  armnn::ConstTensor weights(armnn::TensorInfo(4, weightsShape, armnn::DataType::Float32), weightsVector);
319 
// NOTE(review): the declaration "DepthwiseConvolution2dDescriptor desc;"
// (original line 320) is rendered as a hyperlink in this Doxygen extract and
// is missing above.
321  desc.m_BiasEnabled = false;
322  desc.m_StrideX = 1;
323  desc.m_StrideY = 1;
324  desc.m_DataLayout = dataLayout;
325 
326  Layer* input = graph.AddLayer<InputLayer>(0, "input");  // binding id 0
327  input->GetOutputSlot().SetTensorInfo(inputInfo);
328 
329  DepthwiseConvolution2dLayer* layer = graph.AddLayer<DepthwiseConvolution2dLayer>(desc, "depthwiseConv2d");
330  layer->m_Weight = std::make_unique<armnn::ScopedCpuTensorHandle>(weights);  // layer takes ownership of the weight tensor
331  layer->GetOutputSlot().SetTensorInfo(outputInfo);
332 
333  Layer* output = graph.AddLayer<OutputLayer>(0, "output");
// Wire the graph: input feeds the depthwise convolution, which feeds the output.
334  input->GetOutputSlot().Connect(layer->GetInputSlot(0));
335  layer->GetOutputSlot().Connect(output->GetInputSlot(0));
336 }
bool m_BiasEnabled
Enable/disable bias.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
This layer represents a depthwise convolution 2d operation.
LayerT * AddLayer(Args &&... args)
Adds a new layer, of type LayerType, to the graph constructed with the arguments passed.
Definition: Graph.hpp:403
int Connect(InputSlot &destination)
Definition: Layer.cpp:83
const InputSlot & GetInputSlot(unsigned int index) const override
Get a const input slot handle by slot index.
Definition: Layer.hpp:312
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
A layer user-provided data can be bound to (e.g. inputs, outputs).
Definition: OutputLayer.hpp:13
A tensor defined by a TensorInfo (shape and data type) and an immutable backing store.
Definition: Tensor.hpp:298
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
A layer user-provided data can be bound to (e.g. inputs, outputs).
Definition: InputLayer.hpp:13
void SetTensorInfo(const TensorInfo &tensorInfo) override
Definition: Layer.cpp:58
const OutputSlot & GetOutputSlot(unsigned int index=0) const override
Get the const output slot handle by slot index.
Definition: Layer.hpp:314
std::unique_ptr< ScopedCpuTensorHandle > m_Weight
A unique pointer to store Weight values.
A DepthwiseConvolution2dDescriptor for the DepthwiseConvolution2dLayer.

◆ CreateGatherGraph()

void CreateGatherGraph ( Graph &  graph,
const armnn::TensorInfo &  paramsInfo,
const armnn::TensorInfo &  indicesInfo,
const armnn::TensorInfo &  outputInfo 
)

Definition at line 452 of file OptimizerTests.cpp.

References Graph::AddLayer(), OutputSlot::Connect(), Layer::GetInputSlot(), Layer::GetOutputSlot(), and OutputSlot::SetTensorInfo().

Referenced by BOOST_AUTO_TEST_CASE().

// Builds a two-input graph: (params, indices) -> Gather -> Output.
// Tensor infos for both inputs and the output are supplied by the caller, so
// the Gather shape-validation tests can drive arbitrary rank combinations.
454 {
455  Layer* input0 = graph.AddLayer<InputLayer>(0, "params");   // binding id 0: values to gather from
456  input0->GetOutputSlot().SetTensorInfo(paramsInfo);
457 
458  Layer* input1 = graph.AddLayer<InputLayer>(1, "indices");  // binding id 1: gather indices
459  input1->GetOutputSlot().SetTensorInfo(indicesInfo);
460 
461  GatherDescriptor descriptor;  // default-constructed descriptor
462  GatherLayer* layer = graph.AddLayer<GatherLayer>(descriptor, "gather");
463  layer->GetOutputSlot().SetTensorInfo(outputInfo);
464 
465  Layer* output = graph.AddLayer<OutputLayer>(0, "output");
// Wire the graph: slot 0 takes params, slot 1 takes indices.
466  input0->GetOutputSlot().Connect(layer->GetInputSlot(0));
467  input1->GetOutputSlot().Connect(layer->GetInputSlot(1));
468  layer->GetOutputSlot().Connect(output->GetInputSlot(0));
469 }
LayerT * AddLayer(Args &&... args)
Adds a new layer, of type LayerType, to the graph constructed with the arguments passed.
Definition: Graph.hpp:403
int Connect(InputSlot &destination)
Definition: Layer.cpp:83
const InputSlot & GetInputSlot(unsigned int index) const override
Get a const input slot handle by slot index.
Definition: Layer.hpp:312
A layer user-provided data can be bound to (e.g. inputs, outputs).
Definition: OutputLayer.hpp:13
This layer represents a Gather operator.
Definition: GatherLayer.hpp:14
A GatherDescriptor for the GatherLayer.
A layer user-provided data can be bound to (e.g. inputs, outputs).
Definition: InputLayer.hpp:13
void SetTensorInfo(const TensorInfo &tensorInfo) override
Definition: Layer.cpp:58
const OutputSlot & GetOutputSlot(unsigned int index=0) const override
Get the const output slot handle by slot index.
Definition: Layer.hpp:314

◆ CreatePooling2dGraph()

void CreatePooling2dGraph ( Graph &  graph,
const unsigned int *  inputShape,
const unsigned int *  outputShape,
DataLayout  dataLayout = DataLayout::NCHW 
)

Definition at line 360 of file OptimizerTests.cpp.

References Graph::AddLayer(), armnn::Average, OutputSlot::Connect(), armnn::Exclude, armnn::Float32, Layer::GetInputSlot(), Layer::GetOutputSlot(), Pooling2dDescriptor::m_DataLayout, Pooling2dDescriptor::m_PadBottom, Pooling2dDescriptor::m_PaddingMethod, Pooling2dDescriptor::m_PadLeft, Pooling2dDescriptor::m_PadRight, Pooling2dDescriptor::m_PadTop, Pooling2dDescriptor::m_PoolHeight, Pooling2dDescriptor::m_PoolType, Pooling2dDescriptor::m_PoolWidth, Pooling2dDescriptor::m_StrideX, Pooling2dDescriptor::m_StrideY, and OutputSlot::SetTensorInfo().

Referenced by BOOST_AUTO_TEST_CASE().

// Builds a minimal three-layer graph: Input -> Pooling2d -> Output, using a
// 100x100 average pool with stride 5 and 50-pixel padding on every edge.
// dataLayout selects NCHW (default) or NHWC.
362 {
363  armnn::TensorInfo inputInfo(4, inputShape, DataType::Float32);
364  armnn::TensorInfo outputInfo(4, outputShape, DataType::Float32);
365 
366  Pooling2dDescriptor desc;
// NOTE(review): "desc.m_PoolType = PoolingAlgorithm::Average;" (original line
// 367) is rendered as a hyperlink in this Doxygen extract and is missing here.
368  desc.m_PoolWidth = desc.m_PoolHeight = 100;
369  desc.m_StrideX = desc.m_StrideY = 5;
370  desc.m_PadLeft = 50;
371  desc.m_PadRight = 50;
372  desc.m_PadTop = 50;
373  desc.m_PadBottom = 50;
// NOTE(review): "desc.m_PaddingMethod = PaddingMethod::Exclude;" (original
// line 374) is likewise rendered as a hyperlink and missing here.
375  desc.m_DataLayout = dataLayout;
376 
377  Layer* input = graph.AddLayer<InputLayer>(0, "input");  // binding id 0
378  input->GetOutputSlot().SetTensorInfo(inputInfo);
379 
380  Pooling2dLayer* layer = graph.AddLayer<Pooling2dLayer>(desc, "pooling2d");
381  layer->GetOutputSlot().SetTensorInfo(outputInfo);
382 
383  Layer* output = graph.AddLayer<OutputLayer>(0, "output");
// Wire the graph: input feeds the pooling layer, which feeds the output.
384  input->GetOutputSlot().Connect(layer->GetInputSlot(0));
385  layer->GetOutputSlot().Connect(output->GetInputSlot(0));
386 }
uint32_t m_PadBottom
Padding bottom value in the height dimension.
uint32_t m_PadLeft
Padding left value in the width dimension.
LayerT * AddLayer(Args &&... args)
Adds a new layer, of type LayerType, to the graph constructed with the arguments passed.
Definition: Graph.hpp:403
uint32_t m_PoolWidth
Pooling width value.
int Connect(InputSlot &destination)
Definition: Layer.cpp:83
The padding fields don't count and are ignored.
PaddingMethod m_PaddingMethod
The padding method to be used. (Exclude, IgnoreValue).
uint32_t m_PadTop
Padding top value in the height dimension.
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
uint32_t m_PoolHeight
Pooling height value.
const InputSlot & GetInputSlot(unsigned int index) const override
Get a const input slot handle by slot index.
Definition: Layer.hpp:312
A layer user-provided data can be bound to (e.g. inputs, outputs).
Definition: OutputLayer.hpp:13
uint32_t m_PadRight
Padding right value in the width dimension.
This layer represents a pooling 2d operation.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
PoolingAlgorithm m_PoolType
The pooling algorithm to use (Max, Average, L2).
A layer user-provided data can be bound to (e.g. inputs, outputs).
Definition: InputLayer.hpp:13
void SetTensorInfo(const TensorInfo &tensorInfo) override
Definition: Layer.cpp:58
const OutputSlot & GetOutputSlot(unsigned int index=0) const override
Get the const output slot handle by slot index.
Definition: Layer.hpp:314
A Pooling2dDescriptor for the Pooling2dLayer.
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.

◆ CreateResizeBilinearGraph()

void CreateResizeBilinearGraph ( Graph &  graph,
const unsigned int *  inputShape,
const unsigned int *  outputShape,
DataLayout  dataLayout = DataLayout::NCHW 
)

Definition at line 408 of file OptimizerTests.cpp.

References Graph::AddLayer(), armnn::Bilinear, OutputSlot::Connect(), armnn::Float32, Layer::GetInputSlot(), Layer::GetOutputSlot(), ResizeDescriptor::m_DataLayout, ResizeDescriptor::m_Method, ResizeDescriptor::m_TargetHeight, ResizeDescriptor::m_TargetWidth, and OutputSlot::SetTensorInfo().

Referenced by BOOST_AUTO_TEST_CASE().

// Builds a minimal three-layer graph: Input -> Resize(Bilinear, 4x3) -> Output.
// dataLayout selects NCHW (default) or NHWC. Used by the
// ResizeBilinearValidateTensorShapesFromInputs test cases.
410 {
411  TensorInfo inputInfo(4, inputShape, DataType::Float32);
412  TensorInfo outputInfo(4, outputShape, DataType::Float32);
413 
414  ResizeDescriptor desc;
415  desc.m_Method = ResizeMethod::Bilinear;
416  desc.m_TargetHeight = 3;  // fixed target size: width 4, height 3
417  desc.m_TargetWidth = 4;
418  desc.m_DataLayout = dataLayout;
419 
420  Layer* input = graph.AddLayer<InputLayer>(0, "input");  // binding id 0
421  input->GetOutputSlot().SetTensorInfo(inputInfo);
422 
423  ResizeLayer* layer = graph.AddLayer<ResizeLayer>(desc, "resizeBilinear");
424  layer->GetOutputSlot().SetTensorInfo(outputInfo);
425 
426  Layer* output = graph.AddLayer<OutputLayer>(0, "output");
// Wire the graph: input feeds the resize layer, which feeds the output.
427  input->GetOutputSlot().Connect(layer->GetInputSlot(0));
428  layer->GetOutputSlot().Connect(output->GetInputSlot(0));
429 }
LayerT * AddLayer(Args &&... args)
Adds a new layer, of type LayerType, to the graph constructed with the arguments passed.
Definition: Graph.hpp:403
int Connect(InputSlot &destination)
Definition: Layer.cpp:83
ResizeMethod m_Method
The Interpolation method to use (Bilinear, NearestNeighbor).
A ResizeDescriptor for the ResizeLayer.
const InputSlot & GetInputSlot(unsigned int index) const override
Get a const input slot handle by slot index.
Definition: Layer.hpp:312
A layer user-provided data can be bound to (e.g. inputs, outputs).
Definition: OutputLayer.hpp:13
uint32_t m_TargetWidth
Target width value.
uint32_t m_TargetHeight
Target height value.
A layer user-provided data can be bound to (e.g. inputs, outputs).
Definition: InputLayer.hpp:13
void SetTensorInfo(const TensorInfo &tensorInfo) override
Definition: Layer.cpp:58
const OutputSlot & GetOutputSlot(unsigned int index=0) const override
Get the const output slot handle by slot index.
Definition: Layer.hpp:314
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
This layer represents a resize operation.
Definition: ResizeLayer.hpp:13