ArmNN
 20.02
OptimizerTests.cpp File Reference

Go to the source code of this file.

Functions

 BOOST_AUTO_TEST_CASE (LSTMValidateTensorShapesFromInputsCIFGDisabledTest)
 
 BOOST_AUTO_TEST_CASE (LSTMValidateTensorShapesFromInputsCIFGEnabledTest)
 
 BOOST_AUTO_TEST_CASE (InsertConvertersTest)
 
void CreateConvolution2dGraph (Graph &graph, const unsigned int *inputShape, const unsigned int *weightsShape, const unsigned int *outputShape, DataLayout dataLayout=DataLayout::NCHW)
 
 BOOST_AUTO_TEST_CASE (Conv2dValidateTensorShapesFromInputs)
 
 BOOST_AUTO_TEST_CASE (Conv2dValidateTensorShapesFromInputsNhwc)
 
void CreateDepthwiseConvolution2dGraph (Graph &graph, const unsigned int *inputShape, const unsigned int *weightsShape, const unsigned int *outputShape, DataLayout dataLayout=DataLayout::NCHW)
 
 BOOST_AUTO_TEST_CASE (DepthwiseConv2dValidateTensorShapesFromInputs)
 
 BOOST_AUTO_TEST_CASE (DepthwiseConv2dValidateTensorShapesFromInputsNhwc)
 
void CreatePooling2dGraph (Graph &graph, const unsigned int *inputShape, const unsigned int *outputShape, DataLayout dataLayout=DataLayout::NCHW)
 
 BOOST_AUTO_TEST_CASE (Pooling2dValidateTensorShapesFromInputs)
 
 BOOST_AUTO_TEST_CASE (Pooling2dValidateTensorShapesFromInputsNhwc)
 
void CreateResizeBilinearGraph (Graph &graph, const unsigned int *inputShape, const unsigned int *outputShape, DataLayout dataLayout=DataLayout::NCHW)
 
 BOOST_AUTO_TEST_CASE (ResizeBilinearValidateTensorShapesFromInputs)
 
 BOOST_AUTO_TEST_CASE (ResizeBilinearValidateTensorShapesFromInputsNhwc)
 
void CreateGatherGraph (Graph &graph, const armnn::TensorInfo &paramsInfo, const armnn::TensorInfo &indicesInfo, const armnn::TensorInfo &outputInfo)
 
 BOOST_AUTO_TEST_CASE (GatherValidateTensorShapesFromInputs)
 
 BOOST_AUTO_TEST_CASE (GatherValidateTensorShapesFromInputs1DParams)
 
 BOOST_AUTO_TEST_CASE (GatherValidateTensorShapesFromInputsMultiDimIndices)
 
 BOOST_AUTO_TEST_CASE (DetectionPostProcessValidateTensorShapes)
 
 BOOST_AUTO_TEST_CASE (FoldPadLayerIntoConvolution2dLayer)
 
 BOOST_AUTO_TEST_CASE (BackendHintTest)
 

Function Documentation

◆ BOOST_AUTO_TEST_CASE() [1/17]

BOOST_AUTO_TEST_CASE ( LSTMValidateTensorShapesFromInputsCIFGDisabledTest  )

Definition at line 141 of file OptimizerTests.cpp.

References Graph::InferTensorInfos().

142 {
143  Graph graph;
144 
145  //Helper function creates graph containing LSTM layer with required input and output layers
146  CreateLSTMLayerHelper(graph, false);
147 
148  //This function used to call ValidateShapesFromInputs();
149  BOOST_CHECK_NO_THROW(graph.InferTensorInfos());
150 }
void InferTensorInfos()
Definition: Graph.cpp:493

◆ BOOST_AUTO_TEST_CASE() [2/17]

BOOST_AUTO_TEST_CASE ( LSTMValidateTensorShapesFromInputsCIFGEnabledTest  )

Definition at line 152 of file OptimizerTests.cpp.

References Graph::InferTensorInfos().

153 {
154  Graph graph;
155 
156  //Helper function creates graph containing LSTM layer with required input and output layers
157  CreateLSTMLayerHelper(graph, true);
158 
159  //This function used to call ValidateShapesFromInputs();
160  BOOST_CHECK_NO_THROW(graph.InferTensorInfos());
161 }
void InferTensorInfos()
Definition: Graph.cpp:493

◆ BOOST_AUTO_TEST_CASE() [3/17]

BOOST_AUTO_TEST_CASE ( InsertConvertersTest  )

Definition at line 163 of file OptimizerTests.cpp.

References armnn::Addition, CheckSequence(), armnn::ConvertFp16ToFp32, armnn::ConvertFp32ToFp16, armnn::Float16, armnn::Float32, armnn::Floor, TensorInfo::GetDataType(), Layer::GetDataType(), Layer::GetInputSlot(), Layer::GetOutputHandler(), Layer::GetOutputSlot(), OutputSlot::GetTensorInfo(), Layer::GetType(), armnn::info, armnn::InsertConvertFp16ToFp32LayersBefore(), armnn::InsertConvertFp32ToFp16LayersAfter(), and OutputHandler::SetTensorInfo().

164 {
165  const armnn::TensorInfo info({ 1, 5, 2, 3 }, armnn::DataType::Float16);
166 
167  armnn::Graph graph;
168 
169  armnn::LayerBindingId inputId = 0;
170 
171  armnn::Layer* head = graph.AddLayer<armnn::OutputLayer>(0, "output");
172 
173  head = graph.InsertNewLayer<armnn::AdditionLayer>(head->GetInputSlot(0), "");
174  head->GetOutputHandler().SetTensorInfo(info);
175 
176  graph.InsertNewLayer<armnn::InputLayer>(head->GetInputSlot(1), inputId++, "")
177  ->GetOutputHandler().SetTensorInfo(info);
178 
179  head = graph.InsertNewLayer<armnn::FloorLayer>(head->GetInputSlot(0), "");
180  head->GetOutputHandler().SetTensorInfo(info);
181 
182  head = graph.InsertNewLayer<armnn::MemCopyLayer>(head->GetInputSlot(0), "");
183  head->GetOutputHandler().SetTensorInfo(info);
184 
185  graph.InsertNewLayer<armnn::InputLayer>(head->GetInputSlot(0), inputId++, "")
186  ->GetOutputHandler().SetTensorInfo(info);
187 
188  // Check graph layer sequence before inserting convert layers
189  BOOST_TEST(CheckSequence(graph.cbegin(),
190  graph.cend(),
191  &IsLayerOfType<armnn::InputLayer>,
192  &IsLayerOfType<armnn::InputLayer>,
193  &IsLayerOfType<armnn::MemCopyLayer>,
194  &IsLayerOfType<armnn::FloorLayer>,
195  &IsLayerOfType<armnn::AdditionLayer>,
196  &IsLayerOfType<armnn::OutputLayer>));
197 
198  // Check layers have Float16 DataType
199  for (auto& layer : graph)
200  {
201  if(layer->GetType()==LayerType::Floor || layer->GetType() == LayerType::Addition)
202  {
203  BOOST_ASSERT(layer->GetOutputSlot(0).GetTensorInfo().GetDataType() == DataType::Float16);
204  BOOST_ASSERT(layer->GetDataType() == DataType::Float16);
205  }
206  }
207 
208  // Insert convert layers either side of unsupported layer
209  for (auto& layer : graph)
210  {
211  if(layer->GetType()==LayerType::Floor || layer->GetType() == LayerType::Addition)
212  {
213  InsertConvertFp16ToFp32LayersBefore(graph, *layer);
214  InsertConvertFp32ToFp16LayersAfter(graph, *layer);
215  }
216  }
217 
218  // Check layers have correct DataType after inserting convert layers
219  for (auto& layer : graph)
220  {
221  if (layer->GetType()==LayerType::Floor || layer->GetType() == LayerType::Addition)
222  {
223  BOOST_ASSERT(layer->GetOutputSlot(0).GetTensorInfo().GetDataType() == DataType::Float32);
224  BOOST_ASSERT(layer->GetDataType() == DataType::Float32);
225  }
226  else if (layer->GetType() == LayerType::ConvertFp16ToFp32)
227  {
228  BOOST_ASSERT(layer->GetOutputSlot(0).GetTensorInfo().GetDataType() == DataType::Float32);
229  BOOST_ASSERT(layer->GetDataType() == DataType::Float16);
230  }
231  else if (layer->GetType() == LayerType::ConvertFp32ToFp16)
232  {
233  BOOST_ASSERT(layer->GetOutputSlot(0).GetTensorInfo().GetDataType() == DataType::Float16);
234  BOOST_ASSERT(layer->GetDataType() == DataType::Float32);
235  }
236  }
237 
238  // Check sequence of layers after inserting convert layers
239  BOOST_TEST(CheckSequence(graph.cbegin(),
240  graph.cend(),
241  &IsLayerOfType<armnn::InputLayer>,
242  &IsLayerOfType<armnn::InputLayer>,
243  &IsLayerOfType<armnn::ConvertFp16ToFp32Layer>,
244  &IsLayerOfType<armnn::MemCopyLayer>,
245  &IsLayerOfType<armnn::ConvertFp16ToFp32Layer>,
246  &IsLayerOfType<armnn::FloorLayer>,
247  &IsLayerOfType<armnn::ConvertFp32ToFp16Layer>,
248  &IsLayerOfType<armnn::ConvertFp16ToFp32Layer>,
249  &IsLayerOfType<armnn::AdditionLayer>,
250  &IsLayerOfType<armnn::ConvertFp32ToFp16Layer>,
251  &IsLayerOfType<armnn::OutputLayer>));
252 }
std::vector< ConvertFp32ToFp16Layer * > InsertConvertFp32ToFp16LayersAfter(Graph &graph, Layer &layer)
std::vector< ConvertFp16ToFp32Layer * > InsertConvertFp16ToFp32LayersBefore(Graph &graph, Layer &layer, bool expectCorrectInputType)
int LayerBindingId
Type of identifiers for bindable layers (inputs, outputs).
Definition: Types.hpp:171
const InputSlot & GetInputSlot(unsigned int index) const override
Get a const input slot handle by slot index.
Definition: Layer.hpp:310
A layer user-provided data can be bound to (e.g. inputs, outputs).
Definition: OutputLayer.hpp:13
This layer represents a memory copy operation.
This layer represents a floor operation.
Definition: FloorLayer.hpp:13
This layer represents an addition operation.
void SetTensorInfo(const TensorInfo &tensorInfo)
Sets the TensorInfo used by this output handler.
const OutputHandler & GetOutputHandler(unsigned int i=0) const
Definition: Layer.hpp:221
A layer user-provided data can be bound to (e.g. inputs, outputs).
Definition: InputLayer.hpp:13
bool CheckSequence(const armnn::Graph::ConstIterator first, const armnn::Graph::ConstIterator last)
Definition: TestUtils.hpp:20

◆ BOOST_AUTO_TEST_CASE() [4/17]

BOOST_AUTO_TEST_CASE ( Conv2dValidateTensorShapesFromInputs  )

Definition at line 284 of file OptimizerTests.cpp.

References CreateConvolution2dGraph(), and Graph::InferTensorInfos().

285 {
286  Graph graph;
287  const unsigned int inputShape[] = { 1, 3, 8, 16 };
288  const unsigned int weightsShape[] = { 2, 3, 5, 3 };
289  const unsigned int outputShape[] = { 1, 2, 4, 14 };
290  CreateConvolution2dGraph(graph, inputShape, weightsShape, outputShape);
291 
292  BOOST_CHECK_NO_THROW(graph.InferTensorInfos());
293 }
void InferTensorInfos()
Definition: Graph.cpp:493
void CreateConvolution2dGraph(Graph &graph, const unsigned int *inputShape, const unsigned int *weightsShape, const unsigned int *outputShape, DataLayout dataLayout=DataLayout::NCHW)

◆ BOOST_AUTO_TEST_CASE() [5/17]

BOOST_AUTO_TEST_CASE ( Conv2dValidateTensorShapesFromInputsNhwc  )

Definition at line 295 of file OptimizerTests.cpp.

References CreateConvolution2dGraph(), Graph::InferTensorInfos(), and armnn::NHWC.

296 {
297  Graph graph;
298  const unsigned int inputShape[] = { 1, 8, 16, 3 };
299  const unsigned int weightsShape[] = { 2, 5, 3, 3 };
300  const unsigned int outputShape[] = { 1, 4, 14, 2 };
301  CreateConvolution2dGraph(graph, inputShape, weightsShape, outputShape, DataLayout::NHWC);
302 
303  BOOST_CHECK_NO_THROW(graph.InferTensorInfos());
304 }
void InferTensorInfos()
Definition: Graph.cpp:493
void CreateConvolution2dGraph(Graph &graph, const unsigned int *inputShape, const unsigned int *weightsShape, const unsigned int *outputShape, DataLayout dataLayout=DataLayout::NCHW)

◆ BOOST_AUTO_TEST_CASE() [6/17]

BOOST_AUTO_TEST_CASE ( DepthwiseConv2dValidateTensorShapesFromInputs  )

Definition at line 334 of file OptimizerTests.cpp.

References CreateDepthwiseConvolution2dGraph(), and Graph::InferTensorInfos().

335 {
336  Graph graph;
337  const unsigned int inputShape[] = { 1, 2, 3, 3 };
338  const unsigned int weightsShape[] = { 1, 2, 3, 3 };
339  const unsigned int outputShape[] = { 1, 2, 1, 1 };
340  CreateDepthwiseConvolution2dGraph(graph, inputShape, weightsShape, outputShape);
341 
342  BOOST_CHECK_NO_THROW(graph.InferTensorInfos());
343 }
void InferTensorInfos()
Definition: Graph.cpp:493
void CreateDepthwiseConvolution2dGraph(Graph &graph, const unsigned int *inputShape, const unsigned int *weightsShape, const unsigned int *outputShape, DataLayout dataLayout=DataLayout::NCHW)

◆ BOOST_AUTO_TEST_CASE() [7/17]

BOOST_AUTO_TEST_CASE ( DepthwiseConv2dValidateTensorShapesFromInputsNhwc  )

Definition at line 345 of file OptimizerTests.cpp.

References CreateDepthwiseConvolution2dGraph(), Graph::InferTensorInfos(), and armnn::NHWC.

346 {
347  Graph graph;
348  const unsigned int inputShape[] = { 1, 3, 3, 2 };
349  const unsigned int weightsShape[] = { 1, 2, 3, 3 };
350  const unsigned int outputShape[] = { 1, 1, 1, 2 };
351  CreateDepthwiseConvolution2dGraph(graph, inputShape, weightsShape, outputShape, DataLayout::NHWC);
352 
353  BOOST_CHECK_NO_THROW(graph.InferTensorInfos());
354 }
void InferTensorInfos()
Definition: Graph.cpp:493
void CreateDepthwiseConvolution2dGraph(Graph &graph, const unsigned int *inputShape, const unsigned int *weightsShape, const unsigned int *outputShape, DataLayout dataLayout=DataLayout::NCHW)

◆ BOOST_AUTO_TEST_CASE() [8/17]

BOOST_AUTO_TEST_CASE ( Pooling2dValidateTensorShapesFromInputs  )

Definition at line 384 of file OptimizerTests.cpp.

References CreatePooling2dGraph(), Graph::InferTensorInfos(), and armnn::NCHW.

385 {
386  Graph graph;
387  const unsigned int inputShape[] = { 5, 3, 52, 60 };
388  const unsigned int outputShape[] = { 5, 3, 11, 13 };
389  CreatePooling2dGraph(graph, inputShape, outputShape, DataLayout::NCHW);
390 
391  BOOST_CHECK_NO_THROW(graph.InferTensorInfos());
392 }
void CreatePooling2dGraph(Graph &graph, const unsigned int *inputShape, const unsigned int *outputShape, DataLayout dataLayout=DataLayout::NCHW)
void InferTensorInfos()
Definition: Graph.cpp:493

◆ BOOST_AUTO_TEST_CASE() [9/17]

BOOST_AUTO_TEST_CASE ( Pooling2dValidateTensorShapesFromInputsNhwc  )

Definition at line 394 of file OptimizerTests.cpp.

References CreatePooling2dGraph(), Graph::InferTensorInfos(), and armnn::NHWC.

395 {
396  Graph graph;
397  const unsigned int inputShape[] = { 5, 52, 60, 3 };
398  const unsigned int outputShape[] = { 5, 11, 13, 3 };
399  CreatePooling2dGraph(graph, inputShape, outputShape, DataLayout::NHWC);
400 
401  BOOST_CHECK_NO_THROW(graph.InferTensorInfos());
402 }
void CreatePooling2dGraph(Graph &graph, const unsigned int *inputShape, const unsigned int *outputShape, DataLayout dataLayout=DataLayout::NCHW)
void InferTensorInfos()
Definition: Graph.cpp:493

◆ BOOST_AUTO_TEST_CASE() [10/17]

BOOST_AUTO_TEST_CASE ( ResizeBilinearValidateTensorShapesFromInputs  )

Definition at line 427 of file OptimizerTests.cpp.

References CreateResizeBilinearGraph(), and Graph::InferTensorInfos().

428 {
429  Graph graph;
430  const unsigned int inputShape[] = { 1, 2, 4, 5 };
431  const unsigned int outputShape[] = { 1, 2, 3, 4 };
432  CreateResizeBilinearGraph(graph, inputShape, outputShape);
433 
434  BOOST_CHECK_NO_THROW(graph.InferTensorInfos());
435 }
void CreateResizeBilinearGraph(Graph &graph, const unsigned int *inputShape, const unsigned int *outputShape, DataLayout dataLayout=DataLayout::NCHW)
void InferTensorInfos()
Definition: Graph.cpp:493

◆ BOOST_AUTO_TEST_CASE() [11/17]

BOOST_AUTO_TEST_CASE ( ResizeBilinearValidateTensorShapesFromInputsNhwc  )

Definition at line 437 of file OptimizerTests.cpp.

References CreateResizeBilinearGraph(), Graph::InferTensorInfos(), and armnn::NHWC.

438 {
439  Graph graph;
440  const unsigned int inputShape[] = { 1, 4, 5, 2 };
441  const unsigned int outputShape[] = { 1, 3, 4, 2 };
442  CreateResizeBilinearGraph(graph, inputShape, outputShape, DataLayout::NHWC);
443 
444  BOOST_CHECK_NO_THROW(graph.InferTensorInfos());
445 }
void CreateResizeBilinearGraph(Graph &graph, const unsigned int *inputShape, const unsigned int *outputShape, DataLayout dataLayout=DataLayout::NCHW)
void InferTensorInfos()
Definition: Graph.cpp:493

◆ BOOST_AUTO_TEST_CASE() [12/17]

BOOST_AUTO_TEST_CASE ( GatherValidateTensorShapesFromInputs  )

Definition at line 466 of file OptimizerTests.cpp.

References CreateGatherGraph(), armnn::Float32, Graph::InferTensorInfos(), and armnn::Signed32.

467 {
468  Graph graph;
469  armnn::TensorInfo paramsInfo({10, 5}, DataType::Float32);
470  armnn::TensorInfo indicesInfo({3}, DataType::Signed32);
471  armnn::TensorInfo outputInfo({3, 5}, DataType::Float32);
472 
473  CreateGatherGraph(graph, paramsInfo, indicesInfo, outputInfo);
474 
475  BOOST_CHECK_NO_THROW(graph.InferTensorInfos());
476 }
void CreateGatherGraph(Graph &graph, const armnn::TensorInfo &paramsInfo, const armnn::TensorInfo &indicesInfo, const armnn::TensorInfo &outputInfo)
void InferTensorInfos()
Definition: Graph.cpp:493

◆ BOOST_AUTO_TEST_CASE() [13/17]

BOOST_AUTO_TEST_CASE ( GatherValidateTensorShapesFromInputs1DParams  )

Definition at line 478 of file OptimizerTests.cpp.

References CreateGatherGraph(), armnn::Float32, Graph::InferTensorInfos(), and armnn::Signed32.

479 {
480  Graph graph;
481  armnn::TensorInfo paramsInfo({8}, DataType::Float32);
482  armnn::TensorInfo indicesInfo({5}, DataType::Signed32);
483  armnn::TensorInfo outputInfo( {5}, DataType::Float32);
484 
485  CreateGatherGraph(graph, paramsInfo, indicesInfo, outputInfo);
486 
487  BOOST_CHECK_NO_THROW(graph.InferTensorInfos());
488 }
void CreateGatherGraph(Graph &graph, const armnn::TensorInfo &paramsInfo, const armnn::TensorInfo &indicesInfo, const armnn::TensorInfo &outputInfo)
void InferTensorInfos()
Definition: Graph.cpp:493

◆ BOOST_AUTO_TEST_CASE() [14/17]

BOOST_AUTO_TEST_CASE ( GatherValidateTensorShapesFromInputsMultiDimIndices  )

Definition at line 490 of file OptimizerTests.cpp.

References CreateGatherGraph(), armnn::Float32, Graph::InferTensorInfos(), and armnn::Signed32.

491 {
492  Graph graph;
493  armnn::TensorInfo paramsInfo({3, 2, 5}, DataType::Float32);
494  armnn::TensorInfo indicesInfo({2, 2}, DataType::Signed32);
495  armnn::TensorInfo outputInfo({2, 2, 2, 5}, DataType::Float32);
496 
497  CreateGatherGraph(graph, paramsInfo, indicesInfo, outputInfo);
498 
499  BOOST_CHECK_NO_THROW(graph.InferTensorInfos());
500 }
void CreateGatherGraph(Graph &graph, const armnn::TensorInfo &paramsInfo, const armnn::TensorInfo &indicesInfo, const armnn::TensorInfo &outputInfo)
void InferTensorInfos()
Definition: Graph.cpp:493

◆ BOOST_AUTO_TEST_CASE() [15/17]

BOOST_AUTO_TEST_CASE ( DetectionPostProcessValidateTensorShapes  )

Definition at line 502 of file OptimizerTests.cpp.

References Graph::AddLayer(), anchors(), OutputSlot::Connect(), Layer::GetInputSlot(), Layer::GetOutputSlot(), Graph::InferTensorInfos(), DetectionPostProcessLayer::m_Anchors, DetectionPostProcessDescriptor::m_MaxDetections, armnn::QAsymmU8, scoresInfo, and OutputSlot::SetTensorInfo().

503 {
504  Graph graph;
505  armnn::TensorInfo boxEncodingsInfo({1, 10, 4}, DataType::QAsymmU8);
506  armnn::TensorInfo scoresInfo({1, 10, 4}, DataType::QAsymmU8);
507  std::vector<uint8_t> anchorsVector(40);
508  armnn::ConstTensor anchors(armnn::TensorInfo({10, 4}, armnn::DataType::QAsymmU8), anchorsVector);
509 
510  armnn::TensorInfo detectionBoxesInfo({1, 3, 4}, DataType::QAsymmU8);
511  armnn::TensorInfo detectionScoresInfo({1, 3}, DataType::QAsymmU8);
512  armnn::TensorInfo detectionClassesInfo({1, 3}, DataType::QAsymmU8);
513  armnn::TensorInfo numDetectionInfo({1}, DataType::QAsymmU8);
514 
515  Layer* input0 = graph.AddLayer<InputLayer>(0, "boxEncodings");
516  input0->GetOutputSlot().SetTensorInfo(boxEncodingsInfo);
517 
518  Layer* input1 = graph.AddLayer<InputLayer>(1, "score");
519  input1->GetOutputSlot().SetTensorInfo(scoresInfo);
520 
521  DetectionPostProcessDescriptor descriptor;
522  descriptor.m_MaxDetections = 3;
523 
524  DetectionPostProcessLayer* layer = graph.AddLayer<DetectionPostProcessLayer>(descriptor, "detectionPostProcess");
525  layer->m_Anchors = std::make_unique<armnn::ScopedCpuTensorHandle>(anchors);
526  layer->GetOutputSlot(0).SetTensorInfo(detectionBoxesInfo);
527  layer->GetOutputSlot(1).SetTensorInfo(detectionScoresInfo);
528  layer->GetOutputSlot(2).SetTensorInfo(detectionClassesInfo);
529  layer->GetOutputSlot(3).SetTensorInfo(numDetectionInfo);
530 
531  input0->GetOutputSlot().Connect(layer->GetInputSlot(0));
532  input1->GetOutputSlot().Connect(layer->GetInputSlot(1));
533 
534  BOOST_CHECK_NO_THROW(graph.InferTensorInfos());
535 }
LayerT * AddLayer(Args &&... args)
Adds a new layer, of type LayerType, to the graph constructed with the arguments passed.
Definition: Graph.hpp:397
int Connect(InputSlot &destination)
Definition: Layer.cpp:79
This layer represents a detection postprocess operator.
uint32_t m_MaxDetections
Maximum numbers of detections.
const InputSlot & GetInputSlot(unsigned int index) const override
Get a const input slot handle by slot index.
Definition: Layer.hpp:310
std::unique_ptr< ScopedCpuTensorHandle > m_Anchors
A unique pointer to store Anchor values.
A tensor defined by a TensorInfo (shape and data type) and an immutable backing store.
Definition: Tensor.hpp:199
void InferTensorInfos()
Definition: Graph.cpp:493
A layer user-provided data can be bound to (e.g. inputs, outputs).
Definition: InputLayer.hpp:13
void SetTensorInfo(const TensorInfo &tensorInfo) override
Definition: Layer.cpp:58
armnn::TensorInfo scoresInfo({ 1, 6, 3 }, armnn::DataType::Float32)
const OutputSlot & GetOutputSlot(unsigned int index=0) const override
Get the const output slot handle by slot index.
Definition: Layer.hpp:312
std::vector< float > anchors({ 0.5f, 0.5f, 1.0f, 1.0f, 0.5f, 0.5f, 1.0f, 1.0f, 0.5f, 0.5f, 1.0f, 1.0f, 0.5f, 10.5f, 1.0f, 1.0f, 0.5f, 10.5f, 1.0f, 1.0f, 0.5f, 100.5f, 1.0f, 1.0f })

◆ BOOST_AUTO_TEST_CASE() [16/17]

BOOST_AUTO_TEST_CASE ( FoldPadLayerIntoConvolution2dLayer  )

Definition at line 537 of file OptimizerTests.cpp.

References Graph::AddLayer(), Graph::cbegin(), Graph::cend(), CheckSequence(), OutputSlot::Connect(), armnn::Float32, Layer::GetInputSlot(), Layer::GetNameStr(), Layer::GetOutputSlot(), LayerWithParameters< Parameters >::GetParameters(), armnn::IsActivationSupported(), armnn::IsInputSupported(), armnn::IsOutputSupported(), Convolution2dDescriptor::m_BiasEnabled, Convolution2dDescriptor::m_DataLayout, Convolution2dDescriptor::m_StrideX, Convolution2dDescriptor::m_StrideY, Convolution2dLayer::m_Weight, armnn::MakeOptimizations(), armnn::NHWC, Optimizer::Pass(), and OutputSlot::SetTensorInfo().

538 {
539  Graph graph;
540  const unsigned int inputShape[] = { 1, 2, 2, 3 };
541  const unsigned int paddedShape[] = { 1, 6, 6, 3 };
542  const unsigned int weightsShape[] = { 1, 2, 3, 3 };
543  const unsigned int outputShape[] = { 1, 2, 1, 1 };
544 
545 
546  armnn::TensorInfo inputInfo(4, inputShape, DataType::Float32);
547  armnn::TensorInfo paddedInfo(4, paddedShape, DataType::Float32);
548  armnn::TensorInfo outputInfo(4, outputShape, DataType::Float32);
549 
550  Layer* input = graph.AddLayer<InputLayer>(0, "input");
551  input->GetOutputSlot().SetTensorInfo(inputInfo);
552 
553  PadDescriptor padDescriptor({{ 0, 0 }, { 2, 2 }, { 2, 2 }, { 0, 0 }});
554 
555  PadLayer* padLayer = graph.AddLayer<PadLayer>(padDescriptor, "pad");
556  padLayer->GetOutputSlot().SetTensorInfo(paddedInfo);
557 
558  Convolution2dDescriptor convolution2dDescriptor;
559  convolution2dDescriptor.m_BiasEnabled = false;
560  convolution2dDescriptor.m_StrideX = 1;
561  convolution2dDescriptor.m_StrideY = 1;
562  convolution2dDescriptor.m_DataLayout = DataLayout::NHWC;
563 
564  std::vector<float> weightsVector(18);
565  armnn::ConstTensor weights(armnn::TensorInfo(4, weightsShape, armnn::DataType::Float32), weightsVector);
566 
567  Convolution2dLayer* conv2dLayer = graph.AddLayer<Convolution2dLayer>(convolution2dDescriptor,"conv2d");
568  conv2dLayer->m_Weight = std::make_unique<armnn::ScopedCpuTensorHandle>(weights);
569  conv2dLayer->GetOutputSlot().SetTensorInfo(outputInfo);
570 
571  Layer* output = graph.AddLayer<OutputLayer>(0, "output");
572 
573  // Connect up layers - input -> pad -> conv2d -> output
574  input->GetOutputSlot().Connect(padLayer->GetInputSlot(0));
575  padLayer->GetOutputSlot().Connect(conv2dLayer->GetInputSlot(0));
576  conv2dLayer->GetOutputSlot().Connect(output->GetInputSlot(0));
577 
578  auto checkSimpleConv2d = [ ](const armnn::Layer* const layer) -> bool
579  {
580  const auto conv2dLayer = static_cast<const armnn::Convolution2dLayer*>(layer);
581  const auto conv2dLayerParams = conv2dLayer->GetParameters();
582  return IsLayerOfType<armnn::Convolution2dLayer>(layer) &&
583  (layer->GetNameStr() == "conv2d") &&
584  (conv2dLayerParams.m_PadLeft == 0) &&
585  (conv2dLayerParams.m_PadRight == 0) &&
586  (conv2dLayerParams.m_PadTop == 0) &&
587  (conv2dLayerParams.m_PadBottom == 0) &&
588  (conv2dLayerParams.m_BiasEnabled == false) &&
589  (conv2dLayerParams.m_StrideX == 1) &&
590  (conv2dLayerParams.m_StrideY == 1) &&
591  (conv2dLayerParams.m_DataLayout == DataLayout::NHWC);
592  };
593 
594  BOOST_TEST(CheckSequence(graph.cbegin(),
595  graph.cend(),
596  &IsLayerOfType<armnn::InputLayer>,
597  &IsLayerOfType<armnn::PadLayer>,
598  checkSimpleConv2d,
599  &IsLayerOfType<armnn::OutputLayer>));
600 
601  armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(FoldPadIntoConvolution2d()));
602 
603  auto checkPadFoldedIntoConv2d = [ ](const armnn::Layer* const layer) -> bool
604  {
605  const auto conv2dLayer = static_cast<const armnn::Convolution2dLayer*>(layer);
606  const auto conv2dLayerParams = conv2dLayer->GetParameters();
607  return IsLayerOfType<armnn::Convolution2dLayer>(layer) &&
608  (layer->GetNameStr() == "folded-pad-into-conv2d") &&
609  (conv2dLayerParams.m_PadLeft == 2) &&
610  (conv2dLayerParams.m_PadRight == 2) &&
611  (conv2dLayerParams.m_PadTop == 2) &&
612  (conv2dLayerParams.m_PadBottom == 2) &&
613  (conv2dLayerParams.m_BiasEnabled == false) &&
614  (conv2dLayerParams.m_StrideX == 1) &&
615  (conv2dLayerParams.m_StrideY == 1) &&
616  (conv2dLayerParams.m_DataLayout == DataLayout::NHWC);
617  };
618 
619  BOOST_TEST(CheckSequence(graph.cbegin(),
620  graph.cend(),
621  &IsLayerOfType<armnn::InputLayer>,
622  checkPadFoldedIntoConv2d,
623  &IsLayerOfType<armnn::OutputLayer>));
624 }
bool m_BiasEnabled
Enable/disable bias.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
const Parameters & GetParameters() const
Optimizer::Optimizations MakeOptimizations(Args &&... args)
Definition: Optimizer.hpp:43
LayerT * AddLayer(Args &&... args)
Adds a new layer, of type LayerType, to the graph constructed with the arguments passed.
Definition: Graph.hpp:397
ConstIterator cbegin() const
Returns const iterator pointing to the beginning of the list. Lowercase for range-based for loops...
Definition: Graph.hpp:168
A Convolution2dDescriptor for the Convolution2dLayer.
int Connect(InputSlot &destination)
Definition: Layer.cpp:79
static void Pass(Graph &graph, const Optimizations &optimizations)
Definition: Optimizer.cpp:16
This layer represents a pad operation.
Definition: PadLayer.hpp:14
OptimizeForConnection< PadLayer, Convolution2dLayer, FoldPadIntoConvolution2dImpl > FoldPadIntoConvolution2d
A PadDescriptor for the PadLayer.
const InputSlot & GetInputSlot(unsigned int index) const override
Get a const input slot handle by slot index.
Definition: Layer.hpp:310
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
A layer user-provided data can be bound to (e.g. inputs, outputs).
Definition: OutputLayer.hpp:13
std::unique_ptr< ScopedCpuTensorHandle > m_Weight
A unique pointer to store Weight values.
A tensor defined by a TensorInfo (shape and data type) and an immutable backing store.
Definition: Tensor.hpp:199
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
A layer user-provided data can be bound to (e.g. inputs, outputs).
Definition: InputLayer.hpp:13
bool CheckSequence(const armnn::Graph::ConstIterator first, const armnn::Graph::ConstIterator last)
Definition: TestUtils.hpp:20
void SetTensorInfo(const TensorInfo &tensorInfo) override
Definition: Layer.cpp:58
const OutputSlot & GetOutputSlot(unsigned int index=0) const override
Get the const output slot handle by slot index.
Definition: Layer.hpp:312
ConstIterator cend() const
Returns const iterator pointing to the end of the list. Lowercase for range-based for loops...
Definition: Graph.hpp:170
This layer represents a convolution 2d operation.

◆ BOOST_AUTO_TEST_CASE() [17/17]

BOOST_AUTO_TEST_CASE ( BackendHintTest  )

Definition at line 685 of file OptimizerTests.cpp.

References armnn::AssignBackends(), armnn::BackendRegistryInstance(), Layer::BackendSelectionHint(), BOOST_AUTO_TEST_SUITE_END(), OutputSlot::Connect(), INetwork::Create(), IOptimizedNetwork::Destroy(), OptimizedNetwork::GetGraph(), Layer::GetInputSlot(), Layer::GetOutputSlot(), armnn::IgnoreUnused(), OptimizationResult::IsOk(), armnn::Linear, and ActivationDescriptor::m_Function.

686 {
687  class TestBackendAssignment : public LayerVisitorBase<VisitorNoThrowPolicy>
688  {
689  public:
690  void VisitInputLayer(const IConnectableLayer* layer,
691  LayerBindingId id,
692  const char* name = nullptr) override
693  {
694  IgnoreUnused(id, name);
695  auto inputLayer = boost::polymorphic_downcast<const InputLayer*>(layer);
696  BOOST_TEST((inputLayer->GetBackendId() == "MockBackend"));
697  }
698 
699  void VisitOutputLayer(const IConnectableLayer* layer,
700  LayerBindingId id,
701  const char* name = nullptr) override
702  {
703  IgnoreUnused(id, name);
704  auto outputLayer = boost::polymorphic_downcast<const OutputLayer*>(layer);
705  BOOST_TEST((outputLayer->GetBackendId() == "MockBackend"));
706  }
707 
708  void VisitActivationLayer(const IConnectableLayer* layer,
709  const ActivationDescriptor& activationDescriptor,
710  const char* name = nullptr) override
711  {
712  IgnoreUnused(activationDescriptor, name);
713  auto activation = boost::polymorphic_downcast<const ActivationLayer*>(layer);
714  BOOST_TEST((activation->GetBackendId() == "CustomBackend"));
715  }
716  };
717 
718  struct CustomPolicy
719  {
720  static const BackendId& GetIdStatic()
721  {
722  static BackendId id="CustomBackend";
723  return id;
724  }
725  };
726 
727  struct MockPolicy
728  {
729  static const BackendId& GetIdStatic()
730  {
731  static BackendId id="MockBackend";
732  return id;
733  }
734  };
735 
736  auto& backendRegistry = BackendRegistryInstance();
737 
738  backendRegistry.Register("MockBackend", [](){
739  return std::make_unique<MockBackend<MockPolicy>>();
740  });
741 
742  backendRegistry.Register("CustomBackend", [](){
743  return std::make_unique<MockBackend<CustomPolicy>>();
744  });
745 
746  // Define the network
747  auto network = INetwork::Create();
748  ActivationDescriptor desc;
749  desc.m_Function = ActivationFunction::Linear;
750 
751  std::unique_ptr<Graph> graph = std::make_unique<Graph>();
752  auto input = graph->AddLayer<InputLayer>(0, "input");
753  auto act = graph->AddLayer<ActivationLayer>(desc, "activation");
754  auto output = graph->AddLayer<OutputLayer>(0, "output");
755 
756  BackendId customBackendId("CustomBackend");
757  act->BackendSelectionHint(customBackendId);
758 
759  input->GetOutputSlot(0).Connect(act->GetInputSlot(0));
760  act->GetOutputSlot(0).Connect(output->GetInputSlot(0));
761 
762 
763  auto optNet = IOptimizedNetworkPtr(new OptimizedNetwork(std::move(graph)), &IOptimizedNetwork::Destroy);
764 
765  OptimizedNetwork* optNetObjPtr = boost::polymorphic_downcast<OptimizedNetwork*>(optNet.get());
766 
767  // Get the optimized graph
768  Graph& optGraph = optNetObjPtr->GetGraph();
769 
770 
771  std::vector<BackendId> prefs{"MockBackend", "CustomBackend"};
772 
773  BackendIdSet availableBackends = {"CustomBackend", "MockBackend"};
774  DeviceSpec spec(availableBackends);
775 
776  BackendSettings backendSettings(prefs, spec);
777 
778  // Assign an available backend to each layer
779  Graph::Iterator firstLayer = optGraph.begin();
780  Graph::Iterator lastLayer = optGraph.end();
781  OptimizationResult res = AssignBackends(optNetObjPtr,
782  backendSettings,
783  firstLayer,
784  lastLayer,
785  EmptyOptional());
786 
787  BOOST_TEST(res.IsOk());
788 
789  TestBackendAssignment visitor;
790  for (auto it =firstLayer; it != lastLayer; ++it)
791  {
792  (*it)->Accept(visitor);
793  }
794 }
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
Definition: INetwork.hpp:61
void VisitInputLayer(const IConnectableLayer *, LayerBindingId, const char *) override
Function that an InputLayer should call back to when its Accept(ILayerVisitor&) function is invoked...
std::unordered_set< BackendId > BackendIdSet
Definition: BackendId.hpp:191
void BackendSelectionHint(Optional< BackendId > backend) final
Provide a hint for the optimizer as to which backend to prefer for this layer.
Definition: Layer.hpp:324
int Connect(InputSlot &destination)
Definition: Layer.cpp:79
This layer represents an activation operation with the specified activation function.
BackendRegistry & BackendRegistryInstance()
void IgnoreUnused(Ts &&...)
LayerList::const_iterator Iterator
Definition: Graph.hpp:50
int LayerBindingId
Type of identifiers for bindable layers (inputs, outputs).
Definition: Types.hpp:171
const InputSlot & GetInputSlot(unsigned int index) const override
Get a const input slot handle by slot index.
Definition: Layer.hpp:310
A layer user-provided data can be bound to (e.g. inputs, outputs).
Definition: OutputLayer.hpp:13
std::unique_ptr< IOptimizedNetwork, void(*)(IOptimizedNetwork *network)> IOptimizedNetworkPtr
Definition: INetwork.hpp:566
An ActivationDescriptor for the ActivationLayer.
Definition: Descriptors.hpp:20
Visitor base class with empty implementations.
OptimizationResult AssignBackends(OptimizedNetwork *optNetObjPtr, BackendSettings &backendSettings, Graph::Iterator &firstLayer, Graph::Iterator &lastLayer, Optional< std::vector< std::string > &> errMessages)
Definition: Network.cpp:269
EmptyOptional is used to initialize the Optional class in case we want to have default value for an O...
Definition: Optional.hpp:32
A layer user-provided data can be bound to (e.g. inputs, outputs).
Definition: InputLayer.hpp:13
void VisitOutputLayer(const IConnectableLayer *, LayerBindingId, const char *) override
Function an output layer should call back to when its Accept(ILayerVisitor&) function is invoked...
const OutputSlot & GetOutputSlot(unsigned int index=0) const override
Get the const output slot handle by slot index.
Definition: Layer.hpp:312
ActivationFunction m_Function
The activation function to use (Sigmoid, TanH, Linear, ReLu, BoundedReLu, SoftReLu, LeakyReLu, Abs, Sqrt, Square).
Definition: Descriptors.hpp:35
void VisitActivationLayer(const IConnectableLayer *, const ActivationDescriptor &, const char *) override
Function that an activation layer should call back to when its Accept(ILayerVisitor&) function is inv...

◆ CreateConvolution2dGraph()

void CreateConvolution2dGraph ( Graph graph,
const unsigned int *  inputShape,
const unsigned int *  weightsShape,
const unsigned int *  outputShape,
DataLayout  dataLayout = DataLayout::NCHW 
)

Definition at line 256 of file OptimizerTests.cpp.

References Graph::AddLayer(), OutputSlot::Connect(), armnn::Float32, Layer::GetInputSlot(), Layer::GetOutputSlot(), Convolution2dDescriptor::m_BiasEnabled, Convolution2dDescriptor::m_DataLayout, Convolution2dDescriptor::m_StrideX, Convolution2dDescriptor::m_StrideY, Convolution2dLayer::m_Weight, and OutputSlot::SetTensorInfo().

Referenced by BOOST_AUTO_TEST_CASE().

259 {
260  armnn::TensorInfo inputInfo(4, inputShape, DataType::Float32);
261  armnn::TensorInfo outputInfo(4, outputShape, DataType::Float32);
262 
263  std::vector<float> weightsVector(90);
264  armnn::ConstTensor weights(armnn::TensorInfo(4, weightsShape, armnn::DataType::Float32), weightsVector);
265 
267  desc.m_BiasEnabled = false;
268  desc.m_StrideX = 1;
269  desc.m_StrideY = 1;
270  desc.m_DataLayout = dataLayout;
271 
272  Layer* input = graph.AddLayer<InputLayer>(0, "input");
273  input->GetOutputSlot().SetTensorInfo(inputInfo);
274 
275  Convolution2dLayer* layer = graph.AddLayer<Convolution2dLayer>(desc, "conv2d");
276  layer->m_Weight = std::make_unique<armnn::ScopedCpuTensorHandle>(weights);
277  layer->GetOutputSlot().SetTensorInfo(outputInfo);
278 
279  Layer* output = graph.AddLayer<OutputLayer>(0, "output");
280  input->GetOutputSlot().Connect(layer->GetInputSlot(0));
281  layer->GetOutputSlot().Connect(output->GetInputSlot(0));
282 }
bool m_BiasEnabled
Enable/disable bias.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
LayerT * AddLayer(Args &&... args)
Adds a new layer, of type LayerType, to the graph constructed with the arguments passed.
Definition: Graph.hpp:397
A Convolution2dDescriptor for the Convolution2dLayer.
int Connect(InputSlot &destination)
Definition: Layer.cpp:79
const InputSlot & GetInputSlot(unsigned int index) const override
Get a const input slot handle by slot index.
Definition: Layer.hpp:310
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
A layer user-provided data can be bound to (e.g. inputs, outputs).
Definition: OutputLayer.hpp:13
std::unique_ptr< ScopedCpuTensorHandle > m_Weight
A unique pointer to store Weight values.
A tensor defined by a TensorInfo (shape and data type) and an immutable backing store.
Definition: Tensor.hpp:199
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
A layer user-provided data can be bound to (e.g. inputs, outputs).
Definition: InputLayer.hpp:13
void SetTensorInfo(const TensorInfo &tensorInfo) override
Definition: Layer.cpp:58
const OutputSlot & GetOutputSlot(unsigned int index=0) const override
Get the const output slot handle by slot index.
Definition: Layer.hpp:312
This layer represents a convolution 2d operation.

◆ CreateDepthwiseConvolution2dGraph()

void CreateDepthwiseConvolution2dGraph ( Graph graph,
const unsigned int *  inputShape,
const unsigned int *  weightsShape,
const unsigned int *  outputShape,
DataLayout  dataLayout = DataLayout::NCHW 
)

Definition at line 306 of file OptimizerTests.cpp.

References Graph::AddLayer(), OutputSlot::Connect(), armnn::Float32, Layer::GetInputSlot(), Layer::GetOutputSlot(), DepthwiseConvolution2dDescriptor::m_BiasEnabled, DepthwiseConvolution2dDescriptor::m_DataLayout, DepthwiseConvolution2dDescriptor::m_StrideX, DepthwiseConvolution2dDescriptor::m_StrideY, DepthwiseConvolution2dLayer::m_Weight, and OutputSlot::SetTensorInfo().

Referenced by BOOST_AUTO_TEST_CASE().

309 {
310  armnn::TensorInfo inputInfo(4, inputShape, DataType::Float32);
311  armnn::TensorInfo outputInfo(4, outputShape, DataType::Float32);
312 
313  std::vector<float> weightsVector(18);
314  armnn::ConstTensor weights(armnn::TensorInfo(4, weightsShape, armnn::DataType::Float32), weightsVector);
315 
317  desc.m_BiasEnabled = false;
318  desc.m_StrideX = 1;
319  desc.m_StrideY = 1;
320  desc.m_DataLayout = dataLayout;
321 
322  Layer* input = graph.AddLayer<InputLayer>(0, "input");
323  input->GetOutputSlot().SetTensorInfo(inputInfo);
324 
325  DepthwiseConvolution2dLayer* layer = graph.AddLayer<DepthwiseConvolution2dLayer>(desc, "depthwiseConv2d");
326  layer->m_Weight = std::make_unique<armnn::ScopedCpuTensorHandle>(weights);
327  layer->GetOutputSlot().SetTensorInfo(outputInfo);
328 
329  Layer* output = graph.AddLayer<OutputLayer>(0, "output");
330  input->GetOutputSlot().Connect(layer->GetInputSlot(0));
331  layer->GetOutputSlot().Connect(output->GetInputSlot(0));
332 }
bool m_BiasEnabled
Enable/disable bias.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
This layer represents a depthwise convolution 2d operation.
LayerT * AddLayer(Args &&... args)
Adds a new layer, of type LayerType, to the graph constructed with the arguments passed.
Definition: Graph.hpp:397
int Connect(InputSlot &destination)
Definition: Layer.cpp:79
const InputSlot & GetInputSlot(unsigned int index) const override
Get a const input slot handle by slot index.
Definition: Layer.hpp:310
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
A layer user-provided data can be bound to (e.g. inputs, outputs).
Definition: OutputLayer.hpp:13
A tensor defined by a TensorInfo (shape and data type) and an immutable backing store.
Definition: Tensor.hpp:199
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
A layer user-provided data can be bound to (e.g. inputs, outputs).
Definition: InputLayer.hpp:13
void SetTensorInfo(const TensorInfo &tensorInfo) override
Definition: Layer.cpp:58
const OutputSlot & GetOutputSlot(unsigned int index=0) const override
Get the const output slot handle by slot index.
Definition: Layer.hpp:312
std::unique_ptr< ScopedCpuTensorHandle > m_Weight
A unique pointer to store Weight values.
A DepthwiseConvolution2dDescriptor for the DepthwiseConvolution2dLayer.

◆ CreateGatherGraph()

void CreateGatherGraph ( Graph graph,
const armnn::TensorInfo paramsInfo,
const armnn::TensorInfo indicesInfo,
const armnn::TensorInfo outputInfo 
)

Definition at line 448 of file OptimizerTests.cpp.

References Graph::AddLayer(), OutputSlot::Connect(), Layer::GetInputSlot(), Layer::GetOutputSlot(), and OutputSlot::SetTensorInfo().

Referenced by BOOST_AUTO_TEST_CASE().

450 {
451  Layer* input0 = graph.AddLayer<InputLayer>(0, "params");
452  input0->GetOutputSlot().SetTensorInfo(paramsInfo);
453 
454  Layer* input1 = graph.AddLayer<InputLayer>(1, "indices");
455  input1->GetOutputSlot().SetTensorInfo(indicesInfo);
456 
457  GatherLayer* layer = graph.AddLayer<GatherLayer>("gather");
458  layer->GetOutputSlot().SetTensorInfo(outputInfo);
459 
460  Layer* output = graph.AddLayer<OutputLayer>(0, "output");
461  input0->GetOutputSlot().Connect(layer->GetInputSlot(0));
462  input1->GetOutputSlot().Connect(layer->GetInputSlot(1));
463  layer->GetOutputSlot().Connect(output->GetInputSlot(0));
464 }
LayerT * AddLayer(Args &&... args)
Adds a new layer, of type LayerType, to the graph constructed with the arguments passed.
Definition: Graph.hpp:397
int Connect(InputSlot &destination)
Definition: Layer.cpp:79
const InputSlot & GetInputSlot(unsigned int index) const override
Get a const input slot handle by slot index.
Definition: Layer.hpp:310
A layer user-provided data can be bound to (e.g. inputs, outputs).
Definition: OutputLayer.hpp:13
This layer represents a Gather operator.
Definition: GatherLayer.hpp:14
A layer user-provided data can be bound to (e.g. inputs, outputs).
Definition: InputLayer.hpp:13
void SetTensorInfo(const TensorInfo &tensorInfo) override
Definition: Layer.cpp:58
const OutputSlot & GetOutputSlot(unsigned int index=0) const override
Get the const output slot handle by slot index.
Definition: Layer.hpp:312

◆ CreatePooling2dGraph()

void CreatePooling2dGraph ( Graph graph,
const unsigned int *  inputShape,
const unsigned int *  outputShape,
DataLayout  dataLayout = DataLayout::NCHW 
)

Definition at line 356 of file OptimizerTests.cpp.

References Graph::AddLayer(), armnn::Average, OutputSlot::Connect(), armnn::Exclude, armnn::Float32, Layer::GetInputSlot(), Layer::GetOutputSlot(), Pooling2dDescriptor::m_DataLayout, Pooling2dDescriptor::m_PadBottom, Pooling2dDescriptor::m_PaddingMethod, Pooling2dDescriptor::m_PadLeft, Pooling2dDescriptor::m_PadRight, Pooling2dDescriptor::m_PadTop, Pooling2dDescriptor::m_PoolHeight, Pooling2dDescriptor::m_PoolType, Pooling2dDescriptor::m_PoolWidth, Pooling2dDescriptor::m_StrideX, Pooling2dDescriptor::m_StrideY, and OutputSlot::SetTensorInfo().

Referenced by BOOST_AUTO_TEST_CASE().

358 {
359  armnn::TensorInfo inputInfo(4, inputShape, DataType::Float32);
360  armnn::TensorInfo outputInfo(4, outputShape, DataType::Float32);
361 
362  Pooling2dDescriptor desc;
364  desc.m_PoolWidth = desc.m_PoolHeight = 100;
365  desc.m_StrideX = desc.m_StrideY = 5;
366  desc.m_PadLeft = 50;
367  desc.m_PadRight = 50;
368  desc.m_PadTop = 50;
369  desc.m_PadBottom = 50;
371  desc.m_DataLayout = dataLayout;
372 
373  Layer* input = graph.AddLayer<InputLayer>(0, "input");
374  input->GetOutputSlot().SetTensorInfo(inputInfo);
375 
376  Pooling2dLayer* layer = graph.AddLayer<Pooling2dLayer>(desc, "pooling2d");
377  layer->GetOutputSlot().SetTensorInfo(outputInfo);
378 
379  Layer* output = graph.AddLayer<OutputLayer>(0, "output");
380  input->GetOutputSlot().Connect(layer->GetInputSlot(0));
381  layer->GetOutputSlot().Connect(output->GetInputSlot(0));
382 }
uint32_t m_PadBottom
Padding bottom value in the height dimension.
uint32_t m_PadLeft
Padding left value in the width dimension.
LayerT * AddLayer(Args &&... args)
Adds a new layer, of type LayerType, to the graph constructed with the arguments passed.
Definition: Graph.hpp:397
uint32_t m_PoolWidth
Pooling width value.
int Connect(InputSlot &destination)
Definition: Layer.cpp:79
The padding fields don't count and are ignored.
PaddingMethod m_PaddingMethod
The padding method to be used. (Exclude, IgnoreValue).
uint32_t m_PadTop
Padding top value in the height dimension.
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
uint32_t m_PoolHeight
Pooling height value.
const InputSlot & GetInputSlot(unsigned int index) const override
Get a const input slot handle by slot index.
Definition: Layer.hpp:310
A layer user-provided data can be bound to (e.g. inputs, outputs).
Definition: OutputLayer.hpp:13
uint32_t m_PadRight
Padding right value in the width dimension.
This layer represents a pooling 2d operation.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
PoolingAlgorithm m_PoolType
The pooling algorithm to use (Max, Average, L2).
A layer user-provided data can be bound to (e.g. inputs, outputs).
Definition: InputLayer.hpp:13
void SetTensorInfo(const TensorInfo &tensorInfo) override
Definition: Layer.cpp:58
const OutputSlot & GetOutputSlot(unsigned int index=0) const override
Get the const output slot handle by slot index.
Definition: Layer.hpp:312
A Pooling2dDescriptor for the Pooling2dLayer.
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.

◆ CreateResizeBilinearGraph()

void CreateResizeBilinearGraph ( Graph graph,
const unsigned int *  inputShape,
const unsigned int *  outputShape,
DataLayout  dataLayout = DataLayout::NCHW 
)

Definition at line 404 of file OptimizerTests.cpp.

References Graph::AddLayer(), armnn::Bilinear, OutputSlot::Connect(), armnn::Float32, Layer::GetInputSlot(), Layer::GetOutputSlot(), ResizeDescriptor::m_DataLayout, ResizeDescriptor::m_Method, ResizeDescriptor::m_TargetHeight, ResizeDescriptor::m_TargetWidth, and OutputSlot::SetTensorInfo().

Referenced by BOOST_AUTO_TEST_CASE().

406 {
407  TensorInfo inputInfo(4, inputShape, DataType::Float32);
408  TensorInfo outputInfo(4, outputShape, DataType::Float32);
409 
410  ResizeDescriptor desc;
411  desc.m_Method = ResizeMethod::Bilinear;
412  desc.m_TargetHeight = 3;
413  desc.m_TargetWidth = 4;
414  desc.m_DataLayout = dataLayout;
415 
416  Layer* input = graph.AddLayer<InputLayer>(0, "input");
417  input->GetOutputSlot().SetTensorInfo(inputInfo);
418 
419  ResizeLayer* layer = graph.AddLayer<ResizeLayer>(desc, "resizeBilinear");
420  layer->GetOutputSlot().SetTensorInfo(outputInfo);
421 
422  Layer* output = graph.AddLayer<OutputLayer>(0, "output");
423  input->GetOutputSlot().Connect(layer->GetInputSlot(0));
424  layer->GetOutputSlot().Connect(output->GetInputSlot(0));
425 }
LayerT * AddLayer(Args &&... args)
Adds a new layer, of type LayerType, to the graph constructed with the arguments passed.
Definition: Graph.hpp:397
int Connect(InputSlot &destination)
Definition: Layer.cpp:79
ResizeMethod m_Method
The Interpolation method to use (Bilinear, NearestNeighbor).
A ResizeDescriptor for the ResizeLayer.
const InputSlot & GetInputSlot(unsigned int index) const override
Get a const input slot handle by slot index.
Definition: Layer.hpp:310
A layer user-provided data can be bound to (e.g. inputs, outputs).
Definition: OutputLayer.hpp:13
uint32_t m_TargetWidth
Target width value.
uint32_t m_TargetHeight
Target height value.
A layer user-provided data can be bound to (e.g. inputs, outputs).
Definition: InputLayer.hpp:13
void SetTensorInfo(const TensorInfo &tensorInfo) override
Definition: Layer.cpp:58
const OutputSlot & GetOutputSlot(unsigned int index=0) const override
Get the const output slot handle by slot index.
Definition: Layer.hpp:312
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
This layer represents a resize operation.
Definition: ResizeLayer.hpp:13