ArmNN NotReleased
OptimizerTests.cpp File Reference
#include "TestUtils.hpp"
#include <Graph.hpp>
#include <Optimizer.hpp>
#include <armnnUtils/FloatingPointConverter.hpp>
#include <backendsCommon/CpuTensorHandle.hpp>
#include <boost/test/unit_test.hpp>

Go to the source code of this file.

Functions

 BOOST_AUTO_TEST_CASE (LSTMValidateTensorShapesFromInputsCIFGDisabledTest)
 
 BOOST_AUTO_TEST_CASE (LSTMValidateTensorShapesFromInputsCIFGEnabledTest)
 
 BOOST_AUTO_TEST_CASE (InsertConvertersTest)
 
void CreateConvolution2dGraph (Graph &graph, const unsigned int *inputShape, const unsigned int *weightsShape, const unsigned int *outputShape, DataLayout dataLayout=DataLayout::NCHW)
 
 BOOST_AUTO_TEST_CASE (Conv2dValidateTensorShapesFromInputs)
 
 BOOST_AUTO_TEST_CASE (Conv2dValidateTensorShapesFromInputsNhwc)
 
void CreateDepthwiseConvolution2dGraph (Graph &graph, const unsigned int *inputShape, const unsigned int *weightsShape, const unsigned int *outputShape, DataLayout dataLayout=DataLayout::NCHW)
 
 BOOST_AUTO_TEST_CASE (DepthwiseConv2dValidateTensorShapesFromInputs)
 
 BOOST_AUTO_TEST_CASE (DepthwiseConv2dValidateTensorShapesFromInputsNhwc)
 
void CreatePooling2dGraph (Graph &graph, const unsigned int *inputShape, const unsigned int *outputShape, DataLayout dataLayout=DataLayout::NCHW)
 
 BOOST_AUTO_TEST_CASE (Pooling2dValidateTensorShapesFromInputs)
 
 BOOST_AUTO_TEST_CASE (Pooling2dValidateTensorShapesFromInputsNhwc)
 
void CreateResizeBilinearGraph (Graph &graph, const unsigned int *inputShape, const unsigned int *outputShape, DataLayout dataLayout=DataLayout::NCHW)
 
 BOOST_AUTO_TEST_CASE (ResizeBilinearValidateTensorShapesFromInputs)
 
 BOOST_AUTO_TEST_CASE (ResizeBilinearValidateTensorShapesFromInputsNhwc)
 
void CreateGatherGraph (Graph &graph, const armnn::TensorInfo &paramsInfo, const armnn::TensorInfo &indicesInfo, const armnn::TensorInfo &outputInfo)
 
 BOOST_AUTO_TEST_CASE (GatherValidateTensorShapesFromInputs)
 
 BOOST_AUTO_TEST_CASE (GatherValidateTensorShapesFromInputs1DParams)
 
 BOOST_AUTO_TEST_CASE (GatherValidateTensorShapesFromInputsMultiDimIndices)
 
 BOOST_AUTO_TEST_CASE (DetectionPostProcessValidateTensorShapes)
 
 BOOST_AUTO_TEST_CASE (FoldPadLayerIntoConvolution2dLayer)
 

Function Documentation
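
The shape-validation test cases listed above all follow the same pattern: a CreateXXXGraph() helper builds a small Graph containing the layer under test together with its bound input and output layers, and the test then checks that Graph::InferTensorInfos() completes without throwing. A condensed sketch of that pattern (a sketch only, mirroring the Conv2dValidateTensorShapesFromInputs case documented below):

    // Build the graph with one of this file's helpers, then let ArmNN infer and
    // validate the tensor shapes starting from the input layers.
    Graph graph;
    const unsigned int inputShape[]   = { 1, 3, 8, 16 };
    const unsigned int weightsShape[] = { 2, 3, 5, 3 };
    const unsigned int outputShape[]  = { 1, 2, 4, 14 };
    CreateConvolution2dGraph(graph, inputShape, weightsShape, outputShape);
    BOOST_CHECK_NO_THROW(graph.InferTensorInfos());
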

◆ BOOST_AUTO_TEST_CASE() [1/16]

BOOST_AUTO_TEST_CASE ( LSTMValidateTensorShapesFromInputsCIFGDisabledTest  )

Definition at line 134 of file OptimizerTests.cpp.

References Graph::InferTensorInfos().

135 {
136  Graph graph;
137 
138  //Helper function creates graph containing LSTM layer with required input and output layers
139  CreateLSTMLayerHelper(graph, false);
140 
141  //This function used to call ValidateShapesFromInputs();
142  BOOST_CHECK_NO_THROW(graph.InferTensorInfos());
143 }

◆ BOOST_AUTO_TEST_CASE() [2/16]

BOOST_AUTO_TEST_CASE ( LSTMValidateTensorShapesFromInputsCIFGEnabledTest  )

Definition at line 145 of file OptimizerTests.cpp.

References Graph::InferTensorInfos().

146 {
147  Graph graph;
148 
149  //Helper function creates graph containing LSTM layer with required input and output layers
150  CreateLSTMLayerHelper(graph, true);
151 
152  //This function used to call ValidateShapesFromInputs();
153  BOOST_CHECK_NO_THROW(graph.InferTensorInfos());
154 }

◆ BOOST_AUTO_TEST_CASE() [3/16]

BOOST_AUTO_TEST_CASE ( InsertConvertersTest  )

Definition at line 156 of file OptimizerTests.cpp.

References armnn::Addition, CheckSequence(), armnn::ConvertFp16ToFp32, armnn::ConvertFp32ToFp16, armnn::Float16, armnn::Float32, armnn::Floor, TensorInfo::GetDataType(), Layer::GetDataType(), Layer::GetInputSlot(), Layer::GetOutputHandler(), Layer::GetOutputSlot(), OutputSlot::GetTensorInfo(), Layer::GetType(), armnn::info, armnn::InsertConvertFp16ToFp32LayersBefore(), armnn::InsertConvertFp32ToFp16LayersAfter(), and OutputHandler::SetTensorInfo().

157 {
158  const armnn::TensorInfo info({ 1, 5, 2, 3 }, armnn::DataType::Float16);
159 
160  armnn::Graph graph;
161 
162  armnn::LayerBindingId inputId = 0;
163 
164  armnn::Layer* head = graph.AddLayer<armnn::OutputLayer>(0, "output");
165 
166  head = graph.InsertNewLayer<armnn::AdditionLayer>(head->GetInputSlot(0), "");
167  head->GetOutputHandler().SetTensorInfo(info);
168 
169  graph.InsertNewLayer<armnn::InputLayer>(head->GetInputSlot(1), inputId++, "")
170  ->GetOutputHandler().SetTensorInfo(info);
171 
172  head = graph.InsertNewLayer<armnn::FloorLayer>(head->GetInputSlot(0), "");
173  head->GetOutputHandler().SetTensorInfo(info);
174 
175  head = graph.InsertNewLayer<armnn::MemCopyLayer>(head->GetInputSlot(0), "");
176  head->GetOutputHandler().SetTensorInfo(info);
177 
178  graph.InsertNewLayer<armnn::InputLayer>(head->GetInputSlot(0), inputId++, "")
179  ->GetOutputHandler().SetTensorInfo(info);
180 
181  // Check graph layer sequence before inserting convert layers
182  BOOST_TEST(CheckSequence(graph.cbegin(),
183  graph.cend(),
184  &IsLayerOfType<armnn::InputLayer>,
185  &IsLayerOfType<armnn::InputLayer>,
186  &IsLayerOfType<armnn::MemCopyLayer>,
187  &IsLayerOfType<armnn::FloorLayer>,
188  &IsLayerOfType<armnn::AdditionLayer>,
189  &IsLayerOfType<armnn::OutputLayer>));
190 
191  // Check layers have Float16 DataType
192  for (auto& layer : graph)
193  {
194  if(layer->GetType()==LayerType::Floor || layer->GetType() == LayerType::Addition)
195  {
196  BOOST_ASSERT(layer->GetOutputSlot(0).GetTensorInfo().GetDataType() == DataType::Float16);
197  BOOST_ASSERT(layer->GetDataType() == DataType::Float16);
198  }
199  }
200 
201  // Insert convert layers either side of unsupported layer
202  for (auto& layer : graph)
203  {
204  if(layer->GetType()==LayerType::Floor || layer->GetType() == LayerType::Addition)
205  {
206  InsertConvertFp16ToFp32LayersBefore(graph, *layer);
207  InsertConvertFp32ToFp16LayersAfter(graph, *layer);
208  }
209  }
210 
211  // Check layers have correct DataType after inserting convert layers
212  for (auto& layer : graph)
213  {
214  if (layer->GetType()==LayerType::Floor || layer->GetType() == LayerType::Addition)
215  {
216  BOOST_ASSERT(layer->GetOutputSlot(0).GetTensorInfo().GetDataType() == DataType::Float32);
217  BOOST_ASSERT(layer->GetDataType() == DataType::Float32);
218  }
219  else if (layer->GetType() == LayerType::ConvertFp16ToFp32)
220  {
221  BOOST_ASSERT(layer->GetOutputSlot(0).GetTensorInfo().GetDataType() == DataType::Float32);
222  BOOST_ASSERT(layer->GetDataType() == DataType::Float16);
223  }
224  else if (layer->GetType() == LayerType::ConvertFp32ToFp16)
225  {
226  BOOST_ASSERT(layer->GetOutputSlot(0).GetTensorInfo().GetDataType() == DataType::Float16);
227  BOOST_ASSERT(layer->GetDataType() == DataType::Float32);
228  }
229  }
230 
231  // Check sequence of layers after inserting convert layers
232  BOOST_TEST(CheckSequence(graph.cbegin(),
233  graph.cend(),
234  &IsLayerOfType<armnn::InputLayer>,
235  &IsLayerOfType<armnn::InputLayer>,
236  &IsLayerOfType<armnn::ConvertFp16ToFp32Layer>,
237  &IsLayerOfType<armnn::MemCopyLayer>,
238  &IsLayerOfType<armnn::ConvertFp16ToFp32Layer>,
239  &IsLayerOfType<armnn::FloorLayer>,
240  &IsLayerOfType<armnn::ConvertFp32ToFp16Layer>,
241  &IsLayerOfType<armnn::ConvertFp16ToFp32Layer>,
242  &IsLayerOfType<armnn::AdditionLayer>,
243  &IsLayerOfType<armnn::ConvertFp32ToFp16Layer>,
244  &IsLayerOfType<armnn::OutputLayer>));
245 }
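
Both helper functions used in the loop above return the converter layers they insert, as std::vector<ConvertFp16ToFp32Layer*> and std::vector<ConvertFp32ToFp16Layer*> respectively, so a test can also keep hold of the inserted layers directly. A minimal sketch of that usage, assuming only the declarations referenced by this test case:

    // Capture the converter layers inserted around an FP16-unsupported layer so they
    // can be inspected after insertion (sketch only).
    std::vector<armnn::ConvertFp16ToFp32Layer*> convertersBefore =
        InsertConvertFp16ToFp32LayersBefore(graph, *layer);
    std::vector<armnn::ConvertFp32ToFp16Layer*> convertersAfter =
        InsertConvertFp32ToFp16LayersAfter(graph, *layer);
    BOOST_ASSERT(!convertersBefore.empty() && !convertersAfter.empty());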

◆ BOOST_AUTO_TEST_CASE() [4/16]

BOOST_AUTO_TEST_CASE ( Conv2dValidateTensorShapesFromInputs  )

Definition at line 277 of file OptimizerTests.cpp.

References CreateConvolution2dGraph(), and Graph::InferTensorInfos().

278 {
279  Graph graph;
280  const unsigned int inputShape[] = { 1, 3, 8, 16 };
281  const unsigned int weightsShape[] = { 2, 3, 5, 3 };
282  const unsigned int outputShape[] = { 1, 2, 4, 14 };
283  CreateConvolution2dGraph(graph, inputShape, weightsShape, outputShape);
284 
285  BOOST_CHECK_NO_THROW(graph.InferTensorInfos());
286 }

◆ BOOST_AUTO_TEST_CASE() [5/16]

BOOST_AUTO_TEST_CASE ( Conv2dValidateTensorShapesFromInputsNhwc  )

Definition at line 288 of file OptimizerTests.cpp.

References CreateConvolution2dGraph(), Graph::InferTensorInfos(), and armnn::NHWC.

289 {
290  Graph graph;
291  const unsigned int inputShape[] = { 1, 8, 16, 3 };
292  const unsigned int weightsShape[] = { 2, 5, 3, 3 };
293  const unsigned int outputShape[] = { 1, 4, 14, 2 };
294  CreateConvolution2dGraph(graph, inputShape, weightsShape, outputShape, DataLayout::NHWC);
295 
296  BOOST_CHECK_NO_THROW(graph.InferTensorInfos());
297 }

◆ BOOST_AUTO_TEST_CASE() [6/16]

BOOST_AUTO_TEST_CASE ( DepthwiseConv2dValidateTensorShapesFromInputs  )

Definition at line 327 of file OptimizerTests.cpp.

References CreateDepthwiseConvolution2dGraph(), and Graph::InferTensorInfos().

328 {
329  Graph graph;
330  const unsigned int inputShape[] = { 1, 2, 3, 3 };
331  const unsigned int weightsShape[] = { 1, 2, 3, 3 };
332  const unsigned int outputShape[] = { 1, 2, 1, 1 };
333  CreateDepthwiseConvolution2dGraph(graph, inputShape, weightsShape, outputShape);
334 
335  BOOST_CHECK_NO_THROW(graph.InferTensorInfos());
336 }

◆ BOOST_AUTO_TEST_CASE() [7/16]

BOOST_AUTO_TEST_CASE ( DepthwiseConv2dValidateTensorShapesFromInputsNhwc  )

Definition at line 338 of file OptimizerTests.cpp.

References CreateDepthwiseConvolution2dGraph(), Graph::InferTensorInfos(), and armnn::NHWC.

339 {
340  Graph graph;
341  const unsigned int inputShape[] = { 1, 3, 3, 2 };
342  const unsigned int weightsShape[] = { 1, 2, 3, 3 };
343  const unsigned int outputShape[] = { 1, 1, 1, 2 };
344  CreateDepthwiseConvolution2dGraph(graph, inputShape, weightsShape, outputShape, DataLayout::NHWC);
345 
346  BOOST_CHECK_NO_THROW(graph.InferTensorInfos());
347 }

◆ BOOST_AUTO_TEST_CASE() [8/16]

BOOST_AUTO_TEST_CASE ( Pooling2dValidateTensorShapesFromInputs  )

Definition at line 377 of file OptimizerTests.cpp.

References CreatePooling2dGraph(), Graph::InferTensorInfos(), and armnn::NCHW.

378 {
379  Graph graph;
380  const unsigned int inputShape[] = { 5, 3, 52, 60 };
381  const unsigned int outputShape[] = { 5, 3, 11, 13 };
382  CreatePooling2dGraph(graph, inputShape, outputShape, DataLayout::NCHW);
383 
384  BOOST_CHECK_NO_THROW(graph.InferTensorInfos());
385 }

◆ BOOST_AUTO_TEST_CASE() [9/16]

BOOST_AUTO_TEST_CASE ( Pooling2dValidateTensorShapesFromInputsNhwc  )

Definition at line 387 of file OptimizerTests.cpp.

References CreatePooling2dGraph(), Graph::InferTensorInfos(), and armnn::NHWC.

388 {
389  Graph graph;
390  const unsigned int inputShape[] = { 5, 52, 60, 3 };
391  const unsigned int outputShape[] = { 5, 11, 13, 3 };
392  CreatePooling2dGraph(graph, inputShape, outputShape, DataLayout::NHWC);
393 
394  BOOST_CHECK_NO_THROW(graph.InferTensorInfos());
395 }

◆ BOOST_AUTO_TEST_CASE() [10/16]

BOOST_AUTO_TEST_CASE ( ResizeBilinearValidateTensorShapesFromInputs  )

Definition at line 420 of file OptimizerTests.cpp.

References CreateResizeBilinearGraph(), and Graph::InferTensorInfos().

421 {
422  Graph graph;
423  const unsigned int inputShape[] = { 1, 2, 4, 5 };
424  const unsigned int outputShape[] = { 1, 2, 3, 4 };
425  CreateResizeBilinearGraph(graph, inputShape, outputShape);
426 
427  BOOST_CHECK_NO_THROW(graph.InferTensorInfos());
428 }

◆ BOOST_AUTO_TEST_CASE() [11/16]

BOOST_AUTO_TEST_CASE ( ResizeBilinearValidateTensorShapesFromInputsNhwc  )

Definition at line 430 of file OptimizerTests.cpp.

References CreateResizeBilinearGraph(), Graph::InferTensorInfos(), and armnn::NHWC.

431 {
432  Graph graph;
433  const unsigned int inputShape[] = { 1, 4, 5, 2 };
434  const unsigned int outputShape[] = { 1, 3, 4, 2 };
435  CreateResizeBilinearGraph(graph, inputShape, outputShape, DataLayout::NHWC);
436 
437  BOOST_CHECK_NO_THROW(graph.InferTensorInfos());
438 }

◆ BOOST_AUTO_TEST_CASE() [12/16]

BOOST_AUTO_TEST_CASE ( GatherValidateTensorShapesFromInputs  )

Definition at line 459 of file OptimizerTests.cpp.

References CreateGatherGraph(), armnn::Float32, Graph::InferTensorInfos(), and armnn::Signed32.

460 {
461  Graph graph;
462  armnn::TensorInfo paramsInfo({10, 5}, DataType::Float32);
463  armnn::TensorInfo indicesInfo({3}, DataType::Signed32);
464  armnn::TensorInfo outputInfo({3, 5}, DataType::Float32);
465 
466  CreateGatherGraph(graph, paramsInfo, indicesInfo, outputInfo);
467 
468  BOOST_CHECK_NO_THROW(graph.InferTensorInfos());
469 }

◆ BOOST_AUTO_TEST_CASE() [13/16]

BOOST_AUTO_TEST_CASE ( GatherValidateTensorShapesFromInputs1DParams  )

Definition at line 471 of file OptimizerTests.cpp.

References CreateGatherGraph(), armnn::Float32, Graph::InferTensorInfos(), and armnn::Signed32.

472 {
473  Graph graph;
474  armnn::TensorInfo paramsInfo({8}, DataType::Float32);
475  armnn::TensorInfo indicesInfo({5}, DataType::Signed32);
476  armnn::TensorInfo outputInfo( {5}, DataType::Float32);
477 
478  CreateGatherGraph(graph, paramsInfo, indicesInfo, outputInfo);
479 
480  BOOST_CHECK_NO_THROW(graph.InferTensorInfos());
481 }

◆ BOOST_AUTO_TEST_CASE() [14/16]

BOOST_AUTO_TEST_CASE ( GatherValidateTensorShapesFromInputsMultiDimIndices  )

Definition at line 483 of file OptimizerTests.cpp.

References CreateGatherGraph(), armnn::Float32, Graph::InferTensorInfos(), and armnn::Signed32.

484 {
485  Graph graph;
486  armnn::TensorInfo paramsInfo({3, 2, 5}, DataType::Float32);
487  armnn::TensorInfo indicesInfo({2, 2}, DataType::Signed32);
488  armnn::TensorInfo outputInfo({2, 2, 2, 5}, DataType::Float32);
489 
490  CreateGatherGraph(graph, paramsInfo, indicesInfo, outputInfo);
491 
492  BOOST_CHECK_NO_THROW(graph.InferTensorInfos());
493 }

◆ BOOST_AUTO_TEST_CASE() [15/16]

BOOST_AUTO_TEST_CASE ( DetectionPostProcessValidateTensorShapes  )

Definition at line 495 of file OptimizerTests.cpp.

References Graph::AddLayer(), anchors(), OutputSlot::Connect(), Layer::GetInputSlot(), Layer::GetOutputSlot(), Graph::InferTensorInfos(), DetectionPostProcessLayer::m_Anchors, DetectionPostProcessDescriptor::m_MaxDetections, armnn::QAsymmU8, scoresInfo, and OutputSlot::SetTensorInfo().

496 {
497  Graph graph;
498  armnn::TensorInfo boxEncodingsInfo({1, 10, 4}, DataType::QAsymmU8);
499  armnn::TensorInfo scoresInfo({1, 10, 4}, DataType::QAsymmU8);
500  std::vector<uint8_t> anchorsVector(40);
501  armnn::ConstTensor anchors(armnn::TensorInfo({10, 4}, DataType::QAsymmU8), anchorsVector);
502 
503  armnn::TensorInfo detectionBoxesInfo({1, 3, 4}, DataType::QAsymmU8);
504  armnn::TensorInfo detectionScoresInfo({1, 3}, DataType::QAsymmU8);
505  armnn::TensorInfo detectionClassesInfo({1, 3}, DataType::QAsymmU8);
506  armnn::TensorInfo numDetectionInfo({1}, DataType::QAsymmU8);
507 
508  Layer* input0 = graph.AddLayer<InputLayer>(0, "boxEncodings");
509  input0->GetOutputSlot().SetTensorInfo(boxEncodingsInfo);
510 
511  Layer* input1 = graph.AddLayer<InputLayer>(1, "score");
512  input1->GetOutputSlot().SetTensorInfo(scoresInfo);
513 
514  DetectionPostProcessDescriptor descriptor;
515  descriptor.m_MaxDetections = 3;
516 
517  DetectionPostProcessLayer* layer = graph.AddLayer<DetectionPostProcessLayer>(descriptor, "detectionPostProcess");
518  layer->m_Anchors = std::make_unique<armnn::ScopedCpuTensorHandle>(anchors);
519  layer->GetOutputSlot(0).SetTensorInfo(detectionBoxesInfo);
520  layer->GetOutputSlot(1).SetTensorInfo(detectionScoresInfo);
521  layer->GetOutputSlot(2).SetTensorInfo(detectionClassesInfo);
522  layer->GetOutputSlot(3).SetTensorInfo(numDetectionInfo);
523 
524  input0->GetOutputSlot().Connect(layer->GetInputSlot(0));
525  input1->GetOutputSlot().Connect(layer->GetInputSlot(1));
526 
527  BOOST_CHECK_NO_THROW(graph.InferTensorInfos());
528 }

◆ BOOST_AUTO_TEST_CASE() [16/16]

BOOST_AUTO_TEST_CASE ( FoldPadLayerIntoConvolution2dLayer  )

Definition at line 530 of file OptimizerTests.cpp.

References Graph::AddLayer(), BOOST_AUTO_TEST_SUITE_END(), Graph::cbegin(), Graph::cend(), CheckSequence(), OutputSlot::Connect(), armnn::Float32, Layer::GetInputSlot(), Layer::GetNameStr(), Layer::GetOutputSlot(), LayerWithParameters< Parameters >::GetParameters(), Convolution2dDescriptor::m_BiasEnabled, Convolution2dDescriptor::m_DataLayout, Convolution2dDescriptor::m_StrideX, Convolution2dDescriptor::m_StrideY, Convolution2dLayer::m_Weight, armnn::MakeOptimizations(), armnn::NHWC, Optimizer::Pass(), and OutputSlot::SetTensorInfo().

531 {
532  Graph graph;
533  const unsigned int inputShape[] = { 1, 2, 2, 3 };
534  const unsigned int paddedShape[] = { 1, 6, 6, 3 };
535  const unsigned int weightsShape[] = { 1, 2, 3, 3 };
536  const unsigned int outputShape[] = { 1, 2, 1, 1 };
537 
538 
539  armnn::TensorInfo inputInfo(4, inputShape, DataType::Float32);
540  armnn::TensorInfo paddedInfo(4, paddedShape, DataType::Float32);
541  armnn::TensorInfo outputInfo(4, outputShape, DataType::Float32);
542 
543  Layer* input = graph.AddLayer<InputLayer>(0, "input");
544  input->GetOutputSlot().SetTensorInfo(inputInfo);
545 
546  PadDescriptor padDescriptor({{ 0, 0 }, { 2, 2 }, { 2, 2 }, { 0, 0 }});
547 
548  PadLayer* padLayer = graph.AddLayer<PadLayer>(padDescriptor, "pad");
549  padLayer->GetOutputSlot().SetTensorInfo(paddedInfo);
550 
551  Convolution2dDescriptor convolution2dDescriptor;
552  convolution2dDescriptor.m_BiasEnabled = false;
553  convolution2dDescriptor.m_StrideX = 1;
554  convolution2dDescriptor.m_StrideY = 1;
555  convolution2dDescriptor.m_DataLayout = DataLayout::NHWC;
556 
557  std::vector<float> weightsVector(18);
558  armnn::ConstTensor weights(armnn::TensorInfo(4, weightsShape, armnn::DataType::Float32), weightsVector);
559 
560  Convolution2dLayer* conv2dLayer = graph.AddLayer<Convolution2dLayer>(convolution2dDescriptor,"conv2d");
561  conv2dLayer->m_Weight = std::make_unique<armnn::ScopedCpuTensorHandle>(weights);
562  conv2dLayer->GetOutputSlot().SetTensorInfo(outputInfo);
563 
564  Layer* output = graph.AddLayer<OutputLayer>(0, "output");
565 
566  // Connect up layers - input -> pad -> conv2d -> output
567  input->GetOutputSlot().Connect(padLayer->GetInputSlot(0));
568  padLayer->GetOutputSlot().Connect(conv2dLayer->GetInputSlot(0));
569  conv2dLayer->GetOutputSlot().Connect(output->GetInputSlot(0));
570 
571  auto checkSimpleConv2d = [ ](const armnn::Layer* const layer) -> bool
572  {
573  const auto conv2dLayer = static_cast<const armnn::Convolution2dLayer*>(layer);
574  const auto conv2dLayerParams = conv2dLayer->GetParameters();
575  return IsLayerOfType<armnn::Convolution2dLayer>(layer) &&
576  (layer->GetNameStr() == "conv2d") &&
577  (conv2dLayerParams.m_PadLeft == 0) &&
578  (conv2dLayerParams.m_PadRight == 0) &&
579  (conv2dLayerParams.m_PadTop == 0) &&
580  (conv2dLayerParams.m_PadBottom == 0) &&
581  (conv2dLayerParams.m_BiasEnabled == false) &&
582  (conv2dLayerParams.m_StrideX == 1) &&
583  (conv2dLayerParams.m_StrideY == 1) &&
584  (conv2dLayerParams.m_DataLayout == DataLayout::NHWC);
585  };
586 
587  BOOST_TEST(CheckSequence(graph.cbegin(),
588  graph.cend(),
589  &IsLayerOfType<armnn::InputLayer>,
590  &IsLayerOfType<armnn::PadLayer>,
591  checkSimpleConv2d,
592  &IsLayerOfType<armnn::OutputLayer>));
593 
594  armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(FoldPadIntoConvolution2d()));
595 
596  auto checkPadFoldedIntoConv2d = [ ](const armnn::Layer* const layer) -> bool
597  {
598  const auto conv2dLayer = static_cast<const armnn::Convolution2dLayer*>(layer);
599  const auto conv2dLayerParams = conv2dLayer->GetParameters();
600  return IsLayerOfType<armnn::Convolution2dLayer>(layer) &&
601  (layer->GetNameStr() == "folded-pad-into-conv2d") &&
602  (conv2dLayerParams.m_PadLeft == 2) &&
603  (conv2dLayerParams.m_PadRight == 2) &&
604  (conv2dLayerParams.m_PadTop == 2) &&
605  (conv2dLayerParams.m_PadBottom == 2) &&
606  (conv2dLayerParams.m_BiasEnabled == false) &&
607  (conv2dLayerParams.m_StrideX == 1) &&
608  (conv2dLayerParams.m_StrideY == 1) &&
609  (conv2dLayerParams.m_DataLayout == DataLayout::NHWC);
610  };
611 
612  BOOST_TEST(CheckSequence(graph.cbegin(),
613  graph.cend(),
614  &IsLayerOfType<armnn::InputLayer>,
615  checkPadFoldedIntoConv2d,
616  &IsLayerOfType<armnn::OutputLayer>));
617 }
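
Optimizer::Pass() takes an Optimizations collection built by the variadic MakeOptimizations() helper, so several graph rewrites can be applied in one pass. A minimal sketch of that call pattern; FoldPadIntoConvolution2d comes from the references above, while OptimizeConsecutiveReshapes is named purely as an assumed second optimization to illustrate the variadic form:

    // Bundle one or more optimizations and run them over the graph in a single pass (sketch only).
    armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(FoldPadIntoConvolution2d(),
                                                           OptimizeConsecutiveReshapes()));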

◆ CreateConvolution2dGraph()

void CreateConvolution2dGraph ( Graph &  graph,
const unsigned int *  inputShape,
const unsigned int *  weightsShape,
const unsigned int *  outputShape,
DataLayout  dataLayout = DataLayout::NCHW 
)

Definition at line 249 of file OptimizerTests.cpp.

References Graph::AddLayer(), OutputSlot::Connect(), armnn::Float32, Layer::GetInputSlot(), Layer::GetOutputSlot(), Convolution2dDescriptor::m_BiasEnabled, Convolution2dDescriptor::m_DataLayout, Convolution2dDescriptor::m_StrideX, Convolution2dDescriptor::m_StrideY, Convolution2dLayer::m_Weight, and OutputSlot::SetTensorInfo().

Referenced by BOOST_AUTO_TEST_CASE().

252 {
253  armnn::TensorInfo inputInfo(4, inputShape, DataType::Float32);
254  armnn::TensorInfo outputInfo(4, outputShape, DataType::Float32);
255 
256  std::vector<float> weightsVector(90);
257  armnn::ConstTensor weights(armnn::TensorInfo(4, weightsShape, armnn::DataType::Float32), weightsVector);
258 
259  Convolution2dDescriptor desc;
260  desc.m_BiasEnabled = false;
261  desc.m_StrideX = 1;
262  desc.m_StrideY = 1;
263  desc.m_DataLayout = dataLayout;
264 
265  Layer* input = graph.AddLayer<InputLayer>(0, "input");
266  input->GetOutputSlot().SetTensorInfo(inputInfo);
267 
268  Convolution2dLayer* layer = graph.AddLayer<Convolution2dLayer>(desc, "conv2d");
269  layer->m_Weight = std::make_unique<armnn::ScopedCpuTensorHandle>(weights);
270  layer->GetOutputSlot().SetTensorInfo(outputInfo);
271 
272  Layer* output = graph.AddLayer<OutputLayer>(0, "output");
273  input->GetOutputSlot().Connect(layer->GetInputSlot(0));
274  layer->GetOutputSlot().Connect(output->GetInputSlot(0));
275 }

◆ CreateDepthwiseConvolution2dGraph()

void CreateDepthwiseConvolution2dGraph ( Graph &  graph,
const unsigned int *  inputShape,
const unsigned int *  weightsShape,
const unsigned int *  outputShape,
DataLayout  dataLayout = DataLayout::NCHW 
)

Definition at line 299 of file OptimizerTests.cpp.

References Graph::AddLayer(), OutputSlot::Connect(), armnn::Float32, Layer::GetInputSlot(), Layer::GetOutputSlot(), DepthwiseConvolution2dDescriptor::m_BiasEnabled, DepthwiseConvolution2dDescriptor::m_DataLayout, DepthwiseConvolution2dDescriptor::m_StrideX, DepthwiseConvolution2dDescriptor::m_StrideY, DepthwiseConvolution2dLayer::m_Weight, and OutputSlot::SetTensorInfo().

Referenced by BOOST_AUTO_TEST_CASE().

302 {
303  armnn::TensorInfo inputInfo(4, inputShape, DataType::Float32);
304  armnn::TensorInfo outputInfo(4, outputShape, DataType::Float32);
305 
306  std::vector<float> weightsVector(18);
307  armnn::ConstTensor weights(armnn::TensorInfo(4, weightsShape, armnn::DataType::Float32), weightsVector);
308 
309  DepthwiseConvolution2dDescriptor desc;
310  desc.m_BiasEnabled = false;
311  desc.m_StrideX = 1;
312  desc.m_StrideY = 1;
313  desc.m_DataLayout = dataLayout;
314 
315  Layer* input = graph.AddLayer<InputLayer>(0, "input");
316  input->GetOutputSlot().SetTensorInfo(inputInfo);
317 
318  DepthwiseConvolution2dLayer* layer = graph.AddLayer<DepthwiseConvolution2dLayer>(desc, "depthwiseConv2d");
319  layer->m_Weight = std::make_unique<armnn::ScopedCpuTensorHandle>(weights);
320  layer->GetOutputSlot().SetTensorInfo(outputInfo);
321 
322  Layer* output = graph.AddLayer<OutputLayer>(0, "output");
323  input->GetOutputSlot().Connect(layer->GetInputSlot(0));
324  layer->GetOutputSlot().Connect(output->GetInputSlot(0));
325 }

◆ CreateGatherGraph()

void CreateGatherGraph ( Graph &  graph,
const armnn::TensorInfo &  paramsInfo,
const armnn::TensorInfo &  indicesInfo,
const armnn::TensorInfo &  outputInfo 
)

Definition at line 441 of file OptimizerTests.cpp.

References Graph::AddLayer(), OutputSlot::Connect(), Layer::GetInputSlot(), Layer::GetOutputSlot(), and OutputSlot::SetTensorInfo().

Referenced by BOOST_AUTO_TEST_CASE().

443 {
444  Layer* input0 = graph.AddLayer<InputLayer>(0, "params");
445  input0->GetOutputSlot().SetTensorInfo(paramsInfo);
446 
447  Layer* input1 = graph.AddLayer<InputLayer>(1, "indices");
448  input1->GetOutputSlot().SetTensorInfo(indicesInfo);
449 
450  GatherLayer* layer = graph.AddLayer<GatherLayer>("gather");
451  layer->GetOutputSlot().SetTensorInfo(outputInfo);
452 
453  Layer* output = graph.AddLayer<OutputLayer>(0, "output");
454  input0->GetOutputSlot().Connect(layer->GetInputSlot(0));
455  input1->GetOutputSlot().Connect(layer->GetInputSlot(1));
456  layer->GetOutputSlot().Connect(output->GetInputSlot(0));
457 }

◆ CreatePooling2dGraph()

void CreatePooling2dGraph ( Graph &  graph,
const unsigned int *  inputShape,
const unsigned int *  outputShape,
DataLayout  dataLayout = DataLayout::NCHW 
)

Definition at line 349 of file OptimizerTests.cpp.

References Graph::AddLayer(), armnn::Average, OutputSlot::Connect(), armnn::Exclude, armnn::Float32, Layer::GetInputSlot(), Layer::GetOutputSlot(), Pooling2dDescriptor::m_DataLayout, Pooling2dDescriptor::m_PadBottom, Pooling2dDescriptor::m_PaddingMethod, Pooling2dDescriptor::m_PadLeft, Pooling2dDescriptor::m_PadRight, Pooling2dDescriptor::m_PadTop, Pooling2dDescriptor::m_PoolHeight, Pooling2dDescriptor::m_PoolType, Pooling2dDescriptor::m_PoolWidth, Pooling2dDescriptor::m_StrideX, Pooling2dDescriptor::m_StrideY, and OutputSlot::SetTensorInfo().

Referenced by BOOST_AUTO_TEST_CASE().

351 {
352  armnn::TensorInfo inputInfo(4, inputShape, DataType::Float32);
353  armnn::TensorInfo outputInfo(4, outputShape, DataType::Float32);
354 
355  Pooling2dDescriptor desc;
356  desc.m_PoolType = armnn::PoolingAlgorithm::Average;
357  desc.m_PoolWidth = desc.m_PoolHeight = 100;
358  desc.m_StrideX = desc.m_StrideY = 5;
359  desc.m_PadLeft = 50;
360  desc.m_PadRight = 50;
361  desc.m_PadTop = 50;
362  desc.m_PadBottom = 50;
363  desc.m_PaddingMethod = armnn::PaddingMethod::Exclude;
364  desc.m_DataLayout = dataLayout;
365 
366  Layer* input = graph.AddLayer<InputLayer>(0, "input");
367  input->GetOutputSlot().SetTensorInfo(inputInfo);
368 
369  Pooling2dLayer* layer = graph.AddLayer<Pooling2dLayer>(desc, "pooling2d");
370  layer->GetOutputSlot().SetTensorInfo(outputInfo);
371 
372  Layer* output = graph.AddLayer<OutputLayer>(0, "output");
373  input->GetOutputSlot().Connect(layer->GetInputSlot(0));
374  layer->GetOutputSlot().Connect(output->GetInputSlot(0));
375 }

◆ CreateResizeBilinearGraph()

void CreateResizeBilinearGraph ( Graph &  graph,
const unsigned int *  inputShape,
const unsigned int *  outputShape,
DataLayout  dataLayout = DataLayout::NCHW 
)

Definition at line 397 of file OptimizerTests.cpp.

References Graph::AddLayer(), armnn::Bilinear, OutputSlot::Connect(), armnn::Float32, Layer::GetInputSlot(), Layer::GetOutputSlot(), ResizeDescriptor::m_DataLayout, ResizeDescriptor::m_Method, ResizeDescriptor::m_TargetHeight, ResizeDescriptor::m_TargetWidth, and OutputSlot::SetTensorInfo().

Referenced by BOOST_AUTO_TEST_CASE().

399 {
400  TensorInfo inputInfo(4, inputShape, DataType::Float32);
401  TensorInfo outputInfo(4, outputShape, DataType::Float32);
402 
403  ResizeDescriptor desc;
404  desc.m_Method = ResizeMethod::Bilinear;
405  desc.m_TargetHeight = 3;
406  desc.m_TargetWidth = 4;
407  desc.m_DataLayout = dataLayout;
408 
409  Layer* input = graph.AddLayer<InputLayer>(0, "input");
410  input->GetOutputSlot().SetTensorInfo(inputInfo);
411 
412  ResizeLayer* layer = graph.AddLayer<ResizeLayer>(desc, "resizeBilinear");
413  layer->GetOutputSlot().SetTensorInfo(outputInfo);
414 
415  Layer* output = graph.AddLayer<OutputLayer>(0, "output");
416  input->GetOutputSlot().Connect(layer->GetInputSlot(0));
417  layer->GetOutputSlot().Connect(output->GetInputSlot(0));
418 }