ArmNN 22.05 — OptimizerTests.cpp
//
// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include <TestUtils.hpp>

#include <BackendSettings.hpp>
#include <Graph.hpp>
#include <Network.hpp>
#include <Optimizer.hpp>

#include <armnn/BackendHelper.hpp>
#include <armnn/BackendRegistry.hpp>
#include <armnn/INetwork.hpp>
#include <armnn/StrategyBase.hpp>

#include <armnn/utility/Assert.hpp>
#include <armnn/utility/IgnoreUnused.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>

#include <armnn/backends/TensorHandle.hpp>

#include <doctest/doctest.h>
using namespace armnn;

namespace
{
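// Builds a graph containing a single LstmLayer together with the input and
// output layers it needs; all required constant parameters are allocated so
// the shape-inference tests below can run against it.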
void CreateLSTMLayerHelper(Graph &graph, bool CifgEnabled)
{
    LstmDescriptor layerDesc;
    layerDesc.m_ActivationFunc = 4;
    layerDesc.m_ClippingThresCell = 0.2f;
    layerDesc.m_ClippingThresProj = 0.4f;
    layerDesc.m_CifgEnabled = CifgEnabled;
    layerDesc.m_PeepholeEnabled = false;
    layerDesc.m_ProjectionEnabled = false;

    LstmLayer* const layer = graph.AddLayer<LstmLayer>(layerDesc, "layer");
    unsigned int batchSize = 3;
    unsigned int inputSize = 2;
    unsigned int numUnits = 4;
    unsigned int outputSize = 4;

    layer->m_BasicParameters.m_InputToForgetWeights = std::make_unique<ScopedTensorHandle>
            (TensorInfo({ numUnits, inputSize }, DataType::Float32));
    layer->m_BasicParameters.m_InputToCellWeights = std::make_unique<ScopedTensorHandle>
            (TensorInfo({ numUnits, inputSize }, DataType::Float32));
    layer->m_BasicParameters.m_InputToOutputWeights = std::make_unique<ScopedTensorHandle>
            (TensorInfo({ numUnits, inputSize }, DataType::Float32));
    layer->m_BasicParameters.m_RecurrentToForgetWeights = std::make_unique<ScopedTensorHandle>
            (TensorInfo({ numUnits, outputSize }, DataType::Float32));
    layer->m_BasicParameters.m_RecurrentToCellWeights = std::make_unique<ScopedTensorHandle>
            (TensorInfo({ numUnits, outputSize }, DataType::Float32));
    layer->m_BasicParameters.m_RecurrentToOutputWeights = std::make_unique<ScopedTensorHandle>
            (TensorInfo({ numUnits, outputSize }, DataType::Float32));
    layer->m_BasicParameters.m_ForgetGateBias = std::make_unique<ScopedTensorHandle>
            (TensorInfo({ numUnits }, DataType::Float32));
    layer->m_BasicParameters.m_CellBias = std::make_unique<ScopedTensorHandle>
            (TensorInfo({ numUnits }, DataType::Float32));
    layer->m_BasicParameters.m_OutputGateBias = std::make_unique<ScopedTensorHandle>
            (TensorInfo({ numUnits }, DataType::Float32));

    layer->m_BasicParameters.m_InputToForgetWeights->Allocate();
    layer->m_BasicParameters.m_InputToCellWeights->Allocate();
    layer->m_BasicParameters.m_InputToOutputWeights->Allocate();
    layer->m_BasicParameters.m_RecurrentToForgetWeights->Allocate();
    layer->m_BasicParameters.m_RecurrentToCellWeights->Allocate();
    layer->m_BasicParameters.m_RecurrentToOutputWeights->Allocate();
    layer->m_BasicParameters.m_ForgetGateBias->Allocate();
    layer->m_BasicParameters.m_CellBias->Allocate();
    layer->m_BasicParameters.m_OutputGateBias->Allocate();

    if (!layerDesc.m_CifgEnabled)
    {
        layer->m_CifgParameters.m_InputToInputWeights = std::make_unique<ScopedTensorHandle>
                (TensorInfo({ numUnits, inputSize }, DataType::Float32));
        layer->m_CifgParameters.m_RecurrentToInputWeights = std::make_unique<ScopedTensorHandle>
                (TensorInfo({ numUnits, outputSize }, DataType::Float32));
        layer->m_CifgParameters.m_InputGateBias = std::make_unique<ScopedTensorHandle>
                (TensorInfo({ numUnits }, DataType::Float32));
        layer->m_CifgParameters.m_InputToInputWeights->Allocate();
        layer->m_CifgParameters.m_RecurrentToInputWeights->Allocate();
        layer->m_CifgParameters.m_InputGateBias->Allocate();
    }

    if (layerDesc.m_ProjectionEnabled)
    {
        layer->m_ProjectionParameters.m_ProjectionWeights = std::make_unique<ScopedTensorHandle>
                (TensorInfo({ outputSize, numUnits }, DataType::Float32));
        layer->m_ProjectionParameters.m_ProjectionBias = std::make_unique<ScopedTensorHandle>
                (TensorInfo({ outputSize }, DataType::Float32));
        layer->m_ProjectionParameters.m_ProjectionWeights->Allocate();
        layer->m_ProjectionParameters.m_ProjectionBias->Allocate();
    }

    if (layerDesc.m_PeepholeEnabled)
    {
        if (!layerDesc.m_CifgEnabled)
        {
            layer->m_PeepholeParameters.m_CellToInputWeights = std::make_unique<ScopedTensorHandle>
                    (TensorInfo({ numUnits }, DataType::Float32));
            layer->m_PeepholeParameters.m_CellToInputWeights->Allocate();
        }
        layer->m_PeepholeParameters.m_CellToForgetWeights = std::make_unique<ScopedTensorHandle>
                (TensorInfo({ numUnits }, DataType::Float32));
        layer->m_PeepholeParameters.m_CellToOutputWeights = std::make_unique<ScopedTensorHandle>
                (TensorInfo({ numUnits }, DataType::Float32));
        layer->m_PeepholeParameters.m_CellToForgetWeights->Allocate();
        layer->m_PeepholeParameters.m_CellToOutputWeights->Allocate();
    }

    // Create input and output layers
    Layer* const input = graph.AddLayer<InputLayer>(0, "input");
    Layer* const outputStateIn = graph.AddLayer<InputLayer>(1, "outputStateIn");
    Layer* const cellStateIn = graph.AddLayer<InputLayer>(2, "cellStateIn");
    Layer* const scratchBuffer = graph.AddLayer<OutputLayer>(0, "scratchBuffer");
    Layer* const outputStateOut = graph.AddLayer<OutputLayer>(1, "outputStateOut");
    Layer* const cellStateOut = graph.AddLayer<OutputLayer>(2, "cellStateOut");
    Layer* const output = graph.AddLayer<OutputLayer>(3, "output");

    // Connect up
    armnn::TensorInfo lstmTensorInfo1({ batchSize, inputSize }, DataType::Float32);
    armnn::TensorInfo lstmTensorInfo2({ batchSize, numUnits }, DataType::Float32);
    armnn::TensorInfo lstmTensorInfo3({ batchSize, outputSize }, DataType::Float32);
    armnn::TensorInfo lstmTensorInfoScratchBuff({ batchSize, numUnits * (layerDesc.m_CifgEnabled ? 3 : 4) },
                                                DataType::Float32);

    Connect(input, layer, lstmTensorInfo1, 0, 0);
    Connect(cellStateIn, layer, lstmTensorInfo2, 0, 1);
    Connect(outputStateIn, layer, lstmTensorInfo3, 0, 2);
    Connect(layer, scratchBuffer, lstmTensorInfoScratchBuff, 0, 0);
    Connect(layer, outputStateOut, lstmTensorInfo3, 1, 0);
    Connect(layer, cellStateOut, lstmTensorInfo2, 2, 0);
    Connect(layer, output, lstmTensorInfo3, 3, 0);
}

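// Minimal ILayerSupport implementation: reports Input, Output and Activation
// layers as supported and rejects everything else.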
class MockLayerSupport : public LayerSupportBase
{
public:
    bool IsLayerSupported(const LayerType& type,
                          const std::vector<TensorInfo>& infos,
                          const BaseDescriptor& descriptor,
                          const Optional<LstmInputParamsInfo>& /*lstmParamsInfo*/,
                          const Optional<QuantizedLstmInputParamsInfo>& /*quantizedLstmParamsInfo*/,
                          Optional<std::string&> reasonIfUnsupported) const override
    {
        switch (type)
        {
            case LayerType::Input:
                return IsInputSupported(infos[0], reasonIfUnsupported);
            case LayerType::Output:
                return IsOutputSupported(infos[0], reasonIfUnsupported);
            case LayerType::Activation:
                return IsActivationSupported(infos[0],
                                             infos[1],
                                             *(PolymorphicDowncast<const ActivationDescriptor*>(&descriptor)),
                                             reasonIfUnsupported);
            default:
                return false;
        }
    }

    bool IsInputSupported(const TensorInfo& /*input*/,
                          Optional<std::string&> /*reasonIfUnsupported = EmptyOptional()*/) const override
    {
        return true;
    }

    bool IsOutputSupported(const TensorInfo& /*input*/,
                           Optional<std::string&> /*reasonIfUnsupported = EmptyOptional()*/) const override
    {
        return true;
    }

    bool IsActivationSupported(const TensorInfo& /*input0*/,
                               const TensorInfo& /*output*/,
                               const ActivationDescriptor& /*descriptor*/,
                               Optional<std::string&> /*reasonIfUnsupported = EmptyOptional()*/) const override
    {
        return true;
    }
};

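// Mock IBackendInternal whose identity comes from the NamePolicy template
// parameter; it accepts a custom memory allocator and records that one was set.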
template <typename NamePolicy>
class CustomAllocatorBackend : public IBackendInternal
{
public:
    CustomAllocatorBackend() :
        m_BackendCapabilities(NamePolicy::GetIdStatic(), {{"NullCapability", false}}),
        m_CustomAllocator(false) {};
    CustomAllocatorBackend(const BackendCapabilities& capabilities) :
        m_BackendCapabilities(capabilities),
        m_CustomAllocator(false) {};
    ~CustomAllocatorBackend() = default;

    static const BackendId& GetIdStatic()
    {
        return NamePolicy::GetIdStatic();
    }
    const BackendId& GetId() const override
    {
        return GetIdStatic();
    }

    IBackendInternal::IMemoryManagerUniquePtr CreateMemoryManager() const override
    {
        return nullptr;
    };

    IBackendInternal::IWorkloadFactoryPtr
        CreateWorkloadFactory(const IBackendInternal::IMemoryManagerSharedPtr&) const override
    {
        return nullptr;
    }

    IBackendInternal::IBackendContextPtr CreateBackendContext(const IRuntime::CreationOptions&) const override
    {
        return nullptr;
    }

    IBackendInternal::ILayerSupportSharedPtr GetLayerSupport() const override
    {
        return std::make_shared<MockLayerSupport>();
    }

    OptimizationViews OptimizeSubgraphView(const SubgraphView&) const override
    {
        return {};
    };

    BackendCapabilities GetCapabilities() const override
    {
        return m_BackendCapabilities;
    };

    virtual bool UseCustomMemoryAllocator(std::shared_ptr<ICustomAllocator> allocator,
                                          armnn::Optional<std::string&> errMsg) override
    {
        IgnoreUnused(errMsg, allocator);
        m_CustomAllocator = true;
        return m_CustomAllocator;
    }

    BackendCapabilities m_BackendCapabilities;
    bool m_CustomAllocator;
};

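// Variant of the mock backend that does not override UseCustomMemoryAllocator(),
// so it keeps the default IBackendInternal behaviour.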
template <typename NamePolicy>
class NoProtectedModeMockBackend : public IBackendInternal
{
public:
    NoProtectedModeMockBackend() : m_BackendCapabilities(NamePolicy::GetIdStatic(), {{"NullCapability", false}}) {};
    NoProtectedModeMockBackend(const BackendCapabilities& capabilities) : m_BackendCapabilities(capabilities) {};
    ~NoProtectedModeMockBackend() = default;

    static const BackendId& GetIdStatic()
    {
        return NamePolicy::GetIdStatic();
    }
    const BackendId& GetId() const override
    {
        return GetIdStatic();
    }

    IBackendInternal::IMemoryManagerUniquePtr CreateMemoryManager() const override
    {
        return nullptr;
    };

    IBackendInternal::IWorkloadFactoryPtr
        CreateWorkloadFactory(const IBackendInternal::IMemoryManagerSharedPtr&) const override
    {
        return nullptr;
    }

    IBackendInternal::IBackendContextPtr CreateBackendContext(const IRuntime::CreationOptions&) const override
    {
        return nullptr;
    }

    IBackendInternal::ILayerSupportSharedPtr GetLayerSupport() const override
    {
        return std::make_shared<MockLayerSupport>();
    }

    OptimizationViews OptimizeSubgraphView(const SubgraphView&) const override
    {
        return {};
    };

    BackendCapabilities GetCapabilities() const override
    {
        return m_BackendCapabilities;
    };

    BackendCapabilities m_BackendCapabilities;
};

} // namespace

TEST_SUITE("Optimizer")
{
using namespace armnn::optimizations;

TEST_CASE("LSTMValidateTensorShapesFromInputsCIFGDisabledTest")
{
    Graph graph;

    // Helper function creates graph containing LSTM layer with required input and output layers
    CreateLSTMLayerHelper(graph, false);

    // This function used to call ValidateShapesFromInputs()
    CHECK_NOTHROW(graph.InferTensorInfos());
}

TEST_CASE("LSTMValidateTensorShapesFromInputsCIFGEnabledTest")
{
    Graph graph;

    // Helper function creates graph containing LSTM layer with required input and output layers
    CreateLSTMLayerHelper(graph, true);

    // This function used to call ValidateShapesFromInputs()
    CHECK_NOTHROW(graph.InferTensorInfos());
}

TEST_CASE("InsertConvertersTest")
{
    const armnn::TensorInfo info({ 1, 5, 2, 3 }, armnn::DataType::Float16);

    armnn::Graph graph;

    armnn::LayerBindingId inputId = 0;

    armnn::Layer* head = graph.AddLayer<armnn::OutputLayer>(0, "output");

    head = graph.InsertNewLayer<armnn::AdditionLayer>(head->GetInputSlot(0), "");
    head->GetOutputHandler().SetTensorInfo(info);

    graph.InsertNewLayer<armnn::InputLayer>(head->GetInputSlot(1), inputId++, "")
        ->GetOutputHandler().SetTensorInfo(info);

    head = graph.InsertNewLayer<armnn::FloorLayer>(head->GetInputSlot(0), "");
    head->GetOutputHandler().SetTensorInfo(info);

    head = graph.InsertNewLayer<armnn::MemCopyLayer>(head->GetInputSlot(0), "");
    head->GetOutputHandler().SetTensorInfo(info);

    graph.InsertNewLayer<armnn::InputLayer>(head->GetInputSlot(0), inputId++, "")
        ->GetOutputHandler().SetTensorInfo(info);

    // Check graph layer sequence before inserting convert layers
    CHECK(CheckSequence(graph.cbegin(),
                        graph.cend(),
                        &IsLayerOfType<armnn::InputLayer>,
                        &IsLayerOfType<armnn::InputLayer>,
                        &IsLayerOfType<armnn::MemCopyLayer>,
                        &IsLayerOfType<armnn::FloorLayer>,
                        &IsLayerOfType<armnn::AdditionLayer>,
                        &IsLayerOfType<armnn::OutputLayer>));

    // Check layers have Float16 DataType
    for (auto& layer : graph)
    {
        if (layer->GetType() == LayerType::Floor || layer->GetType() == LayerType::Addition)
        {
            ARMNN_ASSERT(layer->GetOutputSlot(0).GetTensorInfo().GetDataType() == DataType::Float16);
            ARMNN_ASSERT(layer->GetDataType() == DataType::Float16);
        }
    }

    // Insert convert layers either side of unsupported layer
    for (auto& layer : graph)
    {
        if (layer->GetType() == LayerType::Floor || layer->GetType() == LayerType::Addition)
        {
            InsertConvertFp16ToFp32LayersBefore(graph, *layer);
            InsertConvertFp32ToFp16LayersAfter(graph, *layer);
        }
    }

    // Check layers have correct DataType after inserting convert layers
    for (auto& layer : graph)
    {
        if (layer->GetType() == LayerType::Floor || layer->GetType() == LayerType::Addition)
        {
            ARMNN_ASSERT(layer->GetOutputSlot(0).GetTensorInfo().GetDataType() == DataType::Float32);
            ARMNN_ASSERT(layer->GetDataType() == DataType::Float32);
        }
        else if (layer->GetType() == LayerType::ConvertFp16ToFp32)
        {
            ARMNN_ASSERT(layer->GetOutputSlot(0).GetTensorInfo().GetDataType() == DataType::Float32);
            ARMNN_ASSERT(layer->GetDataType() == DataType::Float16);
        }
        else if (layer->GetType() == LayerType::ConvertFp32ToFp16)
        {
            ARMNN_ASSERT(layer->GetOutputSlot(0).GetTensorInfo().GetDataType() == DataType::Float16);
            ARMNN_ASSERT(layer->GetDataType() == DataType::Float32);
        }
    }

    // Check sequence of layers after inserting convert layers
    CHECK(CheckSequence(graph.cbegin(),
                        graph.cend(),
                        &IsLayerOfType<armnn::InputLayer>,
                        &IsLayerOfType<armnn::InputLayer>,
                        &IsLayerOfType<armnn::ConvertFp16ToFp32Layer>,
                        &IsLayerOfType<armnn::MemCopyLayer>,
                        &IsLayerOfType<armnn::ConvertFp16ToFp32Layer>,
                        &IsLayerOfType<armnn::FloorLayer>,
                        &IsLayerOfType<armnn::ConvertFp32ToFp16Layer>,
                        &IsLayerOfType<armnn::ConvertFp16ToFp32Layer>,
                        &IsLayerOfType<armnn::AdditionLayer>,
                        &IsLayerOfType<armnn::ConvertFp32ToFp16Layer>,
                        &IsLayerOfType<armnn::OutputLayer>));
}

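// Builds a graph with Input and Constant(weights) layers feeding a
// Convolution2d, followed by an Output layer, so the convolution output
// shape can be inferred from the given input/weights shapes.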
void CreateConvolution2dGraph(Graph& graph, const unsigned int* inputShape,
                              const unsigned int* weightsShape, const unsigned int* outputShape,
                              DataLayout dataLayout = DataLayout::NCHW)
{
    armnn::TensorInfo inputInfo(4, inputShape, DataType::Float32);
    armnn::TensorInfo outputInfo(4, outputShape, DataType::Float32);

    std::vector<float> weightsVector(90);
    armnn::ConstTensor weights(
            armnn::TensorInfo(4, weightsShape, armnn::DataType::Float32, 0.0f, 0, true),
            weightsVector);

    Convolution2dDescriptor desc;
    desc.m_BiasEnabled = false;
    desc.m_StrideX = 1;
    desc.m_StrideY = 1;
    desc.m_DataLayout = dataLayout;

    Layer* input = graph.AddLayer<InputLayer>(0, "input");
    input->GetOutputSlot().SetTensorInfo(inputInfo);

    ConstantLayer* weightsLayer = graph.AddLayer<ConstantLayer>("Weights");
    weightsLayer->m_LayerOutput = std::make_shared<ScopedTensorHandle>(weights);
    weightsLayer->GetOutputSlot(0).SetTensorInfo(weightsLayer->m_LayerOutput->GetTensorInfo());

    Convolution2dLayer* layer = graph.AddLayer<Convolution2dLayer>(desc, "conv2d");
    layer->m_Weight = std::make_unique<armnn::ScopedTensorHandle>(weights);
    layer->GetOutputSlot().SetTensorInfo(outputInfo);

    Layer* output = graph.AddLayer<OutputLayer>(0, "output");
    input->GetOutputSlot().Connect(layer->GetInputSlot(0));
    layer->GetOutputSlot().Connect(output->GetInputSlot(0));
    weightsLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(1));
}

TEST_CASE("Conv2dValidateTensorShapesFromInputs")
{
    Graph graph;
    const unsigned int inputShape[] = { 1, 3, 8, 16 };
    const unsigned int weightsShape[] = { 2, 3, 5, 3 };
    const unsigned int outputShape[] = { 1, 2, 4, 14 };
    CreateConvolution2dGraph(graph, inputShape, weightsShape, outputShape);

    CHECK_NOTHROW(graph.InferTensorInfos());
}

TEST_CASE("Conv2dValidateTensorShapesFromInputsNhwc")
{
    Graph graph;
    const unsigned int inputShape[] = { 1, 8, 16, 3 };
    const unsigned int weightsShape[] = { 2, 5, 3, 3 };
    const unsigned int outputShape[] = { 1, 4, 14, 2 };
    CreateConvolution2dGraph(graph, inputShape, weightsShape, outputShape, DataLayout::NHWC);

    CHECK_NOTHROW(graph.InferTensorInfos());
}

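// Same pattern as CreateConvolution2dGraph, but for DepthwiseConvolution2d.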
void CreateDepthwiseConvolution2dGraph(Graph& graph, const unsigned int* inputShape,
                                       const unsigned int* weightsShape, const unsigned int* outputShape,
                                       DataLayout dataLayout = DataLayout::NCHW)
{
    armnn::TensorInfo inputInfo(4, inputShape, DataType::Float32);
    armnn::TensorInfo outputInfo(4, outputShape, DataType::Float32);
    armnn::TensorInfo weightsInfo(TensorShape(4, weightsShape), armnn::DataType::Float32, 0.0f, 0, true);

    std::vector<float> weightsVector(18);
    armnn::ConstTensor weights(weightsInfo, weightsVector);

    DepthwiseConvolution2dDescriptor desc;
    desc.m_BiasEnabled = false;
    desc.m_StrideX = 1;
    desc.m_StrideY = 1;
    desc.m_DataLayout = dataLayout;

    InputLayer* input = graph.AddLayer<InputLayer>(0, "input");
    DepthwiseConvolution2dLayer* layer = graph.AddLayer<DepthwiseConvolution2dLayer>(desc, "depthwiseConv2d");
    ConstantLayer* weightsLayer = graph.AddLayer<ConstantLayer>("weights");
    OutputLayer* output = graph.AddLayer<OutputLayer>(0, "output");

    input->GetOutputSlot().SetTensorInfo(inputInfo);
    layer->GetOutputSlot().SetTensorInfo(outputInfo);
    weightsLayer->GetOutputSlot().SetTensorInfo(weightsInfo);

    weightsLayer->m_LayerOutput = std::make_unique<armnn::ScopedTensorHandle>(weights);

    input->GetOutputSlot().Connect(layer->GetInputSlot(0));
    weightsLayer->GetOutputSlot().Connect(layer->GetInputSlot(1));
    layer->GetOutputSlot().Connect(output->GetInputSlot(0));
}

TEST_CASE("DepthwiseConv2dValidateTensorShapesFromInputs")
{
    Graph graph;
    const unsigned int inputShape[] = { 1, 2, 3, 3 };
    const unsigned int weightsShape[] = { 1, 3, 3, 2 };
    const unsigned int outputShape[] = { 1, 2, 1, 1 };
    CreateDepthwiseConvolution2dGraph(graph, inputShape, weightsShape, outputShape);

    CHECK_NOTHROW(graph.InferTensorInfos());
}

TEST_CASE("DepthwiseConv2dValidateTensorShapesFromInputsNhwc")
{
    Graph graph;
    const unsigned int inputShape[] = { 1, 3, 3, 2 };
    const unsigned int weightsShape[] = { 1, 3, 3, 2 };
    const unsigned int outputShape[] = { 1, 1, 1, 2 };
    CreateDepthwiseConvolution2dGraph(graph, inputShape, weightsShape, outputShape, DataLayout::NHWC);

    CHECK_NOTHROW(graph.InferTensorInfos());
}

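// Builds Input -> Pooling2d -> Output with an oversized pool and padding
// configuration; only the inferred output shape matters for these tests.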
void CreatePooling2dGraph(Graph& graph, const unsigned int* inputShape, const unsigned int* outputShape,
                          DataLayout dataLayout = DataLayout::NCHW)
{
    armnn::TensorInfo inputInfo(4, inputShape, DataType::Float32);
    armnn::TensorInfo outputInfo(4, outputShape, DataType::Float32);

    Pooling2dDescriptor desc;
    desc.m_PoolType = armnn::PoolingAlgorithm::Average;
    desc.m_PoolWidth = desc.m_PoolHeight = 100;
    desc.m_StrideX = desc.m_StrideY = 5;
    desc.m_PadLeft = 50;
    desc.m_PadRight = 50;
    desc.m_PadTop = 50;
    desc.m_PadBottom = 50;
    desc.m_PaddingMethod = armnn::PaddingMethod::Exclude;
    desc.m_DataLayout = dataLayout;

    Layer* input = graph.AddLayer<InputLayer>(0, "input");
    input->GetOutputSlot().SetTensorInfo(inputInfo);

    Pooling2dLayer* layer = graph.AddLayer<Pooling2dLayer>(desc, "pooling2d");
    layer->GetOutputSlot().SetTensorInfo(outputInfo);

    Layer* output = graph.AddLayer<OutputLayer>(0, "output");
    input->GetOutputSlot().Connect(layer->GetInputSlot(0));
    layer->GetOutputSlot().Connect(output->GetInputSlot(0));
}

TEST_CASE("Pooling2dValidateTensorShapesFromInputs")
{
    Graph graph;
    const unsigned int inputShape[] = { 5, 3, 52, 60 };
    const unsigned int outputShape[] = { 5, 3, 11, 13 };
    CreatePooling2dGraph(graph, inputShape, outputShape, DataLayout::NCHW);

    CHECK_NOTHROW(graph.InferTensorInfos());
}

TEST_CASE("Pooling2dValidateTensorShapesFromInputsNhwc")
{
    Graph graph;
    const unsigned int inputShape[] = { 5, 52, 60, 3 };
    const unsigned int outputShape[] = { 5, 11, 13, 3 };
    CreatePooling2dGraph(graph, inputShape, outputShape, DataLayout::NHWC);

    CHECK_NOTHROW(graph.InferTensorInfos());
}

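// Builds Input -> Resize -> Output with a fixed 4x3 bilinear target size.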
void CreateResizeBilinearGraph(Graph& graph,
                               const unsigned int* inputShape,
                               const unsigned int* outputShape,
                               DataLayout dataLayout = DataLayout::NCHW)
{
    TensorInfo inputInfo(4, inputShape, DataType::Float32);
    TensorInfo outputInfo(4, outputShape, DataType::Float32);

    ResizeDescriptor desc;
    desc.m_Method = ResizeMethod::Bilinear;
    desc.m_TargetHeight = 3;
    desc.m_TargetWidth = 4;
    desc.m_DataLayout = dataLayout;

    Layer* input = graph.AddLayer<InputLayer>(0, "input");
    input->GetOutputSlot().SetTensorInfo(inputInfo);

    ResizeLayer* layer = graph.AddLayer<ResizeLayer>(desc, "resizeBilinear");
    layer->GetOutputSlot().SetTensorInfo(outputInfo);

    Layer* output = graph.AddLayer<OutputLayer>(0, "output");
    input->GetOutputSlot().Connect(layer->GetInputSlot(0));
    layer->GetOutputSlot().Connect(output->GetInputSlot(0));
}

TEST_CASE("ResizeBilinearValidateTensorShapesFromInputs")
{
    Graph graph;
    const unsigned int inputShape[] = { 1, 2, 4, 5 };
    const unsigned int outputShape[] = { 1, 2, 3, 4 };
    CreateResizeBilinearGraph(graph, inputShape, outputShape);

    CHECK_NOTHROW(graph.InferTensorInfos());
}

TEST_CASE("ResizeBilinearValidateTensorShapesFromInputsNhwc")
{
    Graph graph;
    const unsigned int inputShape[] = { 1, 4, 5, 2 };
    const unsigned int outputShape[] = { 1, 3, 4, 2 };
    CreateResizeBilinearGraph(graph, inputShape, outputShape, DataLayout::NHWC);

    CHECK_NOTHROW(graph.InferTensorInfos());
}

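// Builds a Gather layer fed by params and indices input layers, using a
// default-constructed GatherDescriptor (gather along axis 0).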
void CreateGatherGraph(Graph& graph,
                       const armnn::TensorInfo& paramsInfo,
                       const armnn::TensorInfo& indicesInfo,
                       const armnn::TensorInfo& outputInfo)
{
    Layer* input0 = graph.AddLayer<InputLayer>(0, "params");
    input0->GetOutputSlot().SetTensorInfo(paramsInfo);

    Layer* input1 = graph.AddLayer<InputLayer>(1, "indices");
    input1->GetOutputSlot().SetTensorInfo(indicesInfo);

    GatherDescriptor descriptor;
    GatherLayer* layer = graph.AddLayer<GatherLayer>(descriptor, "gather");
    layer->GetOutputSlot().SetTensorInfo(outputInfo);

    Layer* output = graph.AddLayer<OutputLayer>(0, "output");
    input0->GetOutputSlot().Connect(layer->GetInputSlot(0));
    input1->GetOutputSlot().Connect(layer->GetInputSlot(1));
    layer->GetOutputSlot().Connect(output->GetInputSlot(0));
}

TEST_CASE("GatherValidateTensorShapesFromInputs")
{
    Graph graph;
    armnn::TensorInfo paramsInfo({10, 5}, DataType::Float32);
    armnn::TensorInfo indicesInfo({3}, DataType::Signed32);
    armnn::TensorInfo outputInfo({3, 5}, DataType::Float32);

    CreateGatherGraph(graph, paramsInfo, indicesInfo, outputInfo);

    CHECK_NOTHROW(graph.InferTensorInfos());
}

TEST_CASE("GatherValidateTensorShapesFromInputs1DParams")
{
    Graph graph;
    armnn::TensorInfo paramsInfo({8}, DataType::Float32);
    armnn::TensorInfo indicesInfo({5}, DataType::Signed32);
    armnn::TensorInfo outputInfo({5}, DataType::Float32);

    CreateGatherGraph(graph, paramsInfo, indicesInfo, outputInfo);

    CHECK_NOTHROW(graph.InferTensorInfos());
}

TEST_CASE("GatherValidateTensorShapesFromInputsMultiDimIndices")
{
    Graph graph;
    armnn::TensorInfo paramsInfo({3, 2, 5}, DataType::Float32);
    armnn::TensorInfo indicesInfo({2, 2}, DataType::Signed32);
    armnn::TensorInfo outputInfo({2, 2, 2, 5}, DataType::Float32);

    CreateGatherGraph(graph, paramsInfo, indicesInfo, outputInfo);

    CHECK_NOTHROW(graph.InferTensorInfos());
}

TEST_CASE("DetectionPostProcessValidateTensorShapes")
{
    Graph graph;
    armnn::TensorInfo boxEncodingsInfo({1, 10, 4}, DataType::QAsymmU8);
    armnn::TensorInfo scoresInfo({1, 10, 4}, DataType::QAsymmU8);
    std::vector<uint8_t> anchorsVector(40);
    armnn::ConstTensor anchors(armnn::TensorInfo({10, 4}, armnn::DataType::QAsymmU8, 0.0f, 0, true), anchorsVector);

    armnn::TensorInfo detectionBoxesInfo({1, 3, 4}, DataType::QAsymmU8);
    armnn::TensorInfo detectionScoresInfo({1, 3}, DataType::QAsymmU8);
    armnn::TensorInfo detectionClassesInfo({1, 3}, DataType::QAsymmU8);
    armnn::TensorInfo numDetectionInfo({1}, DataType::QAsymmU8);

    Layer* input0 = graph.AddLayer<InputLayer>(0, "boxEncodings");
    input0->GetOutputSlot().SetTensorInfo(boxEncodingsInfo);

    Layer* input1 = graph.AddLayer<InputLayer>(1, "score");
    input1->GetOutputSlot().SetTensorInfo(scoresInfo);

    DetectionPostProcessDescriptor descriptor;
    descriptor.m_MaxDetections = 3;

    DetectionPostProcessLayer* layer = graph.AddLayer<DetectionPostProcessLayer>(descriptor, "detectionPostProcess");
    layer->m_Anchors = std::make_unique<armnn::ScopedTensorHandle>(anchors);
    layer->GetOutputSlot(0).SetTensorInfo(detectionBoxesInfo);
    layer->GetOutputSlot(1).SetTensorInfo(detectionScoresInfo);
    layer->GetOutputSlot(2).SetTensorInfo(detectionClassesInfo);
    layer->GetOutputSlot(3).SetTensorInfo(numDetectionInfo);

    input0->GetOutputSlot().Connect(layer->GetInputSlot(0));
    input1->GetOutputSlot().Connect(layer->GetInputSlot(1));

    CHECK_NOTHROW(graph.InferTensorInfos());
}

TEST_CASE("BackendCapabilityTest")
{
    BackendId backendId = "MockBackend";

    armnn::BackendOptions::BackendOption nonConstWeights{"NonConstWeights", true};

    // MockBackend does not support the NonConstWeights capability
    CHECK(!armnn::HasCapability(nonConstWeights, backendId));
    CHECK(!armnn::HasCapability("NonConstWeights", backendId));

    // MockBackend does not support the AsyncExecution capability
    CHECK(!armnn::GetCapability("AsyncExecution", backendId).has_value());
}

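// Checks that a BackendSelectionHint is honoured by AssignBackends: the
// activation layer should land on "CustomBackend", while the input and output
// layers fall back to the first preference, "MockBackend".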
TEST_CASE("BackendHintTest")
{
    class TestBackendAssignment : public StrategyBase<NoThrowStrategy>
    {
    public:
        void ExecuteStrategy(const armnn::IConnectableLayer* layer,
                             const armnn::BaseDescriptor& descriptor,
                             const std::vector<armnn::ConstTensor>& constants,
                             const char* name,
                             const armnn::LayerBindingId id = 0) override
        {
            armnn::IgnoreUnused(descriptor, constants, id, name);
            switch (layer->GetType())
            {
                case LayerType::Input:
                {
                    auto inputLayer = PolymorphicDowncast<const InputLayer*>(layer);
                    const auto connectedLayerBackendId = inputLayer->GetOutputSlot(0).GetOwningLayer().GetBackendId();
                    CHECK((inputLayer->GetBackendId() == connectedLayerBackendId));
                    break;
                }
                case LayerType::Output:
                {
                    auto outputLayer = PolymorphicDowncast<const OutputLayer*>(layer);
                    CHECK((outputLayer->GetBackendId() == "MockBackend"));
                    break;
                }
                case LayerType::Activation:
                {
                    auto activation = PolymorphicDowncast<const ActivationLayer*>(layer);
                    CHECK((activation->GetBackendId() == "CustomBackend"));
                    break;
                }
                default:
                {
                    m_DefaultStrategy.Apply(GetLayerTypeAsCString(layer->GetType()));
                }
            }
        }
    };

    struct CustomPolicy
    {
        static const BackendId& GetIdStatic()
        {
            static BackendId id = "CustomBackend";
            return id;
        }
    };

    struct MockPolicy
    {
        static const BackendId& GetIdStatic()
        {
            static BackendId id = "MockBackend";
            return id;
        }
    };

    auto& backendRegistry = BackendRegistryInstance();

    backendRegistry.Register("MockBackend", []() { return std::make_unique<CustomAllocatorBackend<MockPolicy>>(); });

    backendRegistry.Register("CustomBackend",
                             []() { return std::make_unique<CustomAllocatorBackend<CustomPolicy>>(); });

    // Define the network
    auto network = INetwork::Create();
    ActivationDescriptor desc;
    desc.m_Function = ActivationFunction::Linear;

    std::unique_ptr<Graph> graph = std::make_unique<Graph>();
    auto input = graph->AddLayer<InputLayer>(0, "input");
    auto act = graph->AddLayer<ActivationLayer>(desc, "activation");
    auto output = graph->AddLayer<OutputLayer>(0, "output");

    BackendId customBackendId("CustomBackend");
    act->BackendSelectionHint(customBackendId);

    input->GetOutputSlot(0).Connect(act->GetInputSlot(0));
    act->GetOutputSlot(0).Connect(output->GetInputSlot(0));

    OptimizedNetworkImpl optNet(std::move(graph));

    // Get the optimized graph
    Graph& optGraph = optNet.GetGraph();

    std::vector<BackendId> prefs{ "MockBackend", "CustomBackend" };

    BackendIdSet availableBackends = { "CustomBackend", "MockBackend" };
    DeviceSpec spec(availableBackends);

    BackendSettings backendSettings(prefs, spec);

    // Assign an available backend to each layer
    Graph::Iterator firstLayer = optGraph.begin();
    Graph::Iterator lastLayer = optGraph.end();

    OptimizedNetworkImpl* optNetObjPtr = &optNet;
    OptimizationResult res = AssignBackends(optNetObjPtr,
                                            backendSettings,
                                            firstLayer,
                                            lastLayer,
                                            EmptyOptional());

    CHECK(res.IsOk());

    TestBackendAssignment visitor;
    for (auto it = firstLayer; it != lastLayer; ++it)
    {
        (*it)->ExecuteStrategy(visitor);
    }
    // Clean up the registry for the next test.
    backendRegistry.Deregister("MockBackend");
    backendRegistry.Deregister("CustomBackend");
}

// Tests that OptimizeForExclusiveConnections works, fusing when needed, using BatchNorm fusing as example
TEST_CASE("OptimizeForExclusiveConnectionsFuseTest")
{
    using namespace armnn;
    // Define layers information
    Convolution2dDescriptor convolution2dDescriptor;
    convolution2dDescriptor.m_BiasEnabled = false;
    convolution2dDescriptor.m_DataLayout = DataLayout::NHWC;
    BatchNormalizationDescriptor batchNormDescriptor;
    batchNormDescriptor.m_DataLayout = DataLayout::NHWC;

    const unsigned int inputDimensionSizes[] = { 1, 4, 4, 3 };            // NHWCin
    const unsigned int weightsDimensionSizes[] = { 1, 2, 2, 3 };          // CoutHWCin
    const unsigned int outputDimensionSizes[] = { 1, 3, 3, 1 };           // NHWCout
    const unsigned int outputChannelSize[] = { outputDimensionSizes[3] }; // Cout

    TensorInfo inputInfo(4, inputDimensionSizes, DataType::Float32);
    TensorInfo outputInfo(4, outputDimensionSizes, DataType::Float32);

    std::vector<float> weightsVector = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12 };
    ConstTensor weights(TensorInfo(4, weightsDimensionSizes, DataType::Float32, 0.0f, 0, true), weightsVector);

    std::vector<float> betaVector = { 0.1f };
    std::vector<float> gammaVector = { 0.5f };
    std::vector<float> meanVector = { 0 };
    std::vector<float> varianceVector = { 1 };
    ConstTensor beta(TensorInfo(1, outputChannelSize, DataType::Float32, 0.0f, 0, true), betaVector);
    ConstTensor gamma(TensorInfo(1, outputChannelSize, DataType::Float32, 0.0f, 0, true), gammaVector);
    ConstTensor mean(TensorInfo(1, outputChannelSize, DataType::Float32, 0.0f, 0, true), meanVector);
    ConstTensor variance(TensorInfo(1, outputChannelSize, DataType::Float32, 0.0f, 0, true), varianceVector);

    ConstantLayer* biasLayer = nullptr;

    // Define the network
    Graph graph;
    auto input = graph.AddLayer<InputLayer>(0, "input");
    auto weightsLayer = graph.AddLayer<ConstantLayer>("Weights");
    auto conv = graph.AddLayer<Convolution2dLayer>(convolution2dDescriptor, "convolution");
    auto batchNorm = graph.AddLayer<BatchNormalizationLayer>(batchNormDescriptor, "batchNorm");
    auto output = graph.AddLayer<OutputLayer>(0, "output");

    // Set layer information
    input->GetOutputSlot().SetTensorInfo(inputInfo);

    weightsLayer->m_LayerOutput = std::make_shared<ScopedTensorHandle>(weights);
    weightsLayer->GetOutputSlot(0).SetTensorInfo(weightsLayer->m_LayerOutput->GetTensorInfo());
    conv->GetOutputSlot().SetTensorInfo(outputInfo);

    batchNorm->GetOutputSlot().SetTensorInfo(outputInfo);
    batchNorm->m_Beta = std::make_unique<ScopedTensorHandle>(beta);
    batchNorm->m_Gamma = std::make_unique<ScopedTensorHandle>(gamma);
    batchNorm->m_Mean = std::make_unique<ScopedTensorHandle>(mean);
    batchNorm->m_Variance = std::make_unique<ScopedTensorHandle>(variance);

    if (convolution2dDescriptor.m_BiasEnabled)
    {
        std::vector<float> biasVector = { 11 };
        ConstTensor bias(TensorInfo(1, outputChannelSize, DataType::Float32, 0.0f, 0, true), biasVector);
        biasLayer = graph.AddLayer<ConstantLayer>("Bias");
        biasLayer->m_LayerOutput = std::make_shared<ScopedTensorHandle>(bias);
        biasLayer->GetOutputSlot(0).SetTensorInfo(biasLayer->m_LayerOutput->GetTensorInfo());
        biasLayer->GetOutputSlot(0).Connect(conv->GetInputSlot(2));
        conv->m_Bias = biasLayer->m_LayerOutput;
    }

    // Connect layers
    input->GetOutputSlot(0).Connect(conv->GetInputSlot(0));
    weightsLayer->GetOutputSlot(0).Connect(conv->GetInputSlot(1));
    conv->GetOutputSlot(0).Connect(batchNorm->GetInputSlot(0));
    batchNorm->GetOutputSlot(0).Connect(output->GetInputSlot(0));

    // Temporary workaround to ensure the descriptor weights are populated
    conv->m_Weight = weightsLayer->m_LayerOutput;

    if (convolution2dDescriptor.m_BiasEnabled)
    {
        CHECK(6 == graph.GetNumLayers());
        CHECK(CheckSequence(graph.cbegin(), graph.cend(),
                            &IsLayerOfType<InputLayer>,
                            &IsLayerOfType<ConstantLayer>,
                            &IsLayerOfType<ConstantLayer>,
                            &IsLayerOfType<Convolution2dLayer>,
                            &IsLayerOfType<BatchNormalizationLayer>,
                            &IsLayerOfType<OutputLayer>));
    }
    else
    {
        CHECK(5 == graph.GetNumLayers());
        CHECK(CheckSequence(graph.cbegin(), graph.cend(),
                            &IsLayerOfType<InputLayer>,
                            &IsLayerOfType<ConstantLayer>,
                            &IsLayerOfType<Convolution2dLayer>,
                            &IsLayerOfType<BatchNormalizationLayer>,
                            &IsLayerOfType<OutputLayer>));
    }

    // Optimize graph
    armnn::Optimizer::Pass(graph, MakeOptimizations(FuseBatchNormIntoConvolution2DFloat32()));

    auto checkFusedConv2d = [](const armnn::Layer* const layer) -> bool {
        return IsLayerOfType<armnn::Convolution2dLayer>(layer) &&
               (layer->GetNameStr() == "fused-batchNorm-into-convolution");
    };

    CHECK(5 == graph.GetNumLayers());
    CHECK(CheckSequence(graph.cbegin(), graph.cend(),
                        &IsLayerOfType<InputLayer>,
                        &IsLayerOfType<ConstantLayer>,
                        &IsLayerOfType<ConstantLayer>,
                        checkFusedConv2d,
                        &IsLayerOfType<OutputLayer>));
}

// Tests that OptimizeForExclusiveConnections works, not fusing when not needed, using BatchNorm fusing as example
TEST_CASE("OptimizeForExclusiveConnectionsWithoutFuseTest")
{
    // Define the network
    Graph graph;
    Convolution2dDescriptor convolution2dDescriptor;
    BatchNormalizationDescriptor batchNormDescriptor;

    auto input = graph.AddLayer<InputLayer>(0, "input");
    auto conv = graph.AddLayer<Convolution2dLayer>(convolution2dDescriptor, "convolution");
    auto batchNorm = graph.AddLayer<BatchNormalizationLayer>(batchNormDescriptor, "batchNorm");
    auto output = graph.AddLayer<OutputLayer>(0, "output");
    auto output2 = graph.AddLayer<OutputLayer>(1, "output2");

    // Connect layers
    input->GetOutputSlot(0).Connect(conv->GetInputSlot(0));
    conv->GetOutputSlot(0).Connect(batchNorm->GetInputSlot(0));
    batchNorm->GetOutputSlot(0).Connect(output->GetInputSlot(0));
    conv->GetOutputSlot(0).Connect(output2->GetInputSlot(0));

    CHECK(5 == graph.GetNumLayers());
    CHECK(CheckSequence(graph.cbegin(), graph.cend(),
                        &IsLayerOfType<armnn::InputLayer>,
                        &IsLayerOfType<armnn::Convolution2dLayer>,
                        &IsLayerOfType<armnn::BatchNormalizationLayer>,
                        &IsLayerOfType<armnn::OutputLayer>,
                        &IsLayerOfType<armnn::OutputLayer>));

    // Optimize graph: the convolution output also feeds output2, so the
    // connection is not exclusive and no fusing should take place
    armnn::Optimizer::Pass(graph, MakeOptimizations(FuseBatchNormIntoConvolution2DFloat32()));

    CHECK(5 == graph.GetNumLayers());
    CHECK(CheckSequence(graph.cbegin(), graph.cend(),
                        &IsLayerOfType<armnn::InputLayer>,
                        &IsLayerOfType<armnn::Convolution2dLayer>,
                        &IsLayerOfType<armnn::BatchNormalizationLayer>,
                        &IsLayerOfType<armnn::OutputLayer>,
                        &IsLayerOfType<armnn::OutputLayer>));
}
} // Optimizer TestSuite