ArmNN 21.11
OptimizerTests.cpp
//
// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "TestUtils.hpp"

#include <BackendSettings.hpp>
#include <Graph.hpp>
#include <Network.hpp>
#include <Optimizer.hpp>

#include <armnn/BackendHelper.hpp>
#include <armnn/BackendRegistry.hpp>
#include <armnn/INetwork.hpp>
#include <armnn/StrategyBase.hpp>

#include <armnn/utility/Assert.hpp>
#include <armnn/utility/IgnoreUnused.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>

#include <doctest/doctest.h>

using namespace armnn;

namespace
{

void CreateLSTMLayerHelper(Graph &graph, bool CifgEnabled)
{
    LstmDescriptor layerDesc;
    layerDesc.m_ActivationFunc = 4;
    layerDesc.m_ClippingThresCell = 0.2f;
    layerDesc.m_ClippingThresProj = 0.4f;
    layerDesc.m_CifgEnabled = CifgEnabled;
    layerDesc.m_PeepholeEnabled = false;
    layerDesc.m_ProjectionEnabled = false;

    LstmLayer* const layer = graph.AddLayer<LstmLayer>(layerDesc, "layer");
    unsigned int batchSize = 3;
    unsigned int inputSize = 2;
    unsigned int numUnits = 4;
    unsigned int outputSize = 4;

    layer->m_BasicParameters.m_InputToForgetWeights = std::make_unique<ScopedTensorHandle>
            (TensorInfo({ numUnits, inputSize }, DataType::Float32));
    layer->m_BasicParameters.m_InputToCellWeights = std::make_unique<ScopedTensorHandle>
            (TensorInfo({ numUnits, inputSize }, DataType::Float32));
    layer->m_BasicParameters.m_InputToOutputWeights = std::make_unique<ScopedTensorHandle>
            (TensorInfo({ numUnits, inputSize }, DataType::Float32));
    layer->m_BasicParameters.m_RecurrentToForgetWeights = std::make_unique<ScopedTensorHandle>
            (TensorInfo({ numUnits, outputSize }, DataType::Float32));
    layer->m_BasicParameters.m_RecurrentToCellWeights = std::make_unique<ScopedTensorHandle>
            (TensorInfo({ numUnits, outputSize }, DataType::Float32));
    layer->m_BasicParameters.m_RecurrentToOutputWeights = std::make_unique<ScopedTensorHandle>
            (TensorInfo({ numUnits, outputSize }, DataType::Float32));
    layer->m_BasicParameters.m_ForgetGateBias = std::make_unique<ScopedTensorHandle>
            (TensorInfo({ numUnits }, DataType::Float32));
    layer->m_BasicParameters.m_CellBias = std::make_unique<ScopedTensorHandle>
            (TensorInfo({ numUnits }, DataType::Float32));
    layer->m_BasicParameters.m_OutputGateBias = std::make_unique<ScopedTensorHandle>
            (TensorInfo({ numUnits }, DataType::Float32));

    layer->m_BasicParameters.m_InputToForgetWeights->Allocate();
    layer->m_BasicParameters.m_InputToCellWeights->Allocate();
    layer->m_BasicParameters.m_InputToOutputWeights->Allocate();
    layer->m_BasicParameters.m_RecurrentToForgetWeights->Allocate();
    layer->m_BasicParameters.m_RecurrentToCellWeights->Allocate();
    layer->m_BasicParameters.m_RecurrentToOutputWeights->Allocate();
    layer->m_BasicParameters.m_ForgetGateBias->Allocate();
    layer->m_BasicParameters.m_CellBias->Allocate();
    layer->m_BasicParameters.m_OutputGateBias->Allocate();

    if (!layerDesc.m_CifgEnabled)
    {
        layer->m_CifgParameters.m_InputToInputWeights = std::make_unique<ScopedTensorHandle>
                (TensorInfo({ numUnits, inputSize }, DataType::Float32));
        layer->m_CifgParameters.m_RecurrentToInputWeights = std::make_unique<ScopedTensorHandle>
                (TensorInfo({ numUnits, outputSize }, DataType::Float32));
        layer->m_CifgParameters.m_InputGateBias = std::make_unique<ScopedTensorHandle>
                (TensorInfo({ numUnits }, DataType::Float32));
        layer->m_CifgParameters.m_InputToInputWeights->Allocate();
        layer->m_CifgParameters.m_RecurrentToInputWeights->Allocate();
        layer->m_CifgParameters.m_InputGateBias->Allocate();
    }

    if (layerDesc.m_ProjectionEnabled)
    {
        layer->m_ProjectionParameters.m_ProjectionWeights = std::make_unique<ScopedTensorHandle>
                (TensorInfo({ outputSize, numUnits }, DataType::Float32));
        layer->m_ProjectionParameters.m_ProjectionBias = std::make_unique<ScopedTensorHandle>
                (TensorInfo({ outputSize }, DataType::Float32));
        layer->m_ProjectionParameters.m_ProjectionWeights->Allocate();
        layer->m_ProjectionParameters.m_ProjectionBias->Allocate();
    }

    if (layerDesc.m_PeepholeEnabled)
    {
        if (!layerDesc.m_CifgEnabled)
        {
            layer->m_PeepholeParameters.m_CellToInputWeights = std::make_unique<ScopedTensorHandle>
                    (TensorInfo({ numUnits }, DataType::Float32));
            layer->m_PeepholeParameters.m_CellToInputWeights->Allocate();
        }
        layer->m_PeepholeParameters.m_CellToForgetWeights = std::make_unique<ScopedTensorHandle>
                (TensorInfo({ numUnits }, DataType::Float32));
        layer->m_PeepholeParameters.m_CellToOutputWeights = std::make_unique<ScopedTensorHandle>
                (TensorInfo({ numUnits }, DataType::Float32));
        layer->m_PeepholeParameters.m_CellToForgetWeights->Allocate();
        layer->m_PeepholeParameters.m_CellToOutputWeights->Allocate();
    }

    // create input and output layers
    Layer* const input = graph.AddLayer<InputLayer>(0, "input");
    Layer* const outputStateIn = graph.AddLayer<InputLayer>(1, "outputStateIn");
    Layer* const cellStateIn = graph.AddLayer<InputLayer>(2, "cellStateIn");
    Layer* const scratchBuffer = graph.AddLayer<OutputLayer>(0, "scratchBuffer");
    Layer* const outputStateOut = graph.AddLayer<OutputLayer>(1, "outputStateOut");
    Layer* const cellStateOut = graph.AddLayer<OutputLayer>(2, "cellStateOut");
    Layer* const output = graph.AddLayer<OutputLayer>(3, "output");

    // connect up
    armnn::TensorInfo lstmTensorInfo1({ batchSize, inputSize }, DataType::Float32);
    armnn::TensorInfo lstmTensorInfo2({ batchSize, numUnits }, DataType::Float32);
    armnn::TensorInfo lstmTensorInfo3({ batchSize, outputSize }, DataType::Float32);
    armnn::TensorInfo lstmTensorInfoScratchBuff({ batchSize, numUnits * (layerDesc.m_CifgEnabled ? 3 : 4) },
                                                DataType::Float32);

    Connect(input, layer, lstmTensorInfo1, 0, 0);
    Connect(cellStateIn, layer, lstmTensorInfo2, 0, 1);
    Connect(outputStateIn, layer, lstmTensorInfo3, 0, 2);
    Connect(layer, scratchBuffer, lstmTensorInfoScratchBuff, 0, 0);
    Connect(layer, outputStateOut, lstmTensorInfo3, 1, 0);
    Connect(layer, cellStateOut, lstmTensorInfo2, 2, 0);
    Connect(layer, output, lstmTensorInfo3, 3, 0);
}
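
// Worked example of the scratch buffer shape used above (explanatory note,
// not part of the original file): the LSTM scratch buffer holds one block of
// numUnits values per gate, so with batchSize = 3 and numUnits = 4 its shape
// is { 3, 4 * 4 } = { 3, 16 } when CIFG is disabled (four gates) and
// { 3, 4 * 3 } = { 3, 12 } when CIFG is enabled, since coupling the input
// gate to the forget gate removes one gate's worth of scratch space.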

class MockLayerSupport : public LayerSupportBase
{
public:
    bool IsInputSupported(const TensorInfo& /*input*/,
                          Optional<std::string&> /*reasonIfUnsupported = EmptyOptional()*/) const override
    {
        return true;
    }

    bool IsOutputSupported(const TensorInfo& /*input*/,
                           Optional<std::string&> /*reasonIfUnsupported = EmptyOptional()*/) const override
    {
        return true;
    }

    bool IsActivationSupported(const TensorInfo& /*input0*/,
                               const TensorInfo& /*output*/,
                               const ActivationDescriptor& /*descriptor*/,
                               Optional<std::string&> /*reasonIfUnsupported = EmptyOptional()*/) const override
    {
        return true;
    }
};
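
// Minimal usage sketch for the mock above (illustrative, not part of the
// original file). The optimizer queries these predicates when deciding
// whether a layer can be assigned to a backend; this mock accepts everything:
//
//     MockLayerSupport support;
//     std::string reason;
//     bool ok = support.IsInputSupported(TensorInfo(), Optional<std::string&>(reason));
//     // ok == true; "reason" is never filled in because nothing is rejected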

template <typename NamePolicy>
class MockBackend : public IBackendInternal
{
public:
    MockBackend() :
        m_BackendCapabilities(NamePolicy::GetIdStatic(), {{"NullCapability", false}}),
        m_CustomAllocator(false) {};
    MockBackend(const BackendCapabilities& capabilities) :
        m_BackendCapabilities(capabilities),
        m_CustomAllocator(false) {};
    ~MockBackend() = default;

    static const BackendId& GetIdStatic()
    {
        return NamePolicy::GetIdStatic();
    }
    const BackendId& GetId() const override
    {
        return GetIdStatic();
    }

    IBackendInternal::IMemoryManagerUniquePtr CreateMemoryManager() const override
    {
        return nullptr;
    };

    IBackendInternal::IWorkloadFactoryPtr
        CreateWorkloadFactory(const IBackendInternal::IMemoryManagerSharedPtr&) const override
    {
        return nullptr;
    }

    IBackendInternal::IBackendContextPtr CreateBackendContext(const IRuntime::CreationOptions&) const override
    {
        return nullptr;
    }

    IBackendInternal::ILayerSupportSharedPtr GetLayerSupport() const override
    {
        return std::make_shared<MockLayerSupport>();
    }

    OptimizationViews OptimizeSubgraphView(const SubgraphView&) const override
    {
        return {};
    };

    BackendCapabilities GetCapabilities() const override
    {
        return m_BackendCapabilities;
    };

    virtual bool UseCustomMemoryAllocator(std::shared_ptr<ICustomAllocator> allocator,
                                          armnn::Optional<std::string&> errMsg) override
    {
        IgnoreUnused(errMsg, allocator);
        m_CustomAllocator = true;
        return m_CustomAllocator;
    }

    BackendCapabilities m_BackendCapabilities;
    bool m_CustomAllocator;
};
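
// Usage sketch (illustrative only; "DummyPolicy" is hypothetical, the real
// policies MockPolicy and CustomPolicy are defined in BackendHintTest below):
//
//     struct DummyPolicy
//     {
//         static const BackendId& GetIdStatic() { static BackendId id = "Dummy"; return id; }
//     };
//     MockBackend<DummyPolicy> backend;
//     BackendCapabilities caps = backend.GetCapabilities();
//     // caps carries the single {"NullCapability", false} option set in the constructor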

template <typename NamePolicy>
class NoProtectedModeMockBackend : public IBackendInternal
{
public:
    NoProtectedModeMockBackend() : m_BackendCapabilities(NamePolicy::GetIdStatic(), {{"NullCapability", false}}) {};
    NoProtectedModeMockBackend(const BackendCapabilities& capabilities) : m_BackendCapabilities(capabilities) {};
    ~NoProtectedModeMockBackend() = default;

    static const BackendId& GetIdStatic()
    {
        return NamePolicy::GetIdStatic();
    }
    const BackendId& GetId() const override
    {
        return GetIdStatic();
    }

    IBackendInternal::IMemoryManagerUniquePtr CreateMemoryManager() const override
    {
        return nullptr;
    };

    IBackendInternal::IWorkloadFactoryPtr
        CreateWorkloadFactory(const IBackendInternal::IMemoryManagerSharedPtr&) const override
    {
        return nullptr;
    }

    IBackendInternal::IBackendContextPtr CreateBackendContext(const IRuntime::CreationOptions&) const override
    {
        return nullptr;
    }

    IBackendInternal::ILayerSupportSharedPtr GetLayerSupport() const override
    {
        return std::make_shared<MockLayerSupport>();
    }

    OptimizationViews OptimizeSubgraphView(const SubgraphView&) const override
    {
        return {};
    };

    BackendCapabilities GetCapabilities() const override
    {
        return m_BackendCapabilities;
    };

    BackendCapabilities m_BackendCapabilities;
};
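
// Note (explanatory, not part of the original file): the only difference from
// MockBackend above is that this class does not override
// UseCustomMemoryAllocator, so it keeps the IBackendInternal default of
// rejecting a custom allocator. Having both mocks lets tests exercise the
// protected-mode and non-protected-mode code paths.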

} // namespace

TEST_SUITE("Optimizer")
{
using namespace armnn::optimizations;

TEST_CASE("LSTMValidateTensorShapesFromInputsCIFGDisabledTest")
{
    Graph graph;

    // Helper function creates graph containing LSTM layer with required input and output layers
    CreateLSTMLayerHelper(graph, false);

    // This function used to call ValidateShapesFromInputs()
    CHECK_NOTHROW(graph.InferTensorInfos());
}

TEST_CASE("LSTMValidateTensorShapesFromInputsCIFGEnabledTest")
{
    Graph graph;

    // Helper function creates graph containing LSTM layer with required input and output layers
    CreateLSTMLayerHelper(graph, true);

    // This function used to call ValidateShapesFromInputs()
    CHECK_NOTHROW(graph.InferTensorInfos());
}

TEST_CASE("InsertConvertersTest")
{
    const armnn::TensorInfo info({ 1, 5, 2, 3 }, armnn::DataType::Float16);

    armnn::Graph graph;

    armnn::LayerBindingId inputId = 0;

    armnn::Layer* head = graph.AddLayer<armnn::OutputLayer>(0, "output");

    head = graph.InsertNewLayer<armnn::AdditionLayer>(head->GetInputSlot(0), "");
    head->GetOutputHandler().SetTensorInfo(info);

    graph.InsertNewLayer<armnn::InputLayer>(head->GetInputSlot(1), inputId++, "")
        ->GetOutputHandler().SetTensorInfo(info);

    head = graph.InsertNewLayer<armnn::FloorLayer>(head->GetInputSlot(0), "");
    head->GetOutputHandler().SetTensorInfo(info);

    head = graph.InsertNewLayer<armnn::MemCopyLayer>(head->GetInputSlot(0), "");
    head->GetOutputHandler().SetTensorInfo(info);

    graph.InsertNewLayer<armnn::InputLayer>(head->GetInputSlot(0), inputId++, "")
        ->GetOutputHandler().SetTensorInfo(info);

    // Check graph layer sequence before inserting convert layers
    CHECK(CheckSequence(graph.cbegin(),
                        graph.cend(),
                        &IsLayerOfType<armnn::InputLayer>,
                        &IsLayerOfType<armnn::InputLayer>,
                        &IsLayerOfType<armnn::MemCopyLayer>,
                        &IsLayerOfType<armnn::FloorLayer>,
                        &IsLayerOfType<armnn::AdditionLayer>,
                        &IsLayerOfType<armnn::OutputLayer>));

    // Check layers have Float16 DataType
    for (auto& layer : graph)
    {
        if (layer->GetType() == LayerType::Floor || layer->GetType() == LayerType::Addition)
        {
            ARMNN_ASSERT(layer->GetOutputSlot(0).GetTensorInfo().GetDataType() == DataType::Float16);
            ARMNN_ASSERT(layer->GetDataType() == DataType::Float16);
        }
    }

    // Insert convert layers either side of unsupported layer
    for (auto& layer : graph)
    {
        if (layer->GetType() == LayerType::Floor || layer->GetType() == LayerType::Addition)
        {
            InsertConvertFp16ToFp32LayersBefore(graph, *layer);
            InsertConvertFp32ToFp16LayersAfter(graph, *layer);
        }
    }

    // Check layers have correct DataType after inserting convert layers
    for (auto& layer : graph)
    {
        if (layer->GetType() == LayerType::Floor || layer->GetType() == LayerType::Addition)
        {
            ARMNN_ASSERT(layer->GetOutputSlot(0).GetTensorInfo().GetDataType() == DataType::Float32);
            ARMNN_ASSERT(layer->GetDataType() == DataType::Float32);
        }
        else if (layer->GetType() == LayerType::ConvertFp16ToFp32)
        {
            ARMNN_ASSERT(layer->GetOutputSlot(0).GetTensorInfo().GetDataType() == DataType::Float32);
            ARMNN_ASSERT(layer->GetDataType() == DataType::Float16);
        }
        else if (layer->GetType() == LayerType::ConvertFp32ToFp16)
        {
            ARMNN_ASSERT(layer->GetOutputSlot(0).GetTensorInfo().GetDataType() == DataType::Float16);
            ARMNN_ASSERT(layer->GetDataType() == DataType::Float32);
        }
    }

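    // Note (explanatory, not part of the original test): converters are
    // inserted per flagged layer and per slot, so Addition receives a
    // ConvertFp16ToFp32 on each of its two inputs while Floor gets its own
    // ConvertFp32ToFp16 on its output. That is why an Fp32->Fp16/Fp16->Fp32
    // pair sits back-to-back between Floor and Addition in the sequence
    // checked below.
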
    // Check sequence of layers after inserting convert layers
    CHECK(CheckSequence(graph.cbegin(),
                        graph.cend(),
                        &IsLayerOfType<armnn::InputLayer>,
                        &IsLayerOfType<armnn::InputLayer>,
                        &IsLayerOfType<armnn::ConvertFp16ToFp32Layer>,
                        &IsLayerOfType<armnn::MemCopyLayer>,
                        &IsLayerOfType<armnn::ConvertFp16ToFp32Layer>,
                        &IsLayerOfType<armnn::FloorLayer>,
                        &IsLayerOfType<armnn::ConvertFp32ToFp16Layer>,
                        &IsLayerOfType<armnn::ConvertFp16ToFp32Layer>,
                        &IsLayerOfType<armnn::AdditionLayer>,
                        &IsLayerOfType<armnn::ConvertFp32ToFp16Layer>,
                        &IsLayerOfType<armnn::OutputLayer>));
}

void CreateConvolution2dGraph(Graph &graph, const unsigned int* inputShape,
                              const unsigned int* weightsShape, const unsigned int* outputShape,
                              DataLayout dataLayout = DataLayout::NCHW)
{
    armnn::TensorInfo inputInfo(4, inputShape, DataType::Float32);
    armnn::TensorInfo outputInfo(4, outputShape, DataType::Float32);

    std::vector<float> weightsVector(90);
    armnn::ConstTensor weights(
            armnn::TensorInfo(4, weightsShape, armnn::DataType::Float32, 0.0f, 0, true),
            weightsVector);

    Convolution2dDescriptor desc;
    desc.m_BiasEnabled = false;
    desc.m_StrideX = 1;
    desc.m_StrideY = 1;
    desc.m_DataLayout = dataLayout;

    Layer* input = graph.AddLayer<InputLayer>(0, "input");
    input->GetOutputSlot().SetTensorInfo(inputInfo);

    Convolution2dLayer* layer = graph.AddLayer<Convolution2dLayer>(desc, "conv2d");
    layer->m_Weight = std::make_unique<armnn::ScopedTensorHandle>(weights);
    layer->GetOutputSlot().SetTensorInfo(outputInfo);

    Layer* output = graph.AddLayer<OutputLayer>(0, "output");
    input->GetOutputSlot().Connect(layer->GetInputSlot(0));
    layer->GetOutputSlot().Connect(output->GetInputSlot(0));
}
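
// Shape check for the NCHW test below (explanatory note, not part of the
// original file): with no padding and stride 1, outH = inH - kH + 1 and
// outW = inW - kW + 1, so input { 1, 3, 8, 16 } with 5x3 kernels and two
// output channels gives { 1, 2, 8 - 5 + 1, 16 - 3 + 1 } = { 1, 2, 4, 14 }.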

TEST_CASE("Conv2dValidateTensorShapesFromInputs")
{
    Graph graph;
    const unsigned int inputShape[] = { 1, 3, 8, 16 };
    const unsigned int weightsShape[] = { 2, 3, 5, 3 };
    const unsigned int outputShape[] = { 1, 2, 4, 14 };
    CreateConvolution2dGraph(graph, inputShape, weightsShape, outputShape);

    CHECK_NOTHROW(graph.InferTensorInfos());
}

TEST_CASE("Conv2dValidateTensorShapesFromInputsNhwc")
{
    Graph graph;
    const unsigned int inputShape[] = { 1, 8, 16, 3 };
    const unsigned int weightsShape[] = { 2, 5, 3, 3 };
    const unsigned int outputShape[] = { 1, 4, 14, 2 };
    CreateConvolution2dGraph(graph, inputShape, weightsShape, outputShape, DataLayout::NHWC);

    CHECK_NOTHROW(graph.InferTensorInfos());
}

void CreateDepthwiseConvolution2dGraph(Graph &graph, const unsigned int* inputShape,
                                       const unsigned int* weightsShape, const unsigned int* outputShape,
                                       DataLayout dataLayout = DataLayout::NCHW)
{
    armnn::TensorInfo inputInfo(4, inputShape, DataType::Float32);
    armnn::TensorInfo outputInfo(4, outputShape, DataType::Float32);

    std::vector<float> weightsVector(18);
    armnn::ConstTensor weights(
            armnn::TensorInfo(4, weightsShape, armnn::DataType::Float32, 0.0f, 0, true),
            weightsVector);

    DepthwiseConvolution2dDescriptor desc;
    desc.m_BiasEnabled = false;
    desc.m_StrideX = 1;
    desc.m_StrideY = 1;
    desc.m_DataLayout = dataLayout;

    Layer* input = graph.AddLayer<InputLayer>(0, "input");
    input->GetOutputSlot().SetTensorInfo(inputInfo);

    DepthwiseConvolution2dLayer* layer = graph.AddLayer<DepthwiseConvolution2dLayer>(desc, "depthwiseConv2d");
    layer->m_Weight = std::make_unique<armnn::ScopedTensorHandle>(weights);
    layer->GetOutputSlot().SetTensorInfo(outputInfo);

    Layer* output = graph.AddLayer<OutputLayer>(0, "output");
    input->GetOutputSlot().Connect(layer->GetInputSlot(0));
    layer->GetOutputSlot().Connect(output->GetInputSlot(0));
}
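
// Shape check for the NCHW test below (explanatory note, not part of the
// original file): in this ArmNN version depthwise weights are laid out as
// [ 1, H, W, Cin * multiplier ], so { 1, 3, 3, 2 } is a 3x3 kernel with
// multiplier 1 over 2 input channels; with stride 1 and no padding, input
// { 1, 2, 3, 3 } maps to { 1, 2, (3-3)/1 + 1, (3-3)/1 + 1 } = { 1, 2, 1, 1 }.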

TEST_CASE("DepthwiseConv2dValidateTensorShapesFromInputs")
{
    Graph graph;
    const unsigned int inputShape[] = { 1, 2, 3, 3 };
    const unsigned int weightsShape[] = { 1, 3, 3, 2 };
    const unsigned int outputShape[] = { 1, 2, 1, 1 };
    CreateDepthwiseConvolution2dGraph(graph, inputShape, weightsShape, outputShape);

    CHECK_NOTHROW(graph.InferTensorInfos());
}

TEST_CASE("DepthwiseConv2dValidateTensorShapesFromInputsNhwc")
{
    Graph graph;
    const unsigned int inputShape[] = { 1, 3, 3, 2 };
    const unsigned int weightsShape[] = { 1, 3, 3, 2 };
    const unsigned int outputShape[] = { 1, 1, 1, 2 };
    CreateDepthwiseConvolution2dGraph(graph, inputShape, weightsShape, outputShape, DataLayout::NHWC);

    CHECK_NOTHROW(graph.InferTensorInfos());
}

void CreatePooling2dGraph(Graph& graph, const unsigned int* inputShape, const unsigned int* outputShape,
                          DataLayout dataLayout = DataLayout::NCHW)
{
    armnn::TensorInfo inputInfo(4, inputShape, DataType::Float32);
    armnn::TensorInfo outputInfo(4, outputShape, DataType::Float32);

    Pooling2dDescriptor desc;
    desc.m_PoolType = armnn::PoolingAlgorithm::Average; // restored from context; the inferred shape is the same for any pool type
    desc.m_PoolWidth = desc.m_PoolHeight = 100;
    desc.m_StrideX = desc.m_StrideY = 5;
    desc.m_PadLeft = 50;
    desc.m_PadRight = 50;
    desc.m_PadTop = 50;
    desc.m_PadBottom = 50;
    desc.m_PaddingMethod = armnn::PaddingMethod::Exclude;
    desc.m_DataLayout = dataLayout;

    Layer* input = graph.AddLayer<InputLayer>(0, "input");
    input->GetOutputSlot().SetTensorInfo(inputInfo);

    Pooling2dLayer* layer = graph.AddLayer<Pooling2dLayer>(desc, "pooling2d");
    layer->GetOutputSlot().SetTensorInfo(outputInfo);

    Layer* output = graph.AddLayer<OutputLayer>(0, "output");
    input->GetOutputSlot().Connect(layer->GetInputSlot(0));
    layer->GetOutputSlot().Connect(output->GetInputSlot(0));
}
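
// Shape check for the tests below (explanatory note, not part of the original
// file): out = floor((in + padBefore + padAfter - poolSize) / stride) + 1, so
// height = floor((52 + 50 + 50 - 100) / 5) + 1 = 11 and
// width  = floor((60 + 50 + 50 - 100) / 5) + 1 = 13, matching { 5, 3, 11, 13 }.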

TEST_CASE("Pooling2dValidateTensorShapesFromInputs")
{
    Graph graph;
    const unsigned int inputShape[] = { 5, 3, 52, 60 };
    const unsigned int outputShape[] = { 5, 3, 11, 13 };
    CreatePooling2dGraph(graph, inputShape, outputShape, DataLayout::NCHW);

    CHECK_NOTHROW(graph.InferTensorInfos());
}

TEST_CASE("Pooling2dValidateTensorShapesFromInputsNhwc")
{
    Graph graph;
    const unsigned int inputShape[] = { 5, 52, 60, 3 };
    const unsigned int outputShape[] = { 5, 11, 13, 3 };
    CreatePooling2dGraph(graph, inputShape, outputShape, DataLayout::NHWC);

    CHECK_NOTHROW(graph.InferTensorInfos());
}

void CreateResizeBilinearGraph(Graph& graph,
                               const unsigned int* inputShape,
                               const unsigned int* outputShape,
                               DataLayout dataLayout = DataLayout::NCHW)
{
    TensorInfo inputInfo(4, inputShape, DataType::Float32);
    TensorInfo outputInfo(4, outputShape, DataType::Float32);

    ResizeDescriptor desc;
    desc.m_Method = ResizeMethod::Bilinear;
    desc.m_TargetHeight = 3;
    desc.m_TargetWidth = 4;
    desc.m_DataLayout = dataLayout;

    Layer* input = graph.AddLayer<InputLayer>(0, "input");
    input->GetOutputSlot().SetTensorInfo(inputInfo);

    ResizeLayer* layer = graph.AddLayer<ResizeLayer>(desc, "resizeBilinear");
    layer->GetOutputSlot().SetTensorInfo(outputInfo);

    Layer* output = graph.AddLayer<OutputLayer>(0, "output");
    input->GetOutputSlot().Connect(layer->GetInputSlot(0));
    layer->GetOutputSlot().Connect(output->GetInputSlot(0));
}
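
// Note (explanatory, not part of the original file): Resize derives the
// output height and width from m_TargetHeight = 3 and m_TargetWidth = 4
// rather than from the input, so shape inference yields { 1, 2, 3, 4 } in
// NCHW and { 1, 3, 4, 2 } in NHWC for the tests below.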

TEST_CASE("ResizeBilinearValidateTensorShapesFromInputs")
{
    Graph graph;
    const unsigned int inputShape[] = { 1, 2, 4, 5 };
    const unsigned int outputShape[] = { 1, 2, 3, 4 };
    CreateResizeBilinearGraph(graph, inputShape, outputShape);

    CHECK_NOTHROW(graph.InferTensorInfos());
}

TEST_CASE("ResizeBilinearValidateTensorShapesFromInputsNhwc")
{
    Graph graph;
    const unsigned int inputShape[] = { 1, 4, 5, 2 };
    const unsigned int outputShape[] = { 1, 3, 4, 2 };
    CreateResizeBilinearGraph(graph, inputShape, outputShape, DataLayout::NHWC);

    CHECK_NOTHROW(graph.InferTensorInfos());
}

void CreateGatherGraph(Graph& graph,
                       const armnn::TensorInfo& paramsInfo,
                       const armnn::TensorInfo& indicesInfo,
                       const armnn::TensorInfo& outputInfo)
{
    Layer* input0 = graph.AddLayer<InputLayer>(0, "params");
    input0->GetOutputSlot().SetTensorInfo(paramsInfo);

    Layer* input1 = graph.AddLayer<InputLayer>(1, "indices");
    input1->GetOutputSlot().SetTensorInfo(indicesInfo);

    GatherDescriptor descriptor;
    GatherLayer* layer = graph.AddLayer<GatherLayer>(descriptor, "gather");
    layer->GetOutputSlot().SetTensorInfo(outputInfo);

    Layer* output = graph.AddLayer<OutputLayer>(0, "output");
    input0->GetOutputSlot().Connect(layer->GetInputSlot(0));
    input1->GetOutputSlot().Connect(layer->GetInputSlot(1));
    layer->GetOutputSlot().Connect(output->GetInputSlot(0));
}
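
// Shape rule exercised by the Gather tests below (explanatory note, not part
// of the original file): with the default axis of 0, the inferred output
// shape is the indices shape followed by the params shape minus its first
// dimension, e.g. params { 10, 5 } with indices { 3 } gives { 3, 5 }, and
// params { 3, 2, 5 } with indices { 2, 2 } gives { 2, 2, 2, 5 }.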

TEST_CASE("GatherValidateTensorShapesFromInputs")
{
    Graph graph;
    armnn::TensorInfo paramsInfo({10, 5}, DataType::Float32);
    armnn::TensorInfo indicesInfo({3}, DataType::Signed32);
    armnn::TensorInfo outputInfo({3, 5}, DataType::Float32);

    CreateGatherGraph(graph, paramsInfo, indicesInfo, outputInfo);

    CHECK_NOTHROW(graph.InferTensorInfos());
}

TEST_CASE("GatherValidateTensorShapesFromInputs1DParams")
{
    Graph graph;
    armnn::TensorInfo paramsInfo({8}, DataType::Float32);
    armnn::TensorInfo indicesInfo({5}, DataType::Signed32);
    armnn::TensorInfo outputInfo({5}, DataType::Float32);

    CreateGatherGraph(graph, paramsInfo, indicesInfo, outputInfo);

    CHECK_NOTHROW(graph.InferTensorInfos());
}

TEST_CASE("GatherValidateTensorShapesFromInputsMultiDimIndices")
{
    Graph graph;
    armnn::TensorInfo paramsInfo({3, 2, 5}, DataType::Float32);
    armnn::TensorInfo indicesInfo({2, 2}, DataType::Signed32);
    armnn::TensorInfo outputInfo({2, 2, 2, 5}, DataType::Float32);

    CreateGatherGraph(graph, paramsInfo, indicesInfo, outputInfo);

    CHECK_NOTHROW(graph.InferTensorInfos());
}

TEST_CASE("DetectionPostProcessValidateTensorShapes")
{
    Graph graph;
    armnn::TensorInfo boxEncodingsInfo({1, 10, 4}, DataType::QAsymmU8);
    armnn::TensorInfo scoresInfo({1, 10, 4}, DataType::QAsymmU8);
    std::vector<uint8_t> anchorsVector(40);
    armnn::ConstTensor anchors(armnn::TensorInfo({10, 4}, armnn::DataType::QAsymmU8, 0.0f, 0, true), anchorsVector);

    armnn::TensorInfo detectionBoxesInfo({1, 3, 4}, DataType::QAsymmU8);
    armnn::TensorInfo detectionScoresInfo({1, 3}, DataType::QAsymmU8);
    armnn::TensorInfo detectionClassesInfo({1, 3}, DataType::QAsymmU8);
    armnn::TensorInfo numDetectionInfo({1}, DataType::QAsymmU8);

    Layer* input0 = graph.AddLayer<InputLayer>(0, "boxEncodings");
    input0->GetOutputSlot().SetTensorInfo(boxEncodingsInfo);

    Layer* input1 = graph.AddLayer<InputLayer>(1, "score");
    input1->GetOutputSlot().SetTensorInfo(scoresInfo);

    DetectionPostProcessDescriptor descriptor;
    descriptor.m_MaxDetections = 3;

    DetectionPostProcessLayer* layer = graph.AddLayer<DetectionPostProcessLayer>(descriptor, "detectionPostProcess");
    layer->m_Anchors = std::make_unique<armnn::ScopedTensorHandle>(anchors);
    layer->GetOutputSlot(0).SetTensorInfo(detectionBoxesInfo);
    layer->GetOutputSlot(1).SetTensorInfo(detectionScoresInfo);
    layer->GetOutputSlot(2).SetTensorInfo(detectionClassesInfo);
    layer->GetOutputSlot(3).SetTensorInfo(numDetectionInfo);

    input0->GetOutputSlot().Connect(layer->GetInputSlot(0));
    input1->GetOutputSlot().Connect(layer->GetInputSlot(1));

    CHECK_NOTHROW(graph.InferTensorInfos());
}
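
// Note (explanatory, not part of the original file): DetectionPostProcess is
// the only layer in this suite with four output slots; m_MaxDetections = 3 is
// what drives the { 1, 3, ... } shapes of the detection boxes, scores and
// classes outputs validated above.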

TEST_CASE("BackendCapabilityTest")
{
    BackendId backendId = "MockBackend";

    armnn::BackendOptions::BackendOption nonConstWeights{"NonConstWeights", true};

    // MockBackend does not support the NonConstWeights capability
    CHECK(!armnn::HasCapability(nonConstWeights, backendId));
    CHECK(!armnn::HasCapability("NonConstWeights", backendId));

    // MockBackend does not support the AsyncExecution capability
    CHECK(!armnn::GetCapability("AsyncExecution", backendId).has_value());
}
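
// A minimal sketch of the same capability checks against a hand-built
// capability set instead of a registered backend (illustrative values, not
// part of the original file):
//
//     BackendCapabilities caps("SomeBackend", {{"NonConstWeights", true}});
//     CHECK(armnn::HasCapability("NonConstWeights", caps));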

TEST_CASE("BackendHintTest")
{
    class TestBackendAssignment : public StrategyBase<NoThrowStrategy>
    {
    public:

        void ExecuteStrategy(const armnn::IConnectableLayer* layer,
                             const armnn::BaseDescriptor& descriptor,
                             const std::vector<armnn::ConstTensor>& constants,
                             const char* name,
                             const armnn::LayerBindingId id = 0) override
        {
            armnn::IgnoreUnused(descriptor, constants, id, name);
            switch (layer->GetType())
            {
                case armnn::LayerType::Input:
                {
                    auto inputLayer = PolymorphicDowncast<const InputLayer*>(layer);
                    const auto connectedLayerBackendId = inputLayer->GetOutputSlot(0).GetOwningLayer().GetBackendId();
                    CHECK((inputLayer->GetBackendId() == connectedLayerBackendId));
                    break;
                }
                case armnn::LayerType::Output:
                {
                    auto outputLayer = PolymorphicDowncast<const OutputLayer*>(layer);
                    CHECK((outputLayer->GetBackendId() == "MockBackend"));
                    break;
                }
                case armnn::LayerType::Activation:
                {
                    auto activation = PolymorphicDowncast<const ActivationLayer*>(layer);
                    CHECK((activation->GetBackendId() == "CustomBackend"));
                    break;
                }
                default:
                {
                    m_DefaultStrategy.Apply(GetLayerTypeAsCString(layer->GetType()));
                }
            }
        }
    };

    struct CustomPolicy
    {
        static const BackendId& GetIdStatic()
        {
            static BackendId id = "CustomBackend";
            return id;
        }
    };

    struct MockPolicy
    {
        static const BackendId& GetIdStatic()
        {
            static BackendId id = "MockBackend";
            return id;
        }
    };

    auto& backendRegistry = BackendRegistryInstance();

    backendRegistry.Register("MockBackend", []() { return std::make_unique<MockBackend<MockPolicy>>(); });

    backendRegistry.Register("CustomBackend", []() { return std::make_unique<MockBackend<CustomPolicy>>(); });

    // Define the network
    auto network = INetwork::Create();
    ActivationDescriptor desc;
    desc.m_Function = ActivationFunction::Linear; // restored from context; any function works for this backend-assignment test

    std::unique_ptr<Graph> graph = std::make_unique<Graph>();
    auto input = graph->AddLayer<InputLayer>(0, "input");
    auto act = graph->AddLayer<ActivationLayer>(desc, "activation");
    auto output = graph->AddLayer<OutputLayer>(0, "output");

    BackendId customBackendId("CustomBackend");
    act->BackendSelectionHint(customBackendId);

    input->GetOutputSlot(0).Connect(act->GetInputSlot(0));
    act->GetOutputSlot(0).Connect(output->GetInputSlot(0));

    OptimizedNetworkImpl optNet(std::move(graph));

    // Get the optimized graph
    Graph& optGraph = optNet.GetGraph();

    std::vector<BackendId> prefs{ "MockBackend", "CustomBackend" };

    BackendIdSet availableBackends = { "CustomBackend", "MockBackend" };
    DeviceSpec spec(availableBackends);

    BackendSettings backendSettings(prefs, spec);

    // Assign an available backend to each layer
    Graph::Iterator firstLayer = optGraph.begin();
    Graph::Iterator lastLayer = optGraph.end();

    OptimizedNetworkImpl* optNetObjPtr = &optNet;
    OptimizationResult res = AssignBackends(optNetObjPtr,
                                            backendSettings,
                                            firstLayer,
                                            lastLayer,
                                            EmptyOptional());

    CHECK(res.IsOk());

    TestBackendAssignment visitor;
    for (auto it = firstLayer; it != lastLayer; ++it)
    {
        (*it)->ExecuteStrategy(visitor);
    }
    // Clean up the registry for the next test.
    backendRegistry.Deregister("MockBackend");
    backendRegistry.Deregister("CustomBackend");
}
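
// Note (explanatory, not part of the original file): AssignBackends honours
// BackendSelectionHint when the hinted backend is registered and supports the
// layer, so the activation above lands on "CustomBackend" even though
// "MockBackend" comes first in the preference list, while the input and
// output layers fall back to preference-order assignment.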

// Tests that OptimizeForExclusiveConnections works, fusing when needed, using BatchNorm fusing as example
TEST_CASE("OptimizeForExclusiveConnectionsFuseTest")
{
    using namespace armnn;
    // Define layers information
    Convolution2dDescriptor convolution2dDescriptor;
    convolution2dDescriptor.m_BiasEnabled = false;
    convolution2dDescriptor.m_DataLayout = DataLayout::NHWC;
    BatchNormalizationDescriptor batchNormDescriptor;
    batchNormDescriptor.m_DataLayout = DataLayout::NHWC;

    const unsigned int inputDimensionSizes[] = { 1, 4, 4, 3 };            // NHWCin
    const unsigned int weightsDimensionSizes[] = { 1, 2, 2, 3 };          // CoutHWCin
    const unsigned int outputDimensionSizes[] = { 1, 3, 3, 1 };           // NHWCout
    const unsigned int outputChannelSize[] = { outputDimensionSizes[3] }; // Cout

    TensorInfo inputInfo(4, inputDimensionSizes, DataType::Float32);
    TensorInfo outputInfo(4, outputDimensionSizes, DataType::Float32);

    std::vector<float> weightsVector = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12 };
    ConstTensor weights(TensorInfo(4, weightsDimensionSizes, DataType::Float32, 0.0f, 0, true), weightsVector);

    std::vector<float> betaVector = { 0.1f };
    std::vector<float> gammaVector = { 0.5f };
    std::vector<float> meanVector = { 0 };
    std::vector<float> varianceVector = { 1 };
    ConstTensor beta(TensorInfo(1, outputChannelSize, DataType::Float32, 0.0f, 0, true), betaVector);
    ConstTensor gamma(TensorInfo(1, outputChannelSize, DataType::Float32, 0.0f, 0, true), gammaVector);
    ConstTensor mean(TensorInfo(1, outputChannelSize, DataType::Float32, 0.0f, 0, true), meanVector);
    ConstTensor variance(TensorInfo(1, outputChannelSize, DataType::Float32, 0.0f, 0, true), varianceVector);

    // Define the network
    Graph graph;
    auto input = graph.AddLayer<InputLayer>(0, "input");
    auto conv = graph.AddLayer<Convolution2dLayer>(convolution2dDescriptor, "convolution");
    auto batchNorm = graph.AddLayer<BatchNormalizationLayer>(batchNormDescriptor, "batchNorm");
    auto output = graph.AddLayer<OutputLayer>(0, "output");

    // Set layer information
    input->GetOutputSlot().SetTensorInfo(inputInfo);
    conv->GetOutputSlot().SetTensorInfo(outputInfo);
    batchNorm->GetOutputSlot().SetTensorInfo(outputInfo);
    conv->m_Weight = std::make_unique<ScopedTensorHandle>(weights);
    batchNorm->m_Beta = std::make_unique<ScopedTensorHandle>(beta);
    batchNorm->m_Gamma = std::make_unique<ScopedTensorHandle>(gamma);
    batchNorm->m_Mean = std::make_unique<ScopedTensorHandle>(mean);
    batchNorm->m_Variance = std::make_unique<ScopedTensorHandle>(variance);
    if (convolution2dDescriptor.m_BiasEnabled)
    {
        std::vector<float> biasVector = { 11 };
        ConstTensor bias(TensorInfo(1, outputChannelSize, DataType::Float32, 0.0f, 0, true), biasVector);
        conv->m_Bias = std::make_unique<ScopedTensorHandle>(bias);
    }

    // Connect layers
    input->GetOutputSlot(0).Connect(conv->GetInputSlot(0));
    conv->GetOutputSlot(0).Connect(batchNorm->GetInputSlot(0));
    batchNorm->GetOutputSlot(0).Connect(output->GetInputSlot(0));

    CHECK(4 == graph.GetNumLayers());
    CHECK(CheckSequence(graph.cbegin(), graph.cend(),
                        &IsLayerOfType<InputLayer>,
                        &IsLayerOfType<Convolution2dLayer>,
                        &IsLayerOfType<BatchNormalizationLayer>,
                        &IsLayerOfType<OutputLayer>));

    // Optimize graph
    armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(FuseBatchNormIntoConvolution2DFloat32()));

    auto checkFusedConv2d = [](const armnn::Layer* const layer) -> bool {
        return IsLayerOfType<armnn::Convolution2dLayer>(layer) &&
               (layer->GetNameStr() == "fused-batchNorm-into-convolution");
    };

    CHECK(3 == graph.GetNumLayers());
    CHECK(CheckSequence(graph.cbegin(), graph.cend(),
                        &IsLayerOfType<InputLayer>,
                        checkFusedConv2d,
                        &IsLayerOfType<OutputLayer>));
}
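
// The arithmetic behind the fusion checked above (explanatory note, not part
// of the original file): folding BatchNorm into the convolution rescales the
// weights and bias per output channel as
//     W' = W * gamma / sqrt(variance + eps)
//     b' = (b - mean) * gamma / sqrt(variance + eps) + beta
// With mean = 0, variance = 1 and bias disabled, the constants above reduce
// to W' ~ 0.5 * W and b' = beta = 0.1 (up to the small epsilon term).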

// Tests that OptimizeForExclusiveConnections works, not fusing when not needed, using BatchNorm fusing as example
TEST_CASE("OptimizeForExclusiveConnectionsWithoutFuseTest")
{
    // Define the network
    Graph graph;
    Convolution2dDescriptor convolution2dDescriptor;
    BatchNormalizationDescriptor batchNormDescriptor;

    auto input = graph.AddLayer<InputLayer>(0, "input");
    auto conv = graph.AddLayer<Convolution2dLayer>(convolution2dDescriptor, "convolution");
    auto batchNorm = graph.AddLayer<BatchNormalizationLayer>(batchNormDescriptor, "batchNorm");
    auto output = graph.AddLayer<OutputLayer>(0, "output");
    auto output2 = graph.AddLayer<OutputLayer>(1, "output2");

    // Connect layers
    input->GetOutputSlot(0).Connect(conv->GetInputSlot(0));
    conv->GetOutputSlot(0).Connect(batchNorm->GetInputSlot(0));
    batchNorm->GetOutputSlot(0).Connect(output->GetInputSlot(0));
    conv->GetOutputSlot(0).Connect(output2->GetInputSlot(0));
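    // conv's output now feeds both batchNorm and output2, so the
    // Convolution2d -> BatchNormalization connection is no longer exclusive
    // and the pass below must leave the graph unchanged. (Explanatory
    // comment, not part of the original test.)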

    CHECK(5 == graph.GetNumLayers());
    CHECK(CheckSequence(graph.cbegin(), graph.cend(),
                        &IsLayerOfType<armnn::InputLayer>,
                        &IsLayerOfType<armnn::Convolution2dLayer>,
                        &IsLayerOfType<armnn::BatchNormalizationLayer>,
                        &IsLayerOfType<armnn::OutputLayer>,
                        &IsLayerOfType<armnn::OutputLayer>));
    // Optimize graph
    armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(FuseBatchNormIntoConvolution2DFloat32()));

    CHECK(5 == graph.GetNumLayers());
    CHECK(CheckSequence(graph.cbegin(), graph.cend(),
                        &IsLayerOfType<armnn::InputLayer>,
                        &IsLayerOfType<armnn::Convolution2dLayer>,
                        &IsLayerOfType<armnn::BatchNormalizationLayer>,
                        &IsLayerOfType<armnn::OutputLayer>,
                        &IsLayerOfType<armnn::OutputLayer>));
}
} // Optimizer TestSuite