ArmNN 22.08 - IsLayerSupportedTestImpl.hpp
//
// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once

#include <Graph.hpp>

// ScopedTensorHandle, the *QueueDescriptor types, IWorkloadFactory and IgnoreUnused
// are used throughout this file.
#include <armnn/backends/TensorHandle.hpp>
#include <armnn/backends/WorkloadData.hpp>
#include <armnn/backends/WorkloadFactory.hpp>
#include <armnn/utility/IgnoreUnused.hpp>

#include <sstream>

#include <doctest/doctest.h>
namespace
{
armnn::Graph dummyGraph;

// Make a dummy TensorInfo object.
template<armnn::DataType DataType>
armnn::TensorInfo MakeDummyTensorInfo()
{
    return armnn::TensorInfo({2,2,2,2}, DataType, 1.0, 0);
}

// Make a dummy WorkloadInfo using a dummy TensorInfo.
template<armnn::DataType DataType>
armnn::WorkloadInfo MakeDummyWorkloadInfo(unsigned int numInputs, unsigned int numOutputs)
{
    armnn::WorkloadInfo info;

    for (unsigned int i = 0; i < numInputs; i++)
    {
        info.m_InputTensorInfos.push_back(MakeDummyTensorInfo<DataType>());
    }

    for (unsigned int o = 0; o < numOutputs; o++)
    {
        info.m_OutputTensorInfos.push_back(MakeDummyTensorInfo<DataType>());
    }

    return info;
}
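
// Illustrative sketch (not prescribed by this file): a dummy WorkloadInfo for a
// hypothetical two-input, one-output Float32 workload would be built as
//
//     armnn::WorkloadInfo info = MakeDummyWorkloadInfo<armnn::DataType::Float32>(2, 1);
//     // info.m_InputTensorInfos.size() == 2, info.m_OutputTensorInfos.size() == 1,
//     // and every entry is the {2,2,2,2} tensor produced by MakeDummyTensorInfo.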

// Template class to create a dummy layer (2 parameters).
template<typename LayerType, typename DescType = typename LayerType::DescriptorType>
struct DummyLayer
{
    DummyLayer()
    {
        m_Layer = dummyGraph.AddLayer<LayerType>(DescType(), "");
    }

    ~DummyLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }

    LayerType* m_Layer;
};
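
// Illustrative sketch: DummyLayer is an RAII helper, so a scoped instance adds a layer
// to dummyGraph on construction and erases it again on destruction. For example, with
// a descriptor-carrying layer such as ActivationLayer:
//
//     {
//         DummyLayer<armnn::ActivationLayer> dummy; // AddLayer(ActivationDescriptor(), "")
//         // ... query dummy.m_Layer here ...
//     }   // ~DummyLayer() erases the layer from dummyGraph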

// Template class to create a dummy layer (1 parameter).
template<typename LayerType>
struct DummyLayer<LayerType, void>
{
    DummyLayer()
    {
        m_Layer = dummyGraph.AddLayer<LayerType>("");
    }

    ~DummyLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }

    LayerType* m_Layer;
};

template<>
struct DummyLayer<armnn::BatchNormalizationLayer>
{
    DummyLayer()
    {
        m_Layer = dummyGraph.AddLayer<armnn::BatchNormalizationLayer>(
            armnn::BatchNormalizationDescriptor(), "");
        m_Layer->m_Mean = std::make_unique<armnn::ScopedTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_Variance = std::make_unique<armnn::ScopedTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_Beta = std::make_unique<armnn::ScopedTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_Gamma = std::make_unique<armnn::ScopedTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
    }

    ~DummyLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }

    armnn::BatchNormalizationLayer* m_Layer;
};

template<>
struct DummyLayer<armnn::BatchToSpaceNdLayer>
{
    DummyLayer()
    {
        m_Layer = dummyGraph.AddLayer<armnn::BatchToSpaceNdLayer>(
            armnn::BatchToSpaceNdDescriptor(), "");
    }

    ~DummyLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }

    armnn::BatchToSpaceNdLayer* m_Layer;
};

template<>
struct DummyLayer<armnn::ConstantLayer, void>
{
    DummyLayer()
    {
        m_Layer = dummyGraph.AddLayer<armnn::ConstantLayer>("");
    }

    ~DummyLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }

    armnn::ConstantLayer* m_Layer;
};

template<>
struct DummyLayer<armnn::InputLayer, armnn::LayerBindingId>
{
    DummyLayer()
    {
        m_Layer = dummyGraph.AddLayer<armnn::InputLayer>(armnn::LayerBindingId(), "");
    }

    ~DummyLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }

    armnn::InputLayer* m_Layer;
};

template<>
struct DummyLayer<armnn::ConcatLayer>
{
    DummyLayer()
    {
        armnn::OriginsDescriptor desc(2);
        m_Layer = dummyGraph.AddLayer<armnn::ConcatLayer>(desc, "");
    }

    ~DummyLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }

    armnn::ConcatLayer* m_Layer;
};

template<>
struct DummyLayer<armnn::MapLayer, void>
{
    DummyLayer()
    {
        m_Layer = dummyGraph.AddLayer<armnn::MapLayer>("");
    }

    ~DummyLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }

    armnn::MapLayer* m_Layer;
};

template<>
struct DummyLayer<armnn::OutputLayer, armnn::LayerBindingId>
{
    DummyLayer()
    {
        m_Layer = dummyGraph.AddLayer<armnn::OutputLayer>(armnn::LayerBindingId(), "");
    }

    ~DummyLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }

    armnn::OutputLayer* m_Layer;
};

template<>
struct DummyLayer<armnn::SplitterLayer>
{
    DummyLayer()
    {
        armnn::ViewsDescriptor desc(1);
        m_Layer = dummyGraph.AddLayer<armnn::SplitterLayer>(desc, "");
    }

    ~DummyLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }

    armnn::SplitterLayer* m_Layer;
};

template<>
struct DummyLayer<armnn::UnmapLayer, void>
{
    DummyLayer()
    {
        m_Layer = dummyGraph.AddLayer<armnn::UnmapLayer>("");
    }

    ~DummyLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }

    armnn::UnmapLayer* m_Layer;
};

template <typename ConvolutionLayerType>
struct DummyConvolutionLayer
{
    DummyConvolutionLayer()
    {
        typename ConvolutionLayerType::DescriptorType desc;
        desc.m_StrideX = 1;
        desc.m_StrideY = 1;
        m_Layer = dummyGraph.AddLayer<ConvolutionLayerType>(desc, "");
        m_Layer->m_Weight = std::make_unique<armnn::ScopedTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_Bias = std::make_unique<armnn::ScopedTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
    }

    ~DummyConvolutionLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }

    ConvolutionLayerType* m_Layer;
};
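
// Note on the pattern used by the specializations in this file: any layer whose
// IsLayerSupported() check may dereference constant tensor handles (weights, bias,
// anchors, ...) gets those handles pre-populated with minimal {1,1,1,1} tensors.
// Illustrative sketch, using a hypothetical layer type FooLayer with an m_Weight handle:
//
//     m_Layer = dummyGraph.AddLayer<armnn::FooLayer>(desc, "");   // FooLayer is hypothetical
//     m_Layer->m_Weight = std::make_unique<armnn::ScopedTensorHandle>(
//         armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));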

template<>
struct DummyLayer<armnn::Convolution2dLayer>
    : public DummyConvolutionLayer<armnn::Convolution2dLayer>
{
};

template<>
struct DummyLayer<armnn::DepthwiseConvolution2dLayer>
    : public DummyConvolutionLayer<armnn::DepthwiseConvolution2dLayer>
{
};

template<>
struct DummyLayer<armnn::TransposeConvolution2dLayer>
    : public DummyConvolutionLayer<armnn::TransposeConvolution2dLayer>
{
};
277 template<>
278 struct DummyLayer<armnn::DetectionPostProcessLayer>
279 {
280  DummyLayer()
281  {
283  m_Layer->m_Anchors = std::make_unique<armnn::ScopedTensorHandle>(
285  }
286 
287  ~DummyLayer()
288  {
289  dummyGraph.EraseLayer(m_Layer);
290  }
291 
293 };

template <typename LstmLayerType>
struct DummyLstmLayer
{
    DummyLstmLayer()
    {
        typename LstmLayerType::DescriptorType desc;
        desc.m_CifgEnabled = false;

        m_Layer = dummyGraph.AddLayer<LstmLayerType>(desc, "");
        m_Layer->m_BasicParameters.m_InputToForgetWeights = std::make_unique<armnn::ScopedTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_BasicParameters.m_InputToCellWeights = std::make_unique<armnn::ScopedTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_BasicParameters.m_InputToOutputWeights = std::make_unique<armnn::ScopedTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_BasicParameters.m_RecurrentToForgetWeights = std::make_unique<armnn::ScopedTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_BasicParameters.m_RecurrentToCellWeights = std::make_unique<armnn::ScopedTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_BasicParameters.m_RecurrentToOutputWeights = std::make_unique<armnn::ScopedTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_BasicParameters.m_ForgetGateBias = std::make_unique<armnn::ScopedTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_BasicParameters.m_CellBias = std::make_unique<armnn::ScopedTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_BasicParameters.m_OutputGateBias = std::make_unique<armnn::ScopedTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));

        // CIFG is disabled above, so the optional CIFG parameters are populated too.
        m_Layer->m_CifgParameters.m_InputToInputWeights = std::make_unique<armnn::ScopedTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_CifgParameters.m_RecurrentToInputWeights = std::make_unique<armnn::ScopedTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_CifgParameters.m_InputGateBias = std::make_unique<armnn::ScopedTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
    }

    ~DummyLstmLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }

    LstmLayerType* m_Layer;
};

template<>
struct DummyLayer<armnn::LstmLayer>
    : public DummyLstmLayer<armnn::LstmLayer>
{
};

template <typename UnidirectionalSequenceLstmLayerType>
struct DummyUnidirectionalSequenceLstmLayer
{
    DummyUnidirectionalSequenceLstmLayer()
    {
        typename UnidirectionalSequenceLstmLayerType::DescriptorType desc;
        desc.m_CifgEnabled = false;

        m_Layer = dummyGraph.AddLayer<UnidirectionalSequenceLstmLayerType>(desc, "");
        m_Layer->m_BasicParameters.m_InputToForgetWeights = std::make_unique<armnn::ScopedTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_BasicParameters.m_InputToCellWeights = std::make_unique<armnn::ScopedTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_BasicParameters.m_InputToOutputWeights = std::make_unique<armnn::ScopedTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_BasicParameters.m_RecurrentToForgetWeights = std::make_unique<armnn::ScopedTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_BasicParameters.m_RecurrentToCellWeights = std::make_unique<armnn::ScopedTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_BasicParameters.m_RecurrentToOutputWeights = std::make_unique<armnn::ScopedTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_BasicParameters.m_ForgetGateBias = std::make_unique<armnn::ScopedTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_BasicParameters.m_CellBias = std::make_unique<armnn::ScopedTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_BasicParameters.m_OutputGateBias = std::make_unique<armnn::ScopedTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));

        // CIFG is disabled above, so the optional CIFG parameters are populated too.
        m_Layer->m_CifgParameters.m_InputToInputWeights = std::make_unique<armnn::ScopedTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_CifgParameters.m_RecurrentToInputWeights = std::make_unique<armnn::ScopedTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_CifgParameters.m_InputGateBias = std::make_unique<armnn::ScopedTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
    }

    ~DummyUnidirectionalSequenceLstmLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }

    UnidirectionalSequenceLstmLayerType* m_Layer;
};

template<>
struct DummyLayer<armnn::UnidirectionalSequenceLstmLayer>
    : public DummyUnidirectionalSequenceLstmLayer<armnn::UnidirectionalSequenceLstmLayer>
{
};

template<>
struct DummyLayer<armnn::QLstmLayer>
{
    DummyLayer()
    {
        armnn::QLstmLayer::DescriptorType desc;
        desc.m_CifgEnabled = false;
        desc.m_PeepholeEnabled = true;
        desc.m_ProjectionEnabled = true;
        desc.m_LayerNormEnabled = true;

        m_Layer = dummyGraph.AddLayer<armnn::QLstmLayer>(desc, "qLstm");

        // Basic params
        m_Layer->m_BasicParameters.m_InputToForgetWeights = std::make_unique<armnn::ScopedTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QSymmS8));
        m_Layer->m_BasicParameters.m_InputToCellWeights = std::make_unique<armnn::ScopedTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QSymmS8));
        m_Layer->m_BasicParameters.m_InputToOutputWeights = std::make_unique<armnn::ScopedTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QSymmS8));

        m_Layer->m_BasicParameters.m_RecurrentToForgetWeights = std::make_unique<armnn::ScopedTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QSymmS8));
        m_Layer->m_BasicParameters.m_RecurrentToCellWeights = std::make_unique<armnn::ScopedTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QSymmS8));
        m_Layer->m_BasicParameters.m_RecurrentToOutputWeights = std::make_unique<armnn::ScopedTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QSymmS8));

        m_Layer->m_BasicParameters.m_ForgetGateBias = std::make_unique<armnn::ScopedTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Signed32));
        m_Layer->m_BasicParameters.m_CellBias = std::make_unique<armnn::ScopedTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Signed32));
        m_Layer->m_BasicParameters.m_OutputGateBias = std::make_unique<armnn::ScopedTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Signed32));

        // CIFG optional params
        m_Layer->m_CifgParameters.m_InputToInputWeights = std::make_unique<armnn::ScopedTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QSymmS8));
        m_Layer->m_CifgParameters.m_RecurrentToInputWeights = std::make_unique<armnn::ScopedTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QSymmS8));
        m_Layer->m_CifgParameters.m_InputGateBias = std::make_unique<armnn::ScopedTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Signed32));

        // Projection optional params
        m_Layer->m_ProjectionParameters.m_ProjectionWeights = std::make_unique<armnn::ScopedTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QSymmS8));
        m_Layer->m_ProjectionParameters.m_ProjectionBias = std::make_unique<armnn::ScopedTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Signed32));

        // Peephole optional params
        m_Layer->m_PeepholeParameters.m_CellToInputWeights = std::make_unique<armnn::ScopedTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QSymmS16));
        m_Layer->m_PeepholeParameters.m_CellToForgetWeights = std::make_unique<armnn::ScopedTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QSymmS16));
        m_Layer->m_PeepholeParameters.m_CellToOutputWeights = std::make_unique<armnn::ScopedTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QSymmS16));

        // Layer normalization optional params
        m_Layer->m_LayerNormParameters.m_InputLayerNormWeights = std::make_unique<armnn::ScopedTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QSymmS16));
        m_Layer->m_LayerNormParameters.m_ForgetLayerNormWeights = std::make_unique<armnn::ScopedTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QSymmS16));
        m_Layer->m_LayerNormParameters.m_CellLayerNormWeights = std::make_unique<armnn::ScopedTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QSymmS16));
        m_Layer->m_LayerNormParameters.m_OutputLayerNormWeights = std::make_unique<armnn::ScopedTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QSymmS16));
    }

    ~DummyLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }

    armnn::QLstmLayer* m_Layer;
};

template<>
struct DummyLayer<armnn::QuantizedLstmLayer, void>
{
    DummyLayer()
    {
        m_Layer = dummyGraph.AddLayer<armnn::QuantizedLstmLayer>("");

        m_Layer->m_QuantizedLstmParameters.m_InputToInputWeights = std::make_unique<armnn::ScopedTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QAsymmU8));
        m_Layer->m_QuantizedLstmParameters.m_InputToForgetWeights = std::make_unique<armnn::ScopedTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QAsymmU8));
        m_Layer->m_QuantizedLstmParameters.m_InputToCellWeights = std::make_unique<armnn::ScopedTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QAsymmU8));
        m_Layer->m_QuantizedLstmParameters.m_InputToOutputWeights = std::make_unique<armnn::ScopedTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QAsymmU8));

        m_Layer->m_QuantizedLstmParameters.m_RecurrentToInputWeights = std::make_unique<armnn::ScopedTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QAsymmU8));
        m_Layer->m_QuantizedLstmParameters.m_RecurrentToForgetWeights = std::make_unique<armnn::ScopedTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QAsymmU8));
        m_Layer->m_QuantizedLstmParameters.m_RecurrentToCellWeights = std::make_unique<armnn::ScopedTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QAsymmU8));
        m_Layer->m_QuantizedLstmParameters.m_RecurrentToOutputWeights = std::make_unique<armnn::ScopedTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QAsymmU8));

        m_Layer->m_QuantizedLstmParameters.m_InputGateBias = std::make_unique<armnn::ScopedTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Signed32));
        m_Layer->m_QuantizedLstmParameters.m_ForgetGateBias = std::make_unique<armnn::ScopedTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Signed32));
        m_Layer->m_QuantizedLstmParameters.m_CellBias = std::make_unique<armnn::ScopedTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Signed32));
        m_Layer->m_QuantizedLstmParameters.m_OutputGateBias = std::make_unique<armnn::ScopedTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Signed32));
    }

    ~DummyLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }

    armnn::QuantizedLstmLayer* m_Layer;
};

template<>
struct DummyLayer<armnn::FullyConnectedLayer>
{
    DummyLayer()
    {
        armnn::FullyConnectedLayer::DescriptorType desc;
        m_Layer = dummyGraph.AddLayer<armnn::FullyConnectedLayer>(desc, "");
        m_Layer->m_Weight = std::make_unique<armnn::ScopedTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
    }

    ~DummyLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }

    armnn::FullyConnectedLayer* m_Layer;
};

// Tag for giving each LayerType entry its own unique strong type.
template<armnn::LayerType>
struct Tag{};

#define DECLARE_LAYER_POLICY_CUSTOM_PARAM(name, descType) \
template<armnn::DataType DataType> \
struct LayerTypePolicy<armnn::LayerType::name, DataType> \
{ \
    using Type = armnn::name##Layer; \
    using Desc = descType; \
    using QueueDesc = armnn::name##QueueDescriptor; \
    constexpr static const char* NameStr = #name; \
    constexpr static const bool IsException = false; \
    \
    static std::unique_ptr<armnn::IWorkload> MakeDummyWorkload(armnn::IWorkloadFactory* factory, \
                                                               unsigned int nIn, unsigned int nOut) \
    { \
        QueueDesc desc; \
        armnn::WorkloadInfo info = MakeDummyWorkloadInfo<DataType>(nIn, nOut); \
        return factory->CreateWorkload(armnn::LayerType::name, desc, info); \
    } \
};

#define DECLARE_LAYER_POLICY_MAP_PARAM(name, descType) \
template<armnn::DataType DataType> \
struct LayerTypePolicy<armnn::LayerType::name, DataType> \
{ \
    using Type = armnn::name##Layer; \
    using Desc = descType; \
    using QueueDesc = armnn::name##QueueDescriptor; \
    using Workload = armnn::name##Workload; \
    constexpr static const char* NameStr = #name; \
    constexpr static const bool IsException = false; \
    \
    static std::unique_ptr<armnn::IWorkload> MakeDummyWorkload(armnn::IWorkloadFactory* factory, \
                                                               unsigned int nIn, unsigned int nOut) \
    { \
        IgnoreUnused(factory); \
        QueueDesc desc; \
        armnn::WorkloadInfo info = MakeDummyWorkloadInfo<DataType>(nIn, nOut); \
        return std::make_unique<armnn::name##Workload>(desc, info); \
    } \
};

// Define a layer policy specialization for use with the IsLayerSupported tests.
// Use this version for layers whose constructor takes one parameter (name).
#define DECLARE_LAYER_POLICY_1_PARAM(name) DECLARE_LAYER_POLICY_CUSTOM_PARAM(name, void)

// Define a layer policy specialization for use with the IsLayerSupported tests.
// Use this version for layers whose constructor takes two parameters (descriptor and name).
#define DECLARE_LAYER_POLICY_2_PARAM(name) DECLARE_LAYER_POLICY_CUSTOM_PARAM(name, armnn::name##Descriptor)

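// Illustrative sketch of what these macros generate, using Softmax as the example:
// DECLARE_LAYER_POLICY_2_PARAM(Softmax) expands to a specialization equivalent to
//
//     template<armnn::DataType DataType>
//     struct LayerTypePolicy<armnn::LayerType::Softmax, DataType>
//     {
//         using Type      = armnn::SoftmaxLayer;
//         using Desc      = armnn::SoftmaxDescriptor;
//         using QueueDesc = armnn::SoftmaxQueueDescriptor;
//         // ... NameStr, IsException and MakeDummyWorkload as above ...
//     };
//
// so the recursive test driver below can map each LayerType enum value to its layer
// class, descriptor and queue descriptor at compile time.
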
#define DECLARE_LAYER_POLICY_EXCEPTION(name, descType) \
template<armnn::DataType DataType> \
struct LayerTypePolicy<armnn::LayerType::name, DataType> \
{ \
    using Type = armnn::name##Layer; \
    using Desc = descType; \
    constexpr static const char* NameStr = #name; \
    constexpr static const bool IsException = true; \
    \
    static std::unique_ptr<armnn::IWorkload> MakeDummyWorkload(armnn::IWorkloadFactory* factory, \
                                                               unsigned int nIn, unsigned int nOut) \
    { \
        IgnoreUnused(factory, nIn, nOut); \
        return std::unique_ptr<armnn::IWorkload>(); \
    } \
};

#define DECLARE_LAYER_POLICY_EXCEPTION_1_PARAM(name) DECLARE_LAYER_POLICY_EXCEPTION(name, void)
#define DECLARE_LAYER_POLICY_EXCEPTION_2_PARAM(name) DECLARE_LAYER_POLICY_EXCEPTION(name, armnn::name##Descriptor)

// Layer policy template.
template<armnn::LayerType Type, armnn::DataType DataType>
struct LayerTypePolicy;

// Every entry in the armnn::LayerType enum must be accounted for below.
DECLARE_LAYER_POLICY_2_PARAM(Activation)

DECLARE_LAYER_POLICY_1_PARAM(Addition)

DECLARE_LAYER_POLICY_2_PARAM(ArgMinMax)

DECLARE_LAYER_POLICY_2_PARAM(BatchMatMul)

DECLARE_LAYER_POLICY_2_PARAM(BatchNormalization)

DECLARE_LAYER_POLICY_2_PARAM(BatchToSpaceNd)

DECLARE_LAYER_POLICY_1_PARAM(Cast)

DECLARE_LAYER_POLICY_2_PARAM(ChannelShuffle)

DECLARE_LAYER_POLICY_2_PARAM(Comparison)

DECLARE_LAYER_POLICY_2_PARAM(Concat)

DECLARE_LAYER_POLICY_1_PARAM(Constant)

DECLARE_LAYER_POLICY_1_PARAM(ConvertBf16ToFp32)

DECLARE_LAYER_POLICY_1_PARAM(ConvertFp16ToFp32)

DECLARE_LAYER_POLICY_1_PARAM(ConvertFp32ToBf16)

DECLARE_LAYER_POLICY_1_PARAM(ConvertFp32ToFp16)

DECLARE_LAYER_POLICY_2_PARAM(Convolution2d)

DECLARE_LAYER_POLICY_2_PARAM(Convolution3d)

DECLARE_LAYER_POLICY_1_PARAM(MemCopy)

DECLARE_LAYER_POLICY_1_PARAM(MemImport)

DECLARE_LAYER_POLICY_1_PARAM(Debug)

DECLARE_LAYER_POLICY_2_PARAM(DepthToSpace)

DECLARE_LAYER_POLICY_2_PARAM(DepthwiseConvolution2d)

DECLARE_LAYER_POLICY_1_PARAM(Dequantize)

DECLARE_LAYER_POLICY_2_PARAM(DetectionPostProcess)

DECLARE_LAYER_POLICY_2_PARAM(ElementwiseUnary)

DECLARE_LAYER_POLICY_2_PARAM(FakeQuantization)

DECLARE_LAYER_POLICY_2_PARAM(Fill)

DECLARE_LAYER_POLICY_1_PARAM(Floor)

DECLARE_LAYER_POLICY_2_PARAM(FullyConnected)

DECLARE_LAYER_POLICY_2_PARAM(Gather)

DECLARE_LAYER_POLICY_1_PARAM(GatherNd)

DECLARE_LAYER_POLICY_CUSTOM_PARAM(Input, armnn::LayerBindingId)

DECLARE_LAYER_POLICY_2_PARAM(InstanceNormalization)

DECLARE_LAYER_POLICY_2_PARAM(L2Normalization)

DECLARE_LAYER_POLICY_2_PARAM(LogicalBinary)

DECLARE_LAYER_POLICY_2_PARAM(LogSoftmax)

DECLARE_LAYER_POLICY_2_PARAM(Lstm)

DECLARE_LAYER_POLICY_MAP_PARAM(Map, void)

DECLARE_LAYER_POLICY_1_PARAM(Maximum)

DECLARE_LAYER_POLICY_2_PARAM(Mean)

DECLARE_LAYER_POLICY_1_PARAM(Merge)

DECLARE_LAYER_POLICY_1_PARAM(Minimum)

DECLARE_LAYER_POLICY_1_PARAM(Multiplication)

DECLARE_LAYER_POLICY_2_PARAM(Normalization)

DECLARE_LAYER_POLICY_CUSTOM_PARAM(Output, armnn::LayerBindingId)

DECLARE_LAYER_POLICY_2_PARAM(Pad)

DECLARE_LAYER_POLICY_2_PARAM(Permute)

DECLARE_LAYER_POLICY_2_PARAM(Pooling2d)

DECLARE_LAYER_POLICY_2_PARAM(Pooling3d)

DECLARE_LAYER_POLICY_1_PARAM(Prelu)

DECLARE_LAYER_POLICY_2_PARAM(PreCompiled)

DECLARE_LAYER_POLICY_2_PARAM(QLstm)

DECLARE_LAYER_POLICY_1_PARAM(Quantize)

DECLARE_LAYER_POLICY_1_PARAM(QuantizedLstm)

DECLARE_LAYER_POLICY_1_PARAM(Division)

DECLARE_LAYER_POLICY_1_PARAM(Rank)

DECLARE_LAYER_POLICY_2_PARAM(Reduce)

DECLARE_LAYER_POLICY_2_PARAM(Reshape)

DECLARE_LAYER_POLICY_2_PARAM(Resize)

DECLARE_LAYER_POLICY_1_PARAM(Shape)

DECLARE_LAYER_POLICY_2_PARAM(Slice)

DECLARE_LAYER_POLICY_2_PARAM(Softmax)

DECLARE_LAYER_POLICY_2_PARAM(SpaceToBatchNd)

DECLARE_LAYER_POLICY_2_PARAM(SpaceToDepth)

DECLARE_LAYER_POLICY_2_PARAM(Splitter)

DECLARE_LAYER_POLICY_2_PARAM(Stack)

DECLARE_LAYER_POLICY_EXCEPTION_2_PARAM(StandIn)

DECLARE_LAYER_POLICY_2_PARAM(StridedSlice)

DECLARE_LAYER_POLICY_1_PARAM(Subtraction)

DECLARE_LAYER_POLICY_1_PARAM(Switch)

DECLARE_LAYER_POLICY_2_PARAM(Transpose)

DECLARE_LAYER_POLICY_2_PARAM(TransposeConvolution2d)

DECLARE_LAYER_POLICY_2_PARAM(UnidirectionalSequenceLstm)

DECLARE_LAYER_POLICY_MAP_PARAM(Unmap, void)


// Generic implementation to get the number of input slots for a given layer type.
template<armnn::LayerType Type>
unsigned int GetNumInputs(const armnn::Layer& layer)
{
    return layer.GetNumInputSlots();
}

// Generic implementation to get the number of output slots for a given layer type.
template<armnn::LayerType Type>
unsigned int GetNumOutputs(const armnn::Layer& layer)
{
    return layer.GetNumOutputSlots();
}

// Concat is special: the dummy layer above is always built with two inputs (OriginsDescriptor(2)).
template<>
unsigned int GetNumInputs<armnn::LayerType::Concat>(const armnn::Layer& layer)
{
    IgnoreUnused(layer);
    return 2;
}

// Tests that the IsLayerSupported() function returns the correct value.
// We determine the correct value by *trying* to create the relevant workload and seeing if it matches what we expect.
// Returns true if expectations are met, otherwise returns false.
template<typename FactoryType, armnn::DataType DataType, armnn::LayerType Type>
bool IsLayerSupportedTest(FactoryType *factory, Tag<Type>)
{
    using LayerPolicy = LayerTypePolicy<Type, DataType>;
    using LayerType = typename LayerPolicy::Type;
    using LayerDesc = typename LayerPolicy::Desc;
    DummyLayer<LayerType, LayerDesc> layer;

    if (LayerPolicy::IsException) // Don't test exceptions to the rule.
    {
        return true;
    }

    unsigned int numIn = GetNumInputs<Type>(*layer.m_Layer);
    unsigned int numOut = GetNumOutputs<Type>(*layer.m_Layer);

    // Make another dummy layer just to make IsLayerSupported have valid inputs.
    DummyLayer<armnn::ConstantLayer, void> previousLayer;
    // Set output of the previous layer to a dummy tensor.
    armnn::TensorInfo output = MakeDummyTensorInfo<DataType>();
    previousLayer.m_Layer->GetOutputSlot(0).SetTensorInfo(output);
    // Connect the output of the previous layer to every input of the tested layer.
    for (unsigned int i = 0; i < numIn; i++)
    {
        armnn::IOutputSlot& previousLayerOutputSlot = previousLayer.m_Layer->GetOutputSlot(0);
        armnn::IInputSlot& layerInputSlot = layer.m_Layer->GetInputSlot(i);
        previousLayerOutputSlot.Connect(layerInputSlot);
    }
    // Set every output of the tested layer to a dummy tensor.
    for (unsigned int i = 0; i < numOut; i++)
    {
        layer.m_Layer->GetOutputSlot(i).SetTensorInfo(output);
    }

    std::string layerName = LayerPolicy::NameStr;
    std::string reasonIfUnsupported;
    if (FactoryType::IsLayerSupported(*layer.m_Layer, DataType, reasonIfUnsupported))
    {
        std::string errorMsg = " layer expected support but found none.";
        try
        {
            bool retVal = LayerPolicy::MakeDummyWorkload(factory, numIn, numOut).get() != nullptr;
            CHECK_MESSAGE(retVal, layerName << errorMsg);
            return retVal;
        }
        catch(const armnn::InvalidArgumentException& e)
        {
            IgnoreUnused(e);
            // This is ok since we throw InvalidArgumentException when creating the dummy workload.
            return true;
        }
        catch(const std::exception& e)
        {
            errorMsg = e.what();
            FAIL(layerName << ": " << errorMsg);
            return false;
        }
        catch(...)
        {
            errorMsg = "Unexpected error while testing support for ";
            FAIL(errorMsg << layerName);
            return false;
        }
    }
    else
    {
        std::string errorMsg = " layer expected no support (giving reason: " + reasonIfUnsupported + ") but found some.";
        try
        {
            bool retVal = LayerPolicy::MakeDummyWorkload(factory, numIn, numOut).get() == nullptr;
            CHECK_MESSAGE(retVal, layerName << errorMsg);
            return retVal;
        }
        // These two exceptions are ok: for workloads that are partially supported, attempting to instantiate them
        // using parameters that make IsLayerSupported() return false should throw an
        // InvalidArgumentException or UnimplementedException.
        catch(const armnn::InvalidArgumentException& e)
        {
            IgnoreUnused(e);
            return true;
        }
        catch(const armnn::UnimplementedException& e)
        {
            IgnoreUnused(e);
            return true;
        }
        catch(const std::exception& e)
        {
            errorMsg = e.what();
            FAIL(layerName << ": " << errorMsg);
            return false;
        }
        catch(...)
        {
            errorMsg = "Unexpected error while testing support for ";
            FAIL(errorMsg << layerName);
            return false;
        }
    }
}

// Map and Unmap layers are skipped by the generic test above.
template<typename FactoryType, armnn::DataType DataType, armnn::LayerType Type>
bool IsLayerSupportedTest(FactoryType *factory, Tag<armnn::LayerType::Map>)
{
    IgnoreUnused(factory);
    return true;
}

template<typename FactoryType, armnn::DataType DataType, armnn::LayerType Type>
bool IsLayerSupportedTest(FactoryType *factory, Tag<armnn::LayerType::Unmap>)
{
    IgnoreUnused(factory);
    return true;
}

// Helper function to compute the next type in the LayerType enum.
constexpr armnn::LayerType NextType(armnn::LayerType type)
{
    return static_cast<armnn::LayerType>(static_cast<int>(type)+1);
}

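// Illustrative sketch: NextType simply steps through the underlying enum values, so if,
// say, Addition immediately follows Activation in armnn::LayerType, then
//
//     static_assert(NextType(armnn::LayerType::Activation) == armnn::LayerType::Addition, "");
//
// would hold. The recursive driver below uses this to visit FirstLayer..LastLayer in turn.
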
// Termination function for the recursion: tests the last entry in the LayerType enum.
template<typename FactoryType, armnn::DataType DataType, armnn::LayerType Type>
bool IsLayerSupportedTestsImpl(FactoryType *factory, Tag<armnn::LayerType::LastLayer>)
{
    return IsLayerSupportedTest<FactoryType, DataType, Type>(factory, Tag<Type>());
}

// Recursive function: tests one entry in the LayerType enum, then iterates on to the next entry.
template<typename FactoryType, armnn::DataType DataType, armnn::LayerType Type>
bool IsLayerSupportedTestsImpl(FactoryType *factory, Tag<Type>)
{
    bool v = IsLayerSupportedTest<FactoryType, DataType, Type>(factory, Tag<Type>());

    return v &&
           IsLayerSupportedTestsImpl<FactoryType, DataType, NextType(Type)>
               (factory, Tag<NextType(Type)>());
}

// Helper function to pass through to the test framework.
template<typename FactoryType, armnn::DataType DataType>
bool IsLayerSupportedTests(FactoryType *factory)
{
    return IsLayerSupportedTestsImpl<FactoryType, DataType>(factory, Tag<armnn::LayerType::FirstLayer>());
}

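// Illustrative sketch of how a backend test suite would drive this helper. The factory
// type here is an assumption; each backend supplies its own IWorkloadFactory subclass:
//
//     TEST_CASE("IsLayerSupportedFloat32")
//     {
//         SomeBackendWorkloadFactory factory;   // hypothetical factory type
//         IsLayerSupportedTests<SomeBackendWorkloadFactory, armnn::DataType::Float32>(&factory);
//     }
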
template<armnn::LayerType Type>
bool TestLayerTypeMatches()
{
    using LayerPolicy = LayerTypePolicy<Type, armnn::DataType::Float32>;
    using LayerType = typename LayerPolicy::Type;
    using LayerDesc = typename LayerPolicy::Desc;
    DummyLayer<LayerType, LayerDesc> layer;

    std::stringstream ss;
    ss << LayerPolicy::NameStr << " layer type does not match the expected LayerType value.";
    bool v = Type == layer.m_Layer->GetType();
    CHECK_MESSAGE(v, ss.str());
    return v;
}

template<armnn::LayerType Type>
bool LayerTypeMatchesTestImpl(Tag<armnn::LayerType::LastLayer>)
{
    return TestLayerTypeMatches<Type>();
}

template<armnn::LayerType Type>
bool LayerTypeMatchesTestImpl(Tag<Type>)
{
    return TestLayerTypeMatches<Type>() &&
           LayerTypeMatchesTestImpl<NextType(Type)>(Tag<NextType(Type)>());
}

template<typename FactoryType, typename LayerType, armnn::DataType InputDataType, armnn::DataType OutputDataType>
bool IsConvertLayerSupportedTests(std::string& reasonIfUnsupported)
{
    armnn::Graph graph;
    LayerType* const layer = graph.AddLayer<LayerType>("LayerName");

    armnn::Layer* const input = graph.AddLayer<armnn::InputLayer>(0, "input");
    armnn::Layer* const output = graph.AddLayer<armnn::OutputLayer>(0, "output");

    armnn::TensorInfo inputTensorInfo({1, 3, 2, 3}, InputDataType);
    armnn::TensorInfo outputTensorInfo({1, 3, 2, 3}, OutputDataType);

    input->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
    input->GetOutputHandler(0).SetTensorInfo(inputTensorInfo);
    layer->GetOutputSlot(0).Connect(output->GetInputSlot(0));
    layer->GetOutputHandler(0).SetTensorInfo(outputTensorInfo);

    bool result = FactoryType::IsLayerSupported(*layer, InputDataType, reasonIfUnsupported);

    return result;
}

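// Illustrative sketch (the factory type is an assumption, as above), checking the
// ConvertFp16ToFp32 layer from this file's policy list:
//
//     std::string reason;
//     bool supported = IsConvertLayerSupportedTests<SomeBackendWorkloadFactory,   // hypothetical
//                                                   armnn::ConvertFp16ToFp32Layer,
//                                                   armnn::DataType::Float16,
//                                                   armnn::DataType::Float32>(reason);
//     CHECK_MESSAGE(supported, reason);
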
template<typename FactoryType, armnn::DataType InputDataType, armnn::DataType OutputDataType>
bool IsLogicalBinaryLayerSupportedTests(std::string& reasonIfUnsupported)
{
    armnn::Graph graph;
    armnn::LogicalBinaryDescriptor desc(armnn::LogicalBinaryOperation::LogicalOr);

    armnn::Layer* const input0 = graph.AddLayer<armnn::InputLayer>(0, "input0");
    armnn::Layer* const input1 = graph.AddLayer<armnn::InputLayer>(1, "input1");

    armnn::Layer* const layer = graph.AddLayer<armnn::LogicalBinaryLayer>(desc, "logicalOrLayer");

    armnn::Layer* const output = graph.AddLayer<armnn::OutputLayer>(0, "output1");

    armnn::TensorInfo inputTensorInfo0({1, 1, 1, 4}, InputDataType);
    armnn::TensorInfo inputTensorInfo1({1, 1, 1, 4}, InputDataType);

    armnn::TensorInfo outputTensorInfo({1, 1, 1, 4}, OutputDataType);

    input0->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
    input1->GetOutputSlot(0).Connect(layer->GetInputSlot(1));

    input0->GetOutputHandler(0).SetTensorInfo(inputTensorInfo0);
    input1->GetOutputHandler(0).SetTensorInfo(inputTensorInfo1);

    layer->GetOutputSlot(0).Connect(output->GetInputSlot(0));
    layer->GetOutputHandler(0).SetTensorInfo(outputTensorInfo);

    bool result = FactoryType::IsLayerSupported(*layer, InputDataType, reasonIfUnsupported);

    return result;
}

template<typename FactoryType, armnn::DataType InputDataType, armnn::DataType OutputDataType>
bool IsLogicalBinaryLayerBroadcastSupportedTests(std::string& reasonIfUnsupported)
{
    armnn::Graph graph;
    armnn::LogicalBinaryDescriptor desc(armnn::LogicalBinaryOperation::LogicalAnd);

    armnn::Layer* const input0 = graph.AddLayer<armnn::InputLayer>(0, "input0");
    armnn::Layer* const input1 = graph.AddLayer<armnn::InputLayer>(1, "input1");

    armnn::Layer* const layer = graph.AddLayer<armnn::LogicalBinaryLayer>(desc, "logicalAndLayer");

    armnn::Layer* const output = graph.AddLayer<armnn::OutputLayer>(0, "output2");

    armnn::TensorInfo inputTensorInfo0({1, 1, 1, 4}, InputDataType);
    armnn::TensorInfo inputTensorInfo1({1, 1, 1, 1}, InputDataType);

    armnn::TensorInfo outputTensorInfo({1, 1, 1, 4}, OutputDataType);

    input0->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
    input1->GetOutputSlot(0).Connect(layer->GetInputSlot(1));

    input0->GetOutputHandler(0).SetTensorInfo(inputTensorInfo0);
    input1->GetOutputHandler(0).SetTensorInfo(inputTensorInfo1);

    layer->GetOutputSlot(0).Connect(output->GetInputSlot(0));
    layer->GetOutputHandler(0).SetTensorInfo(outputTensorInfo);

    bool result = FactoryType::IsLayerSupported(*layer, InputDataType, reasonIfUnsupported);

    return result;
}

template<typename FactoryType, armnn::DataType InputDataType, armnn::DataType OutputDataType>
bool IsMeanLayerSupportedTests(std::string& reasonIfUnsupported)
{
    armnn::Graph graph;
    static const std::vector<unsigned> axes = {1, 0};
    armnn::MeanDescriptor desc(axes, false);

    armnn::Layer* const layer = graph.AddLayer<armnn::MeanLayer>(desc, "LayerName");

    armnn::Layer* const input = graph.AddLayer<armnn::InputLayer>(0, "input");
    armnn::Layer* const output = graph.AddLayer<armnn::OutputLayer>(0, "output");

    armnn::TensorInfo inputTensorInfo({4, 3, 2}, InputDataType);
    armnn::TensorInfo outputTensorInfo({2}, OutputDataType);

    input->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
    input->GetOutputHandler(0).SetTensorInfo(inputTensorInfo);
    layer->GetOutputSlot(0).Connect(output->GetInputSlot(0));
    layer->GetOutputHandler(0).SetTensorInfo(outputTensorInfo);

    bool result = FactoryType::IsLayerSupported(*layer, InputDataType, reasonIfUnsupported);

    return result;
}

// Tests that IsMeanSupported fails when input tensor dimensions
// do not match output tensor dimensions when keepDims == true.
template<typename FactoryType, armnn::DataType InputDataType, armnn::DataType OutputDataType>
bool IsMeanLayerNotSupportedTests(std::string& reasonIfUnsupported)
{
    armnn::Graph graph;
    static const std::vector<unsigned> axes = {};
    // Set keepDims == true.
    armnn::MeanDescriptor desc(axes, true);

    armnn::Layer* const layer = graph.AddLayer<armnn::MeanLayer>(desc, "LayerName");

    armnn::Layer* const input = graph.AddLayer<armnn::InputLayer>(0, "input");
    armnn::Layer* const output = graph.AddLayer<armnn::OutputLayer>(0, "output");

    // Mismatching number of tensor dimensions.
    armnn::TensorInfo inputTensorInfo({1, 1, 1, 1}, InputDataType);
    armnn::TensorInfo outputTensorInfo({1, 1}, OutputDataType);

    input->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
    input->GetOutputHandler(0).SetTensorInfo(inputTensorInfo);
    layer->GetOutputSlot(0).Connect(output->GetInputSlot(0));
    layer->GetOutputHandler(0).SetTensorInfo(outputTensorInfo);

    bool result = FactoryType::IsLayerSupported(*layer, InputDataType, reasonIfUnsupported);

    return result;
}

template<typename FactoryType, armnn::DataType OutputDataType>
bool IsConstantLayerSupportedTests(std::string& reasonIfUnsupported)
{
    armnn::Graph graph;

    armnn::Layer* const layer = graph.AddLayer<armnn::ConstantLayer>("ConstantLayerName");
    armnn::Layer* const output = graph.AddLayer<armnn::OutputLayer>(0, "OutputLayerName");

    armnn::TensorInfo outputTensorInfo({1, 1}, OutputDataType);

    layer->GetOutputSlot(0).Connect(output->GetInputSlot(0));
    layer->GetOutputHandler(0).SetTensorInfo(outputTensorInfo);

    bool result = FactoryType::IsLayerSupported(*layer, OutputDataType, reasonIfUnsupported);

    return result;
}

} // anonymous namespace