ArmNN 21.08
IsLayerSupportedTestImpl.hpp
//
// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once

#include <Graph.hpp>

#include <armnn/utility/IgnoreUnused.hpp>

#include <backendsCommon/TensorHandle.hpp>
#include <backendsCommon/WorkloadFactory.hpp>

#include <doctest/doctest.h>

#include <sstream>
namespace
{
armnn::Graph dummyGraph;

// Make a dummy TensorInfo object.
template<armnn::DataType DataType>
armnn::TensorInfo MakeDummyTensorInfo()
{
    return armnn::TensorInfo({2,2,2,2}, DataType, 1.0, 0);
}

// Make a dummy WorkloadInfo using a dummy TensorInfo.
template<armnn::DataType DataType>
armnn::WorkloadInfo MakeDummyWorkloadInfo(unsigned int numInputs, unsigned int numOutputs)
{
    armnn::WorkloadInfo info;

    for (unsigned int i=0; i < numInputs; i++)
    {
        info.m_InputTensorInfos.push_back(MakeDummyTensorInfo<DataType>());
    }

    for (unsigned int o=0; o < numOutputs; o++)
    {
        info.m_OutputTensorInfos.push_back(MakeDummyTensorInfo<DataType>());
    }

    return info;
}
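
// The dummy TensorInfo/WorkloadInfo pair above is deliberately minimal: the
// Create*() factory calls exercised below only need plausibly-shaped tensor
// metadata, not real tensor data, to decide whether a workload can be built.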

// Template class to create a dummy layer (2 parameters).
template<typename LayerType, typename DescType = typename LayerType::DescriptorType>
struct DummyLayer
{
    DummyLayer()
    {
        m_Layer = dummyGraph.AddLayer<LayerType>(DescType(), "");
    }

    ~DummyLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }

    LayerType* m_Layer;
};
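
// DummyLayer is an RAII helper: the constructor adds a layer of the requested
// type to the shared dummyGraph and the destructor erases it again, so each
// test sees a graph containing only the layer under test. The specialisations
// below exist for layer types whose constructors take different arguments, or
// which need constant tensors (means, weights, biases, etc.) attached before
// they can be queried.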

// Template class to create a dummy layer (1 parameter).
template<typename LayerType>
struct DummyLayer<LayerType, void>
{
    DummyLayer()
    {
        m_Layer = dummyGraph.AddLayer<LayerType>("");
    }

    ~DummyLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }

    LayerType* m_Layer;
};

template<>
struct DummyLayer<armnn::BatchNormalizationLayer>
{
    DummyLayer()
    {
        m_Layer = dummyGraph.AddLayer<armnn::BatchNormalizationLayer>(armnn::BatchNormalizationDescriptor(), "");
        m_Layer->m_Mean = std::make_unique<armnn::ScopedTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_Variance = std::make_unique<armnn::ScopedTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_Beta = std::make_unique<armnn::ScopedTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_Gamma = std::make_unique<armnn::ScopedTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
    }

    ~DummyLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }

    armnn::BatchNormalizationLayer* m_Layer;
};

template<>
struct DummyLayer<armnn::BatchToSpaceNdLayer>
{
    DummyLayer()
    {
        m_Layer = dummyGraph.AddLayer<armnn::BatchToSpaceNdLayer>(armnn::BatchToSpaceNdDescriptor(), "");
    }

    ~DummyLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }

    armnn::BatchToSpaceNdLayer* m_Layer;
};

template<>
struct DummyLayer<armnn::ConstantLayer, void>
{
    DummyLayer()
    {
        m_Layer = dummyGraph.AddLayer<armnn::ConstantLayer>("");
    }

    ~DummyLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }

    armnn::ConstantLayer* m_Layer;
};

template<>
struct DummyLayer<armnn::InputLayer, armnn::LayerBindingId>
{
    DummyLayer()
    {
        m_Layer = dummyGraph.AddLayer<armnn::InputLayer>(armnn::LayerBindingId(), "");
    }

    ~DummyLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }

    armnn::InputLayer* m_Layer;
};

template<>
struct DummyLayer<armnn::ConcatLayer>
{
    DummyLayer()
    {
        armnn::OriginsDescriptor desc(2);
        m_Layer = dummyGraph.AddLayer<armnn::ConcatLayer>(desc, "");
    }

    ~DummyLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }

    armnn::ConcatLayer* m_Layer;
};

template<>
struct DummyLayer<armnn::MapLayer, void>
{
    DummyLayer()
    {
        m_Layer = dummyGraph.AddLayer<armnn::MapLayer>("");
    }

    ~DummyLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }

    armnn::MapLayer* m_Layer;
};

template<>
struct DummyLayer<armnn::OutputLayer, armnn::LayerBindingId>
{
    DummyLayer()
    {
        m_Layer = dummyGraph.AddLayer<armnn::OutputLayer>(armnn::LayerBindingId(), "");
    }

    ~DummyLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }

    armnn::OutputLayer* m_Layer;
};

template<>
struct DummyLayer<armnn::SplitterLayer>
{
    DummyLayer()
    {
        armnn::ViewsDescriptor desc(1);
        m_Layer = dummyGraph.AddLayer<armnn::SplitterLayer>(desc, "");
    }

    ~DummyLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }

    armnn::SplitterLayer* m_Layer;
};

template<>
struct DummyLayer<armnn::UnmapLayer, void>
{
    DummyLayer()
    {
        m_Layer = dummyGraph.AddLayer<armnn::UnmapLayer>("");
    }

    ~DummyLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }

    armnn::UnmapLayer* m_Layer;
};

template <typename ConvolutionLayerType>
struct DummyConvolutionLayer
{
    DummyConvolutionLayer()
    {
        typename ConvolutionLayerType::DescriptorType desc;
        desc.m_StrideX = 1;
        desc.m_StrideY = 1;
        m_Layer = dummyGraph.AddLayer<ConvolutionLayerType>(desc, "");
        m_Layer->m_Weight = std::make_unique<armnn::ScopedTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_Bias = std::make_unique<armnn::ScopedTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
    }

    ~DummyConvolutionLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }

    ConvolutionLayerType* m_Layer;
};
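
// Convolution-style layers are created with a 1x1 stride and dummy Float32
// weight/bias handles, since backends typically inspect the weight and bias
// TensorInfos when deciding support; null handles would fail earlier than the
// IsLayerSupported() query this file is trying to exercise.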

template<>
struct DummyLayer<armnn::Convolution2dLayer>
    : public DummyConvolutionLayer<armnn::Convolution2dLayer>
{
};

template<>
struct DummyLayer<armnn::DepthwiseConvolution2dLayer>
    : public DummyConvolutionLayer<armnn::DepthwiseConvolution2dLayer>
{
};

template<>
struct DummyLayer<armnn::TransposeConvolution2dLayer>
    : public DummyConvolutionLayer<armnn::TransposeConvolution2dLayer>
{
};

template<>
struct DummyLayer<armnn::DetectionPostProcessLayer>
{
    DummyLayer()
    {
        m_Layer = dummyGraph.AddLayer<armnn::DetectionPostProcessLayer>(armnn::DetectionPostProcessDescriptor(), "");
        m_Layer->m_Anchors = std::make_unique<armnn::ScopedTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
    }

    ~DummyLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }

    armnn::DetectionPostProcessLayer* m_Layer;
};

template <typename LstmLayerType>
struct DummyLstmLayer
{
    DummyLstmLayer()
    {
        typename LstmLayerType::DescriptorType desc;
        desc.m_CifgEnabled = false;

        m_Layer = dummyGraph.AddLayer<LstmLayerType>(desc, "");
        m_Layer->m_BasicParameters.m_InputToForgetWeights = std::make_unique<armnn::ScopedTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_BasicParameters.m_InputToCellWeights = std::make_unique<armnn::ScopedTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_BasicParameters.m_InputToOutputWeights = std::make_unique<armnn::ScopedTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_BasicParameters.m_RecurrentToForgetWeights = std::make_unique<armnn::ScopedTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_BasicParameters.m_RecurrentToCellWeights = std::make_unique<armnn::ScopedTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_BasicParameters.m_RecurrentToOutputWeights = std::make_unique<armnn::ScopedTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_BasicParameters.m_ForgetGateBias = std::make_unique<armnn::ScopedTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_BasicParameters.m_CellBias = std::make_unique<armnn::ScopedTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_BasicParameters.m_OutputGateBias = std::make_unique<armnn::ScopedTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));

        m_Layer->m_CifgParameters.m_InputToInputWeights = std::make_unique<armnn::ScopedTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_CifgParameters.m_RecurrentToInputWeights = std::make_unique<armnn::ScopedTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_CifgParameters.m_InputGateBias = std::make_unique<armnn::ScopedTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
    }

    ~DummyLstmLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }

    LstmLayerType* m_Layer;
};

template<>
struct DummyLayer<armnn::LstmLayer>
    : public DummyLstmLayer<armnn::LstmLayer>
{
};

template <typename UnidirectionalSequenceLstmLayerType>
struct DummyUnidirectionalSequenceLstmLayer
{
    DummyUnidirectionalSequenceLstmLayer()
    {
        typename UnidirectionalSequenceLstmLayerType::DescriptorType desc;
        desc.m_CifgEnabled = false;

        m_Layer = dummyGraph.AddLayer<UnidirectionalSequenceLstmLayerType>(desc, "");
        m_Layer->m_BasicParameters.m_InputToForgetWeights = std::make_unique<armnn::ScopedTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_BasicParameters.m_InputToCellWeights = std::make_unique<armnn::ScopedTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_BasicParameters.m_InputToOutputWeights = std::make_unique<armnn::ScopedTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_BasicParameters.m_RecurrentToForgetWeights = std::make_unique<armnn::ScopedTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_BasicParameters.m_RecurrentToCellWeights = std::make_unique<armnn::ScopedTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_BasicParameters.m_RecurrentToOutputWeights = std::make_unique<armnn::ScopedTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_BasicParameters.m_ForgetGateBias = std::make_unique<armnn::ScopedTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_BasicParameters.m_CellBias = std::make_unique<armnn::ScopedTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_BasicParameters.m_OutputGateBias = std::make_unique<armnn::ScopedTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));

        m_Layer->m_CifgParameters.m_InputToInputWeights = std::make_unique<armnn::ScopedTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_CifgParameters.m_RecurrentToInputWeights = std::make_unique<armnn::ScopedTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_CifgParameters.m_InputGateBias = std::make_unique<armnn::ScopedTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
    }

    ~DummyUnidirectionalSequenceLstmLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }

    UnidirectionalSequenceLstmLayerType* m_Layer;
};

template<>
struct DummyLayer<armnn::UnidirectionalSequenceLstmLayer>
    : public DummyUnidirectionalSequenceLstmLayer<armnn::UnidirectionalSequenceLstmLayer>
{
};

template<>
struct DummyLayer<armnn::QLstmLayer>
{
    DummyLayer()
    {
        armnn::QLstmDescriptor desc;
        desc.m_CifgEnabled = false;
        desc.m_PeepholeEnabled = true;
        desc.m_ProjectionEnabled = true;
        desc.m_LayerNormEnabled = true;

        m_Layer = dummyGraph.AddLayer<armnn::QLstmLayer>(desc, "qLstm");

        // Basic params
        m_Layer->m_BasicParameters.m_InputToForgetWeights = std::make_unique<armnn::ScopedTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QSymmS8));
        m_Layer->m_BasicParameters.m_InputToCellWeights = std::make_unique<armnn::ScopedTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QSymmS8));
        m_Layer->m_BasicParameters.m_InputToOutputWeights = std::make_unique<armnn::ScopedTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QSymmS8));

        m_Layer->m_BasicParameters.m_RecurrentToForgetWeights = std::make_unique<armnn::ScopedTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QSymmS8));
        m_Layer->m_BasicParameters.m_RecurrentToCellWeights = std::make_unique<armnn::ScopedTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QSymmS8));
        m_Layer->m_BasicParameters.m_RecurrentToOutputWeights = std::make_unique<armnn::ScopedTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QSymmS8));

        m_Layer->m_BasicParameters.m_ForgetGateBias = std::make_unique<armnn::ScopedTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Signed32));
        m_Layer->m_BasicParameters.m_CellBias = std::make_unique<armnn::ScopedTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Signed32));
        m_Layer->m_BasicParameters.m_OutputGateBias = std::make_unique<armnn::ScopedTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Signed32));

        // CIFG optional params
        m_Layer->m_CifgParameters.m_InputToInputWeights = std::make_unique<armnn::ScopedTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QSymmS8));
        m_Layer->m_CifgParameters.m_RecurrentToInputWeights = std::make_unique<armnn::ScopedTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QSymmS8));
        m_Layer->m_CifgParameters.m_InputGateBias = std::make_unique<armnn::ScopedTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Signed32));

        // Projection optional params
        m_Layer->m_ProjectionParameters.m_ProjectionWeights = std::make_unique<armnn::ScopedTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QSymmS8));
        m_Layer->m_ProjectionParameters.m_ProjectionBias = std::make_unique<armnn::ScopedTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Signed32));

        // Peephole optional params
        m_Layer->m_PeepholeParameters.m_CellToInputWeights = std::make_unique<armnn::ScopedTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QSymmS16));
        m_Layer->m_PeepholeParameters.m_CellToForgetWeights = std::make_unique<armnn::ScopedTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QSymmS16));
        m_Layer->m_PeepholeParameters.m_CellToOutputWeights = std::make_unique<armnn::ScopedTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QSymmS16));

        // Layer normalization optional params
        m_Layer->m_LayerNormParameters.m_InputLayerNormWeights = std::make_unique<armnn::ScopedTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QSymmS16));
        m_Layer->m_LayerNormParameters.m_ForgetLayerNormWeights = std::make_unique<armnn::ScopedTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QSymmS16));
        m_Layer->m_LayerNormParameters.m_CellLayerNormWeights = std::make_unique<armnn::ScopedTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QSymmS16));
        m_Layer->m_LayerNormParameters.m_OutputLayerNormWeights = std::make_unique<armnn::ScopedTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QSymmS16));
    }

    ~DummyLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }

    armnn::QLstmLayer* m_Layer;
};

template<>
struct DummyLayer<armnn::QuantizedLstmLayer, void>
{
    DummyLayer()
    {
        m_Layer = dummyGraph.AddLayer<armnn::QuantizedLstmLayer>("");

        m_Layer->m_QuantizedLstmParameters.m_InputToInputWeights = std::make_unique<armnn::ScopedTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QAsymmU8));
        m_Layer->m_QuantizedLstmParameters.m_InputToForgetWeights = std::make_unique<armnn::ScopedTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QAsymmU8));
        m_Layer->m_QuantizedLstmParameters.m_InputToCellWeights = std::make_unique<armnn::ScopedTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QAsymmU8));
        m_Layer->m_QuantizedLstmParameters.m_InputToOutputWeights = std::make_unique<armnn::ScopedTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QAsymmU8));

        m_Layer->m_QuantizedLstmParameters.m_RecurrentToInputWeights = std::make_unique<armnn::ScopedTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QAsymmU8));
        m_Layer->m_QuantizedLstmParameters.m_RecurrentToForgetWeights = std::make_unique<armnn::ScopedTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QAsymmU8));
        m_Layer->m_QuantizedLstmParameters.m_RecurrentToCellWeights = std::make_unique<armnn::ScopedTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QAsymmU8));
        m_Layer->m_QuantizedLstmParameters.m_RecurrentToOutputWeights = std::make_unique<armnn::ScopedTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QAsymmU8));

        m_Layer->m_QuantizedLstmParameters.m_InputGateBias = std::make_unique<armnn::ScopedTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Signed32));
        m_Layer->m_QuantizedLstmParameters.m_ForgetGateBias = std::make_unique<armnn::ScopedTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Signed32));
        m_Layer->m_QuantizedLstmParameters.m_CellBias = std::make_unique<armnn::ScopedTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Signed32));
        m_Layer->m_QuantizedLstmParameters.m_OutputGateBias = std::make_unique<armnn::ScopedTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Signed32));
    }

    ~DummyLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }

    armnn::QuantizedLstmLayer* m_Layer;
};

template<>
struct DummyLayer<armnn::FullyConnectedLayer>
{
    DummyLayer()
    {
        armnn::FullyConnectedLayer::DescriptorType desc;
        m_Layer = dummyGraph.AddLayer<armnn::FullyConnectedLayer>(desc, "");
        m_Layer->m_Weight = std::make_unique<armnn::ScopedTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
    }

    ~DummyLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }

    armnn::FullyConnectedLayer* m_Layer;
};

// Tag for giving LayerType entries a unique strong type each.
template<armnn::LayerType>
struct Tag{};
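
// Tag<> turns each LayerType enum value into a distinct empty type, so that
// function overloads can be selected per layer at compile time: for example,
// the overload taking Tag<armnn::LayerType::Map> (see IsLayerSupportedTest
// below) wins over the generic Tag<Type> template for the Map entry.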

#define DECLARE_LAYER_POLICY_CUSTOM_PARAM(name, descType) \
template<armnn::DataType DataType> \
struct LayerTypePolicy<armnn::LayerType::name, DataType> \
{ \
    using Type = armnn::name##Layer; \
    using Desc = descType; \
    using QueueDesc = armnn::name##QueueDescriptor; \
    constexpr static const char* NameStr = #name; \
    constexpr static const bool IsException = false; \
    \
    static std::unique_ptr<armnn::IWorkload> MakeDummyWorkload(armnn::IWorkloadFactory* factory, \
                                                               unsigned int nIn, unsigned int nOut) \
    { \
        QueueDesc desc; \
        armnn::WorkloadInfo info = MakeDummyWorkloadInfo<DataType>(nIn, nOut); \
        return factory->Create##name(desc, info); \
    } \
};

#define DECLARE_LAYER_POLICY_MAP_PARAM(name, descType) \
template<armnn::DataType DataType> \
struct LayerTypePolicy<armnn::LayerType::name, DataType> \
{ \
    using Type = armnn::name##Layer; \
    using Desc = descType; \
    using QueueDesc = armnn::name##QueueDescriptor; \
    using Workload = armnn::name##Workload; \
    constexpr static const char* NameStr = #name; \
    constexpr static const bool IsException = false; \
    \
    static std::unique_ptr<armnn::IWorkload> MakeDummyWorkload(armnn::IWorkloadFactory* factory, \
                                                               unsigned int nIn, unsigned int nOut) \
    { \
        IgnoreUnused(factory); \
        QueueDesc desc; \
        armnn::WorkloadInfo info = MakeDummyWorkloadInfo<DataType>(nIn, nOut); \
        return std::make_unique<armnn::name##Workload>(desc, info); \
    } \
};

// Define a layer policy specialization for use with the IsLayerSupported tests.
// Use this version for layers whose constructor takes 1 parameter (name).
#define DECLARE_LAYER_POLICY_1_PARAM(name) DECLARE_LAYER_POLICY_CUSTOM_PARAM(name, void)

// Define a layer policy specialization for use with the IsLayerSupported tests.
// Use this version for layers whose constructor takes 2 parameters (descriptor and name).
#define DECLARE_LAYER_POLICY_2_PARAM(name) DECLARE_LAYER_POLICY_CUSTOM_PARAM(name, armnn::name##Descriptor)

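// For illustration, DECLARE_LAYER_POLICY_2_PARAM(BatchNormalization) expands
// to roughly the following specialisation (whitespace rearranged):
//
//     template<armnn::DataType DataType>
//     struct LayerTypePolicy<armnn::LayerType::BatchNormalization, DataType>
//     {
//         using Type      = armnn::BatchNormalizationLayer;
//         using Desc      = armnn::BatchNormalizationDescriptor;
//         using QueueDesc = armnn::BatchNormalizationQueueDescriptor;
//         constexpr static const char* NameStr = "BatchNormalization";
//         constexpr static const bool IsException = false;
//
//         static std::unique_ptr<armnn::IWorkload> MakeDummyWorkload(
//             armnn::IWorkloadFactory* factory, unsigned int nIn, unsigned int nOut)
//         {
//             QueueDesc desc;
//             armnn::WorkloadInfo info = MakeDummyWorkloadInfo<DataType>(nIn, nOut);
//             return factory->CreateBatchNormalization(desc, info);
//         }
//     };
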

#define DECLARE_LAYER_POLICY_EXCEPTION(name, descType) \
template<armnn::DataType DataType> \
struct LayerTypePolicy<armnn::LayerType::name, DataType> \
{ \
    using Type = armnn::name##Layer; \
    using Desc = descType; \
    constexpr static const char* NameStr = #name; \
    constexpr static const bool IsException = true; \
    \
    static std::unique_ptr<armnn::IWorkload> MakeDummyWorkload(armnn::IWorkloadFactory* factory, \
                                                               unsigned int nIn, unsigned int nOut) \
    { \
        IgnoreUnused(factory, nIn, nOut); \
        return std::unique_ptr<armnn::IWorkload>(); \
    } \
};

#define DECLARE_LAYER_POLICY_EXCEPTION_1_PARAM(name) DECLARE_LAYER_POLICY_EXCEPTION(name, void)
#define DECLARE_LAYER_POLICY_EXCEPTION_2_PARAM(name) DECLARE_LAYER_POLICY_EXCEPTION(name, armnn::name##Descriptor)
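
// The EXCEPTION policies mark LayerType entries that cannot be exercised
// through the generic workload-creation path; IsLayerSupportedTest checks
// LayerTypePolicy::IsException and simply skips them.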

// Layer policy template.
template<armnn::LayerType Type, armnn::DataType DataType>
struct LayerTypePolicy;

// Every entry in the armnn::LayerType enum must be accounted for below.
DECLARE_LAYER_POLICY_2_PARAM(Activation)

DECLARE_LAYER_POLICY_1_PARAM(Addition)

DECLARE_LAYER_POLICY_2_PARAM(ArgMinMax)

DECLARE_LAYER_POLICY_2_PARAM(BatchNormalization)

DECLARE_LAYER_POLICY_2_PARAM(BatchToSpaceNd)

DECLARE_LAYER_POLICY_1_PARAM(Cast)

DECLARE_LAYER_POLICY_2_PARAM(Comparison)

DECLARE_LAYER_POLICY_2_PARAM(Concat)

DECLARE_LAYER_POLICY_1_PARAM(Constant)

DECLARE_LAYER_POLICY_1_PARAM(ConvertBf16ToFp32)

DECLARE_LAYER_POLICY_1_PARAM(ConvertFp16ToFp32)

DECLARE_LAYER_POLICY_1_PARAM(ConvertFp32ToBf16)

DECLARE_LAYER_POLICY_1_PARAM(ConvertFp32ToFp16)

DECLARE_LAYER_POLICY_2_PARAM(Convolution2d)

DECLARE_LAYER_POLICY_1_PARAM(MemCopy)

DECLARE_LAYER_POLICY_1_PARAM(MemImport)

DECLARE_LAYER_POLICY_1_PARAM(Debug)

DECLARE_LAYER_POLICY_2_PARAM(DepthToSpace)

DECLARE_LAYER_POLICY_2_PARAM(DepthwiseConvolution2d)

DECLARE_LAYER_POLICY_1_PARAM(Dequantize)

DECLARE_LAYER_POLICY_2_PARAM(DetectionPostProcess)

DECLARE_LAYER_POLICY_2_PARAM(ElementwiseUnary)

DECLARE_LAYER_POLICY_2_PARAM(FakeQuantization)

DECLARE_LAYER_POLICY_2_PARAM(Fill)

DECLARE_LAYER_POLICY_1_PARAM(Floor)

DECLARE_LAYER_POLICY_2_PARAM(FullyConnected)

DECLARE_LAYER_POLICY_2_PARAM(Gather)

DECLARE_LAYER_POLICY_CUSTOM_PARAM(Input, armnn::LayerBindingId)

DECLARE_LAYER_POLICY_2_PARAM(InstanceNormalization)

DECLARE_LAYER_POLICY_2_PARAM(L2Normalization)

DECLARE_LAYER_POLICY_2_PARAM(LogicalBinary)

DECLARE_LAYER_POLICY_2_PARAM(LogSoftmax)

DECLARE_LAYER_POLICY_2_PARAM(Lstm)

DECLARE_LAYER_POLICY_MAP_PARAM(Map, void)

DECLARE_LAYER_POLICY_1_PARAM(Maximum)

DECLARE_LAYER_POLICY_2_PARAM(Mean)

DECLARE_LAYER_POLICY_1_PARAM(Merge)

DECLARE_LAYER_POLICY_1_PARAM(Minimum)

DECLARE_LAYER_POLICY_1_PARAM(Multiplication)

DECLARE_LAYER_POLICY_2_PARAM(Normalization)

DECLARE_LAYER_POLICY_CUSTOM_PARAM(Output, armnn::LayerBindingId)

DECLARE_LAYER_POLICY_2_PARAM(Pad)

DECLARE_LAYER_POLICY_2_PARAM(Permute)

DECLARE_LAYER_POLICY_2_PARAM(Pooling2d)

DECLARE_LAYER_POLICY_2_PARAM(PreCompiled)

DECLARE_LAYER_POLICY_1_PARAM(Prelu)

DECLARE_LAYER_POLICY_2_PARAM(QLstm)

DECLARE_LAYER_POLICY_1_PARAM(QuantizedLstm)

DECLARE_LAYER_POLICY_1_PARAM(Division)

DECLARE_LAYER_POLICY_1_PARAM(Quantize)

DECLARE_LAYER_POLICY_1_PARAM(Rank)

DECLARE_LAYER_POLICY_2_PARAM(Reduce)

DECLARE_LAYER_POLICY_2_PARAM(Reshape)

DECLARE_LAYER_POLICY_2_PARAM(Resize)

DECLARE_LAYER_POLICY_2_PARAM(Slice)

DECLARE_LAYER_POLICY_2_PARAM(Softmax)

DECLARE_LAYER_POLICY_2_PARAM(SpaceToBatchNd)

DECLARE_LAYER_POLICY_2_PARAM(SpaceToDepth)

DECLARE_LAYER_POLICY_2_PARAM(Splitter)

DECLARE_LAYER_POLICY_2_PARAM(Stack)

DECLARE_LAYER_POLICY_EXCEPTION_2_PARAM(StandIn)

DECLARE_LAYER_POLICY_2_PARAM(StridedSlice)

DECLARE_LAYER_POLICY_1_PARAM(Subtraction)

DECLARE_LAYER_POLICY_1_PARAM(Switch)

DECLARE_LAYER_POLICY_2_PARAM(Transpose)

DECLARE_LAYER_POLICY_2_PARAM(TransposeConvolution2d)

DECLARE_LAYER_POLICY_2_PARAM(UnidirectionalSequenceLstm)

DECLARE_LAYER_POLICY_MAP_PARAM(Unmap, void)


// Generic implementation to get the number of input slots for a given layer type.
template<armnn::LayerType Type>
unsigned int GetNumInputs(const armnn::Layer& layer)
{
    return layer.GetNumInputSlots();
}

// Generic implementation to get the number of output slots for a given layer type.
template<armnn::LayerType Type>
unsigned int GetNumOutputs(const armnn::Layer& layer)
{
    return layer.GetNumOutputSlots();
}

template<>
unsigned int GetNumInputs<armnn::LayerType::Concat>(const armnn::Layer& layer)
{
    IgnoreUnused(layer);
    return 2;
}
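
// Concat reports two inputs here to match DummyLayer<armnn::ConcatLayer>,
// which constructs the layer with armnn::OriginsDescriptor desc(2).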

// Tests that the IsLayerSupported() function returns the correct value.
// We determine the correct value by *trying* to create the relevant workload and seeing if it matches what we expect.
// Returns true if expectations are met, otherwise returns false.
template<typename FactoryType, armnn::DataType DataType, armnn::LayerType Type>
bool IsLayerSupportedTest(FactoryType *factory, Tag<Type>)
{
    using LayerPolicy = LayerTypePolicy<Type, DataType>;
    using LayerType = typename LayerPolicy::Type;
    using LayerDesc = typename LayerPolicy::Desc;
    DummyLayer<LayerType, LayerDesc> layer;

    if (LayerPolicy::IsException) // Don't test exceptions to the rule.
    {
        return true;
    }

    unsigned int numIn = GetNumInputs<Type>(*layer.m_Layer);
    unsigned int numOut = GetNumOutputs<Type>(*layer.m_Layer);

    // Make another dummy layer just to make IsLayerSupported have valid inputs.
    DummyLayer<armnn::ConstantLayer, void> previousLayer;
    // Set output of the previous layer to a dummy tensor.
    armnn::TensorInfo output = MakeDummyTensorInfo<DataType>();
    previousLayer.m_Layer->GetOutputSlot(0).SetTensorInfo(output);
    // Connect all outputs of the previous layer to inputs of tested layer.
    for (unsigned int i = 0; i < numIn; i++)
    {
        armnn::IOutputSlot& previousLayerOutputSlot = previousLayer.m_Layer->GetOutputSlot(0);
        armnn::IInputSlot& layerInputSlot = layer.m_Layer->GetInputSlot(i);
        previousLayerOutputSlot.Connect(layerInputSlot);
    }
    // Set all outputs of the tested layer to a dummy tensor.
    for (unsigned int i = 0; i < numOut; i++)
    {
        layer.m_Layer->GetOutputSlot(i).SetTensorInfo(output);
    }

    std::string layerName = LayerPolicy::NameStr;
    std::string reasonIfUnsupported;
    if (FactoryType::IsLayerSupported(*layer.m_Layer, DataType, reasonIfUnsupported))
    {
        std::string errorMsg = " layer expected support but found none.";
        try
        {
            bool retVal = LayerPolicy::MakeDummyWorkload(factory, numIn, numOut).get() != nullptr;
            CHECK_MESSAGE(retVal, layerName << errorMsg);
            return retVal;
        }
        catch(const armnn::InvalidArgumentException& e)
        {
            IgnoreUnused(e);
            // This is ok since we throw InvalidArgumentException when creating the dummy workload.
            return true;
        }
        catch(const std::exception& e)
        {
            errorMsg = e.what();
            FAIL(layerName << ": " << errorMsg);
            return false;
        }
        catch(...)
        {
            errorMsg = "Unexpected error while testing support for ";
            FAIL(errorMsg << layerName);
            return false;
        }
    }
    else
    {
        std::string errorMsg = " layer expected no support (giving reason: " + reasonIfUnsupported + ") but found some.";
        try
        {
            bool retVal = LayerPolicy::MakeDummyWorkload(factory, numIn, numOut).get() == nullptr;
            CHECK_MESSAGE(retVal, layerName << errorMsg);
            return retVal;
        }
        // These two exceptions are ok: For workloads that are partially supported, attempting to instantiate them
        // using parameters that make IsLayerSupported() return false should throw an
        // InvalidArgumentException or UnimplementedException.
        catch(const armnn::InvalidArgumentException& e)
        {
            IgnoreUnused(e);
            return true;
        }
        catch(const armnn::UnimplementedException& e)
        {
            IgnoreUnused(e);
            return true;
        }
        catch(const std::exception& e)
        {
            errorMsg = e.what();
            FAIL(layerName << ": " << errorMsg);
            return false;
        }
        catch(...)
        {
            errorMsg = "Unexpected error while testing support for ";
            FAIL(errorMsg << layerName);
            return false;
        }
    }
}

template<typename FactoryType, armnn::DataType DataType, armnn::LayerType Type>
bool IsLayerSupportedTest(FactoryType *factory, Tag<armnn::LayerType::Map>)
{
    IgnoreUnused(factory);
    return true;
}

template<typename FactoryType, armnn::DataType DataType, armnn::LayerType Type>
bool IsLayerSupportedTest(FactoryType *factory, Tag<armnn::LayerType::Unmap>)
{
    IgnoreUnused(factory);
    return true;
}

// Helper function to compute the next type in the LayerType enum.
constexpr armnn::LayerType NextType(armnn::LayerType type)
{
    return static_cast<armnn::LayerType>(static_cast<int>(type)+1);
}

// Termination function for determining the end of the LayerType enumeration.
template<typename FactoryType, armnn::DataType DataType, armnn::LayerType Type>
bool IsLayerSupportedTestsImpl(FactoryType *factory, Tag<armnn::LayerType::LastLayer>)
{
    return IsLayerSupportedTest<FactoryType, DataType, Type>(factory, Tag<Type>());
}

// Recursive function to test an entry in the LayerType enum and then iterate to the next entry.
template<typename FactoryType, armnn::DataType DataType, armnn::LayerType Type>
bool IsLayerSupportedTestsImpl(FactoryType *factory, Tag<Type>)
{
    bool v = IsLayerSupportedTest<FactoryType, DataType, Type>(factory, Tag<Type>());

    return v &&
           IsLayerSupportedTestsImpl<FactoryType, DataType, NextType(Type)>
               (factory, Tag<NextType(Type)>());
}
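
// Instantiating IsLayerSupportedTestsImpl at FirstLayer unrolls, at compile
// time, into one IsLayerSupportedTest call per LayerType entry, terminating
// at the Tag<armnn::LayerType::LastLayer> overload above.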

// Helper function to pass through to the test framework.
template<typename FactoryType, armnn::DataType DataType>
bool IsLayerSupportedTests(FactoryType *factory)
{
    return IsLayerSupportedTestsImpl<FactoryType, DataType>(factory, Tag<armnn::LayerType::FirstLayer>());
}
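
// Illustrative use from a backend's unit tests (the factory and data type
// here are assumptions and vary per backend):
//
//     armnn::RefWorkloadFactory factory;
//     CHECK(IsLayerSupportedTests<armnn::RefWorkloadFactory,
//                                 armnn::DataType::Float32>(&factory));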

template<armnn::LayerType Type>
bool TestLayerTypeMatches()
{
    using LayerPolicy = LayerTypePolicy<Type, armnn::DataType::Float32>;
    using LayerType = typename LayerPolicy::Type;
    using LayerDesc = typename LayerPolicy::Desc;
    DummyLayer<LayerType, LayerDesc> layer;

    std::stringstream ss;
    ss << LayerPolicy::NameStr << " layer type mismatches expected layer type value.";
    bool v = Type == layer.m_Layer->GetType();
    CHECK_MESSAGE(v, ss.str());
    return v;
}

template<armnn::LayerType Type>
bool LayerTypeMatchesTestImpl(Tag<armnn::LayerType::LastLayer>)
{
    return TestLayerTypeMatches<Type>();
}

template<armnn::LayerType Type>
bool LayerTypeMatchesTestImpl(Tag<Type>)
{
    return TestLayerTypeMatches<Type>() &&
           LayerTypeMatchesTestImpl<NextType(Type)>(Tag<NextType(Type)>());
}

template<typename FactoryType, typename LayerType, armnn::DataType InputDataType, armnn::DataType OutputDataType>
bool IsConvertLayerSupportedTests(std::string& reasonIfUnsupported)
{
    armnn::Graph graph;
    LayerType* const layer = graph.AddLayer<LayerType>("LayerName");

    armnn::Layer* const input = graph.AddLayer<armnn::InputLayer>(0, "input");
    armnn::Layer* const output = graph.AddLayer<armnn::OutputLayer>(0, "output");

    armnn::TensorInfo inputTensorInfo({1, 3, 2, 3}, InputDataType);
    armnn::TensorInfo outputTensorInfo({1, 3, 2, 3}, OutputDataType);

    input->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
    input->GetOutputHandler(0).SetTensorInfo(inputTensorInfo);
    layer->GetOutputSlot(0).Connect(output->GetInputSlot(0));
    layer->GetOutputHandler(0).SetTensorInfo(outputTensorInfo);

    bool result = FactoryType::IsLayerSupported(*layer, InputDataType, reasonIfUnsupported);

    return result;
}
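
// Illustrative call, e.g. for the Fp32->Fp16 conversion layer (layer and
// factory types here are assumptions):
//
//     std::string reason;
//     bool supported = IsConvertLayerSupportedTests<armnn::RefWorkloadFactory,
//                                                   armnn::ConvertFp32ToFp16Layer,
//                                                   armnn::DataType::Float32,
//                                                   armnn::DataType::Float16>(reason);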

template<typename FactoryType, armnn::DataType InputDataType, armnn::DataType OutputDataType>
bool IsLogicalBinaryLayerSupportedTests(std::string& reasonIfUnsupported)
{
    armnn::Graph graph;
    armnn::LogicalBinaryDescriptor desc(armnn::LogicalBinaryOperation::LogicalOr);

    armnn::Layer* const input0 = graph.AddLayer<armnn::InputLayer>(0, "input0");
    armnn::Layer* const input1 = graph.AddLayer<armnn::InputLayer>(1, "input1");

    armnn::Layer* const layer = graph.AddLayer<armnn::LogicalBinaryLayer>(desc, "logicalOrLayer");

    armnn::Layer* const output = graph.AddLayer<armnn::OutputLayer>(0, "output1");

    armnn::TensorInfo inputTensorInfo0({1, 1, 1, 4}, InputDataType);
    armnn::TensorInfo inputTensorInfo1({1, 1, 1, 4}, InputDataType);

    armnn::TensorInfo outputTensorInfo({1, 1, 1, 4}, OutputDataType);

    input0->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
    input1->GetOutputSlot(0).Connect(layer->GetInputSlot(1));

    input0->GetOutputHandler(0).SetTensorInfo(inputTensorInfo0);
    input1->GetOutputHandler(0).SetTensorInfo(inputTensorInfo1);

    layer->GetOutputSlot(0).Connect(output->GetInputSlot(0));
    layer->GetOutputHandler(0).SetTensorInfo(outputTensorInfo);

    bool result = FactoryType::IsLayerSupported(*layer, InputDataType, reasonIfUnsupported);

    return result;
}

template<typename FactoryType, armnn::DataType InputDataType, armnn::DataType OutputDataType>
bool IsLogicalBinaryLayerBroadcastSupportedTests(std::string& reasonIfUnsupported)
{
    armnn::Graph graph;
    armnn::LogicalBinaryDescriptor desc(armnn::LogicalBinaryOperation::LogicalAnd);

    armnn::Layer* const input0 = graph.AddLayer<armnn::InputLayer>(0, "input0");
    armnn::Layer* const input1 = graph.AddLayer<armnn::InputLayer>(1, "input1");

    armnn::Layer* const layer = graph.AddLayer<armnn::LogicalBinaryLayer>(desc, "logicalAndLayer");

    armnn::Layer* const output = graph.AddLayer<armnn::OutputLayer>(0, "output2");

    armnn::TensorInfo inputTensorInfo0({1, 1, 1, 4}, InputDataType);
    armnn::TensorInfo inputTensorInfo1({1, 1, 1, 1}, InputDataType);

    armnn::TensorInfo outputTensorInfo({1, 1, 1, 4}, OutputDataType);

    input0->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
    input1->GetOutputSlot(0).Connect(layer->GetInputSlot(1));

    input0->GetOutputHandler(0).SetTensorInfo(inputTensorInfo0);
    input1->GetOutputHandler(0).SetTensorInfo(inputTensorInfo1);

    layer->GetOutputSlot(0).Connect(output->GetInputSlot(0));
    layer->GetOutputHandler(0).SetTensorInfo(outputTensorInfo);

    bool result = FactoryType::IsLayerSupported(*layer, InputDataType, reasonIfUnsupported);

    return result;
}

template<typename FactoryType, armnn::DataType InputDataType, armnn::DataType OutputDataType>
bool IsMeanLayerSupportedTests(std::string& reasonIfUnsupported)
{
    armnn::Graph graph;
    static const std::vector<unsigned> axes = {1, 0};
    armnn::MeanDescriptor desc(axes, false);

    armnn::Layer* const layer = graph.AddLayer<armnn::MeanLayer>(desc, "LayerName");

    armnn::Layer* const input = graph.AddLayer<armnn::InputLayer>(0, "input");
    armnn::Layer* const output = graph.AddLayer<armnn::OutputLayer>(0, "output");

    armnn::TensorInfo inputTensorInfo({4, 3, 2}, InputDataType);
    armnn::TensorInfo outputTensorInfo({2}, OutputDataType);

    input->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
    input->GetOutputHandler(0).SetTensorInfo(inputTensorInfo);
    layer->GetOutputSlot(0).Connect(output->GetInputSlot(0));
    layer->GetOutputHandler(0).SetTensorInfo(outputTensorInfo);

    bool result = FactoryType::IsLayerSupported(*layer, InputDataType, reasonIfUnsupported);

    return result;
}

// Tests that IsMeanSupported fails when input tensor dimensions
// do not match output tensor dimensions when keepDims == true.
template<typename FactoryType, armnn::DataType InputDataType, armnn::DataType OutputDataType>
bool IsMeanLayerNotSupportedTests(std::string& reasonIfUnsupported)
{
    armnn::Graph graph;
    static const std::vector<unsigned> axes = {};
    // Set keepDims == true
    armnn::MeanDescriptor desc(axes, true);

    armnn::Layer* const layer = graph.AddLayer<armnn::MeanLayer>(desc, "LayerName");

    armnn::Layer* const input = graph.AddLayer<armnn::InputLayer>(0, "input");
    armnn::Layer* const output = graph.AddLayer<armnn::OutputLayer>(0, "output");

    // Mismatching number of tensor dimensions
    armnn::TensorInfo inputTensorInfo({1, 1, 1, 1}, InputDataType);
    armnn::TensorInfo outputTensorInfo({1, 1}, OutputDataType);

    input->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
    input->GetOutputHandler(0).SetTensorInfo(inputTensorInfo);
    layer->GetOutputSlot(0).Connect(output->GetInputSlot(0));
    layer->GetOutputHandler(0).SetTensorInfo(outputTensorInfo);

    bool result = FactoryType::IsLayerSupported(*layer, InputDataType, reasonIfUnsupported);

    return result;
}

template<typename FactoryType, armnn::DataType OutputDataType>
bool IsConstantLayerSupportedTests(std::string& reasonIfUnsupported)
{
    armnn::Graph graph;

    armnn::Layer* const layer = graph.AddLayer<armnn::ConstantLayer>("ConstantLayerName");
    armnn::Layer* const output = graph.AddLayer<armnn::OutputLayer>(0, "OutputLayerName");

    armnn::TensorInfo outputTensorInfo({1, 1}, OutputDataType);

    layer->GetOutputSlot(0).Connect(output->GetInputSlot(0));
    layer->GetOutputHandler(0).SetTensorInfo(outputTensorInfo);

    bool result = FactoryType::IsLayerSupported(*layer, OutputDataType, reasonIfUnsupported);

    return result;
}

} // anonymous namespace