ArmNN 20.11
IsLayerSupportedTestImpl.hpp
//
// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once

#include <Graph.hpp>

#include <armnn/utility/IgnoreUnused.hpp>

#include <backendsCommon/CpuTensorHandle.hpp>
#include <backendsCommon/WorkloadData.hpp>
#include <backendsCommon/WorkloadFactory.hpp>

#include <boost/test/unit_test.hpp>

#include <memory>
#include <sstream>
#include <string>

namespace
{
armnn::Graph dummyGraph;

// Make a dummy TensorInfo object.
template<armnn::DataType DataType>
armnn::TensorInfo MakeDummyTensorInfo()
{
    return armnn::TensorInfo({2,2,2,2}, DataType, 1.0, 0);
}

// Make a dummy WorkloadInfo using a dummy TensorInfo.
template<armnn::DataType DataType>
armnn::WorkloadInfo MakeDummyWorkloadInfo(unsigned int numInputs, unsigned int numOutputs)
{
    armnn::WorkloadInfo info;

    for (unsigned int i = 0; i < numInputs; i++)
    {
        info.m_InputTensorInfos.push_back(MakeDummyTensorInfo<DataType>());
    }

    for (unsigned int o = 0; o < numOutputs; o++)
    {
        info.m_OutputTensorInfos.push_back(MakeDummyTensorInfo<DataType>());
    }

    return info;
}
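
// For example, MakeDummyWorkloadInfo<armnn::DataType::Float32>(2, 1) yields a
// WorkloadInfo whose m_InputTensorInfos holds two dummy tensors and whose
// m_OutputTensorInfos holds one.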

// Template class to create a dummy layer (2 parameters).
template<typename LayerType, typename DescType = typename LayerType::DescriptorType>
struct DummyLayer
{
    DummyLayer()
    {
        m_Layer = dummyGraph.AddLayer<LayerType>(DescType(), "");
    }

    ~DummyLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }

    LayerType* m_Layer;
};
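
// Usage sketch: DummyLayer<armnn::SoftmaxLayer> adds a SoftmaxLayer with a
// default-constructed armnn::SoftmaxDescriptor to dummyGraph, and the
// destructor erases it again, so each test starts from a clean graph.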

// Template class to create a dummy layer (1 parameter).
template<typename LayerType>
struct DummyLayer<LayerType, void>
{
    DummyLayer()
    {
        m_Layer = dummyGraph.AddLayer<LayerType>("");
    }

    ~DummyLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }

    LayerType* m_Layer;
};

template<>
struct DummyLayer<armnn::BatchNormalizationLayer>
{
    DummyLayer()
    {
        m_Layer = dummyGraph.AddLayer<armnn::BatchNormalizationLayer>(
            armnn::BatchNormalizationDescriptor(), "");
        // Dummy 1x1x1x1 Float32 constants; only their presence matters here.
        m_Layer->m_Mean = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_Variance = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_Beta = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_Gamma = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
    }

    ~DummyLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }

    armnn::BatchNormalizationLayer* m_Layer;
};

template<>
struct DummyLayer<armnn::BatchToSpaceNdLayer>
{
    DummyLayer()
    {
        m_Layer = dummyGraph.AddLayer<armnn::BatchToSpaceNdLayer>(
            armnn::BatchToSpaceNdDescriptor(), "");
    }

    ~DummyLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }

    armnn::BatchToSpaceNdLayer* m_Layer;
};

template<>
struct DummyLayer<armnn::ConstantLayer, void>
{
    DummyLayer()
    {
        m_Layer = dummyGraph.AddLayer<armnn::ConstantLayer>("");
    }

    ~DummyLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }

    armnn::ConstantLayer* m_Layer;
};

template<>
struct DummyLayer<armnn::InputLayer, armnn::LayerBindingId>
{
    DummyLayer()
    {
        m_Layer = dummyGraph.AddLayer<armnn::InputLayer>(armnn::LayerBindingId(), "");
    }

    ~DummyLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }

    armnn::InputLayer* m_Layer;
};

template<>
struct DummyLayer<armnn::ConcatLayer>
{
    DummyLayer()
    {
        armnn::OriginsDescriptor desc(2);
        m_Layer = dummyGraph.AddLayer<armnn::ConcatLayer>(desc, "");
    }

    ~DummyLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }

    armnn::ConcatLayer* m_Layer;
};

template<>
struct DummyLayer<armnn::MapLayer, void>
{
    DummyLayer()
    {
        m_Layer = dummyGraph.AddLayer<armnn::MapLayer>("");
    }

    ~DummyLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }

    armnn::MapLayer* m_Layer;
};

template<>
struct DummyLayer<armnn::OutputLayer, armnn::LayerBindingId>
{
    DummyLayer()
    {
        m_Layer = dummyGraph.AddLayer<armnn::OutputLayer>(armnn::LayerBindingId(), "");
    }

    ~DummyLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }

    armnn::OutputLayer* m_Layer;
};

template<>
struct DummyLayer<armnn::SplitterLayer>
{
    DummyLayer()
    {
        armnn::ViewsDescriptor desc(1);
        m_Layer = dummyGraph.AddLayer<armnn::SplitterLayer>(desc, "");
    }

    ~DummyLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }

    armnn::SplitterLayer* m_Layer;
};

template<>
struct DummyLayer<armnn::UnmapLayer, void>
{
    DummyLayer()
    {
        m_Layer = dummyGraph.AddLayer<armnn::UnmapLayer>("");
    }

    ~DummyLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }

    armnn::UnmapLayer* m_Layer;
};

template <typename ConvolutionLayerType>
struct DummyConvolutionLayer
{
    DummyConvolutionLayer()
    {
        typename ConvolutionLayerType::DescriptorType desc;
        desc.m_StrideX = 1;
        desc.m_StrideY = 1;
        m_Layer = dummyGraph.AddLayer<ConvolutionLayerType>(desc, "");
        // Dummy 1x1x1x1 Float32 weight and bias constants.
        m_Layer->m_Weight = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_Bias = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
    }

    ~DummyConvolutionLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }

    ConvolutionLayerType* m_Layer;
};

template<>
struct DummyLayer<armnn::Convolution2dLayer>
    : public DummyConvolutionLayer<armnn::Convolution2dLayer>
{
};

template<>
struct DummyLayer<armnn::DepthwiseConvolution2dLayer>
    : public DummyConvolutionLayer<armnn::DepthwiseConvolution2dLayer>
{
};

template<>
struct DummyLayer<armnn::TransposeConvolution2dLayer>
    : public DummyConvolutionLayer<armnn::TransposeConvolution2dLayer>
{
};

template<>
struct DummyLayer<armnn::DetectionPostProcessLayer>
{
    DummyLayer()
    {
        m_Layer = dummyGraph.AddLayer<armnn::DetectionPostProcessLayer>(
            armnn::DetectionPostProcessDescriptor(), "");
        m_Layer->m_Anchors = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
    }

    ~DummyLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }

    armnn::DetectionPostProcessLayer* m_Layer;
};

template <typename LstmLayerType>
struct DummyLstmLayer
{
    DummyLstmLayer()
    {
        typename LstmLayerType::DescriptorType desc;
        desc.m_CifgEnabled = false;

        m_Layer = dummyGraph.AddLayer<LstmLayerType>(desc, "");
        // Basic parameters (dummy 1x1x1x1 Float32 constants).
        m_Layer->m_BasicParameters.m_InputToForgetWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_BasicParameters.m_InputToCellWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_BasicParameters.m_InputToOutputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_BasicParameters.m_RecurrentToForgetWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_BasicParameters.m_RecurrentToCellWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_BasicParameters.m_RecurrentToOutputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_BasicParameters.m_ForgetGateBias = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_BasicParameters.m_CellBias = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_BasicParameters.m_OutputGateBias = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));

        // CIFG is disabled above, so the input-gate parameters are also required.
        m_Layer->m_CifgParameters.m_InputToInputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_CifgParameters.m_RecurrentToInputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_CifgParameters.m_InputGateBias = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
    }

    ~DummyLstmLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }

    armnn::LstmLayer* m_Layer;
};

template<>
struct DummyLayer<armnn::LstmLayer>
    : public DummyLstmLayer<armnn::LstmLayer>
{
};

template <typename QLstmLayerType>
struct DummyQLstmLayer
{
    DummyQLstmLayer()
    {
        typename QLstmLayerType::DescriptorType desc;
        desc.m_CifgEnabled = false;
        desc.m_PeepholeEnabled = true;
        desc.m_ProjectionEnabled = true;
        desc.m_LayerNormEnabled = true;

        m_Layer = dummyGraph.AddLayer<QLstmLayerType>(desc, "qLstm");

        // Basic params: dummy 1x1x1x1 constants (8-bit symmetric weights, 32-bit biases).
        m_Layer->m_BasicParameters.m_InputToForgetWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QSymmS8));
        m_Layer->m_BasicParameters.m_InputToCellWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QSymmS8));
        m_Layer->m_BasicParameters.m_InputToOutputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QSymmS8));

        m_Layer->m_BasicParameters.m_RecurrentToForgetWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QSymmS8));
        m_Layer->m_BasicParameters.m_RecurrentToCellWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QSymmS8));
        m_Layer->m_BasicParameters.m_RecurrentToOutputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QSymmS8));

        m_Layer->m_BasicParameters.m_ForgetGateBias = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Signed32));
        m_Layer->m_BasicParameters.m_CellBias = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Signed32));
        m_Layer->m_BasicParameters.m_OutputGateBias = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Signed32));

        // CIFG optional params
        m_Layer->m_CifgParameters.m_InputToInputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QSymmS8));
        m_Layer->m_CifgParameters.m_RecurrentToInputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QSymmS8));
        m_Layer->m_CifgParameters.m_InputGateBias = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Signed32));

        // Projection optional params
        m_Layer->m_ProjectionParameters.m_ProjectionWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QSymmS8));
        m_Layer->m_ProjectionParameters.m_ProjectionBias = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Signed32));

        // Peephole optional params (16-bit symmetric)
        m_Layer->m_PeepholeParameters.m_CellToInputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QSymmS16));
        m_Layer->m_PeepholeParameters.m_CellToForgetWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QSymmS16));
        m_Layer->m_PeepholeParameters.m_CellToOutputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QSymmS16));

        // Layer normalization optional params (16-bit symmetric)
        m_Layer->m_LayerNormParameters.m_InputLayerNormWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QSymmS16));
        m_Layer->m_LayerNormParameters.m_ForgetLayerNormWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QSymmS16));
        m_Layer->m_LayerNormParameters.m_CellLayerNormWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QSymmS16));
        m_Layer->m_LayerNormParameters.m_OutputLayerNormWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QSymmS16));
    }

    ~DummyQLstmLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }

    armnn::QLstmLayer* m_Layer;
};

template<>
struct DummyLayer<armnn::QuantizedLstmLayer, void>
{
    DummyLayer()
    {
        m_Layer = dummyGraph.AddLayer<armnn::QuantizedLstmLayer>("");

        // Dummy 1x1x1x1 constants: QAsymmU8 weights, Signed32 biases.
        m_Layer->m_QuantizedLstmParameters.m_InputToInputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QAsymmU8));
        m_Layer->m_QuantizedLstmParameters.m_InputToForgetWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QAsymmU8));
        m_Layer->m_QuantizedLstmParameters.m_InputToCellWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QAsymmU8));
        m_Layer->m_QuantizedLstmParameters.m_InputToOutputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QAsymmU8));

        m_Layer->m_QuantizedLstmParameters.m_RecurrentToInputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QAsymmU8));
        m_Layer->m_QuantizedLstmParameters.m_RecurrentToForgetWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QAsymmU8));
        m_Layer->m_QuantizedLstmParameters.m_RecurrentToCellWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QAsymmU8));
        m_Layer->m_QuantizedLstmParameters.m_RecurrentToOutputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QAsymmU8));

        m_Layer->m_QuantizedLstmParameters.m_InputGateBias = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Signed32));
        m_Layer->m_QuantizedLstmParameters.m_ForgetGateBias = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Signed32));
        m_Layer->m_QuantizedLstmParameters.m_CellBias = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Signed32));
        m_Layer->m_QuantizedLstmParameters.m_OutputGateBias = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Signed32));
    }

    ~DummyLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }

    armnn::QuantizedLstmLayer* m_Layer;
};

template<>
struct DummyLayer<armnn::FullyConnectedLayer>
{
    DummyLayer()
    {
        armnn::FullyConnectedLayer::DescriptorType desc;
        m_Layer = dummyGraph.AddLayer<armnn::FullyConnectedLayer>(desc, "");
        m_Layer->m_Weight = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
    }

    ~DummyLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }

    armnn::FullyConnectedLayer* m_Layer;
};

// Tag for giving LayerType entries a unique strong type each.
template<armnn::LayerType>
struct Tag{};

#define DECLARE_LAYER_POLICY_CUSTOM_PARAM(name, descType) \
template<armnn::DataType DataType> \
struct LayerTypePolicy<armnn::LayerType::name, DataType> \
{ \
    using Type = armnn::name##Layer; \
    using Desc = descType; \
    using QueueDesc = armnn::name##QueueDescriptor; \
    constexpr static const char* NameStr = #name; \
    constexpr static const bool IsException = false; \
    \
    static std::unique_ptr<armnn::IWorkload> MakeDummyWorkload(armnn::IWorkloadFactory* factory, \
        unsigned int nIn, unsigned int nOut) \
    { \
        QueueDesc desc; \
        armnn::WorkloadInfo info = MakeDummyWorkloadInfo<DataType>(nIn, nOut); \
        return factory->Create##name(desc, info); \
    } \
};

#define DECLARE_LAYER_POLICY_MAP_PARAM(name, descType) \
template<armnn::DataType DataType> \
struct LayerTypePolicy<armnn::LayerType::name, DataType> \
{ \
    using Type = armnn::name##Layer; \
    using Desc = descType; \
    using QueueDesc = armnn::name##QueueDescriptor; \
    using Workload = armnn::name##Workload; \
    constexpr static const char* NameStr = #name; \
    constexpr static const bool IsException = false; \
    \
    static std::unique_ptr<armnn::IWorkload> MakeDummyWorkload(armnn::IWorkloadFactory* factory, \
        unsigned int nIn, unsigned int nOut) \
    { \
        IgnoreUnused(factory); \
        QueueDesc desc; \
        armnn::WorkloadInfo info = MakeDummyWorkloadInfo<DataType>(nIn, nOut); \
        return std::make_unique<armnn::name##Workload>(desc, info); \
    } \
};

// Define a layer policy specialization for use with the IsLayerSupported tests.
// Use this version for layers whose constructor takes 1 parameter (name).
#define DECLARE_LAYER_POLICY_1_PARAM(name) DECLARE_LAYER_POLICY_CUSTOM_PARAM(name, void)

// Define a layer policy specialization for use with the IsLayerSupported tests.
// Use this version for layers whose constructor takes 2 parameters (descriptor and name).
#define DECLARE_LAYER_POLICY_2_PARAM(name) DECLARE_LAYER_POLICY_CUSTOM_PARAM(name, armnn::name##Descriptor)
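
// For illustration, DECLARE_LAYER_POLICY_2_PARAM(Softmax) specializes
// LayerTypePolicy<armnn::LayerType::Softmax, DataType> with
// Type = armnn::SoftmaxLayer, Desc = armnn::SoftmaxDescriptor and
// QueueDesc = armnn::SoftmaxQueueDescriptor, and its MakeDummyWorkload()
// calls factory->CreateSoftmax(desc, info).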

#define DECLARE_LAYER_POLICY_EXCEPTION(name, descType) \
template<armnn::DataType DataType> \
struct LayerTypePolicy<armnn::LayerType::name, DataType> \
{ \
    using Type = armnn::name##Layer; \
    using Desc = descType; \
    constexpr static const char* NameStr = #name; \
    constexpr static const bool IsException = true; \
    \
    static std::unique_ptr<armnn::IWorkload> MakeDummyWorkload(armnn::IWorkloadFactory* factory, \
        unsigned int nIn, unsigned int nOut) \
    { \
        IgnoreUnused(factory, nIn, nOut); \
        return std::unique_ptr<armnn::IWorkload>(); \
    } \
};

#define DECLARE_LAYER_POLICY_EXCEPTION_1_PARAM(name) DECLARE_LAYER_POLICY_EXCEPTION(name, void)
#define DECLARE_LAYER_POLICY_EXCEPTION_2_PARAM(name) DECLARE_LAYER_POLICY_EXCEPTION(name, armnn::name##Descriptor)

// Layer policy template.
template<armnn::LayerType Type, armnn::DataType DataType>
struct LayerTypePolicy;
// Every entry in the armnn::LayerType enum must be accounted for below.
DECLARE_LAYER_POLICY_2_PARAM(Activation)

DECLARE_LAYER_POLICY_1_PARAM(Addition)

DECLARE_LAYER_POLICY_2_PARAM(ArgMinMax)

DECLARE_LAYER_POLICY_2_PARAM(BatchNormalization)

DECLARE_LAYER_POLICY_2_PARAM(BatchToSpaceNd)

DECLARE_LAYER_POLICY_2_PARAM(Comparison)

DECLARE_LAYER_POLICY_2_PARAM(Concat)

DECLARE_LAYER_POLICY_1_PARAM(Constant)

DECLARE_LAYER_POLICY_1_PARAM(ConvertBf16ToFp32)

DECLARE_LAYER_POLICY_1_PARAM(ConvertFp16ToFp32)

DECLARE_LAYER_POLICY_1_PARAM(ConvertFp32ToBf16)

DECLARE_LAYER_POLICY_1_PARAM(ConvertFp32ToFp16)

DECLARE_LAYER_POLICY_2_PARAM(Convolution2d)

DECLARE_LAYER_POLICY_1_PARAM(MemCopy)

DECLARE_LAYER_POLICY_1_PARAM(MemImport)

DECLARE_LAYER_POLICY_1_PARAM(Debug)

DECLARE_LAYER_POLICY_2_PARAM(DepthToSpace)

DECLARE_LAYER_POLICY_2_PARAM(DepthwiseConvolution2d)

DECLARE_LAYER_POLICY_1_PARAM(Dequantize)

DECLARE_LAYER_POLICY_2_PARAM(DetectionPostProcess)

DECLARE_LAYER_POLICY_2_PARAM(ElementwiseUnary)

DECLARE_LAYER_POLICY_2_PARAM(FakeQuantization)

DECLARE_LAYER_POLICY_2_PARAM(Fill)

DECLARE_LAYER_POLICY_1_PARAM(Floor)

DECLARE_LAYER_POLICY_2_PARAM(FullyConnected)

DECLARE_LAYER_POLICY_2_PARAM(Gather)

DECLARE_LAYER_POLICY_CUSTOM_PARAM(Input, armnn::LayerBindingId)

DECLARE_LAYER_POLICY_2_PARAM(InstanceNormalization)

DECLARE_LAYER_POLICY_2_PARAM(L2Normalization)

DECLARE_LAYER_POLICY_2_PARAM(LogicalBinary)

DECLARE_LAYER_POLICY_2_PARAM(LogSoftmax)

DECLARE_LAYER_POLICY_2_PARAM(Lstm)

DECLARE_LAYER_POLICY_MAP_PARAM(Map, void)

DECLARE_LAYER_POLICY_1_PARAM(Maximum)

DECLARE_LAYER_POLICY_2_PARAM(Mean)

DECLARE_LAYER_POLICY_1_PARAM(Merge)

DECLARE_LAYER_POLICY_1_PARAM(Minimum)

DECLARE_LAYER_POLICY_1_PARAM(Multiplication)

DECLARE_LAYER_POLICY_2_PARAM(Normalization)

DECLARE_LAYER_POLICY_CUSTOM_PARAM(Output, armnn::LayerBindingId)

DECLARE_LAYER_POLICY_2_PARAM(Pad)

DECLARE_LAYER_POLICY_2_PARAM(Permute)

DECLARE_LAYER_POLICY_2_PARAM(Pooling2d)

DECLARE_LAYER_POLICY_1_PARAM(Prelu)

DECLARE_LAYER_POLICY_2_PARAM(PreCompiled)

DECLARE_LAYER_POLICY_2_PARAM(QLstm)

DECLARE_LAYER_POLICY_1_PARAM(QuantizedLstm)

DECLARE_LAYER_POLICY_1_PARAM(Division)

DECLARE_LAYER_POLICY_1_PARAM(Quantize)

DECLARE_LAYER_POLICY_1_PARAM(Rank)

DECLARE_LAYER_POLICY_2_PARAM(Resize)

DECLARE_LAYER_POLICY_2_PARAM(Reshape)

DECLARE_LAYER_POLICY_2_PARAM(Slice)

DECLARE_LAYER_POLICY_2_PARAM(Softmax)

DECLARE_LAYER_POLICY_2_PARAM(SpaceToBatchNd)

DECLARE_LAYER_POLICY_2_PARAM(SpaceToDepth)

DECLARE_LAYER_POLICY_2_PARAM(Splitter)

DECLARE_LAYER_POLICY_2_PARAM(Stack)

DECLARE_LAYER_POLICY_EXCEPTION_2_PARAM(StandIn)

DECLARE_LAYER_POLICY_2_PARAM(StridedSlice)

DECLARE_LAYER_POLICY_1_PARAM(Subtraction)

DECLARE_LAYER_POLICY_1_PARAM(Switch)

DECLARE_LAYER_POLICY_2_PARAM(Transpose)

DECLARE_LAYER_POLICY_2_PARAM(TransposeConvolution2d)

DECLARE_LAYER_POLICY_MAP_PARAM(Unmap, void)

// Generic implementation to get the number of input slots for a given layer type.
template<armnn::LayerType Type>
unsigned int GetNumInputs(const armnn::Layer& layer)
{
    return layer.GetNumInputSlots();
}

// Generic implementation to get the number of output slots for a given layer type.
template<armnn::LayerType Type>
unsigned int GetNumOutputs(const armnn::Layer& layer)
{
    return layer.GetNumOutputSlots();
}

template<>
unsigned int GetNumInputs<armnn::LayerType::Concat>(const armnn::Layer& layer)
{
    IgnoreUnused(layer);
    return 2;
}

// Tests that the IsLayerSupported() function returns the correct value.
// We determine the correct value by *trying* to create the relevant workload and seeing if it matches what we expect.
// Returns true if expectations are met, otherwise returns false.
template<typename FactoryType, armnn::DataType DataType, armnn::LayerType Type>
bool IsLayerSupportedTest(FactoryType* factory, Tag<Type>)
{
    using LayerPolicy = LayerTypePolicy<Type, DataType>;
    using LayerType = typename LayerPolicy::Type;
    using LayerDesc = typename LayerPolicy::Desc;
    DummyLayer<LayerType, LayerDesc> layer;

    if (LayerPolicy::IsException) // Don't test exceptions to the rule.
    {
        return true;
    }

    unsigned int numIn = GetNumInputs<Type>(*layer.m_Layer);
    unsigned int numOut = GetNumOutputs<Type>(*layer.m_Layer);

    // Make another dummy layer just to make IsLayerSupported have valid inputs.
    DummyLayer<armnn::ConstantLayer, void> previousLayer;
    // Set output of the previous layer to a dummy tensor.
    armnn::TensorInfo output = MakeDummyTensorInfo<DataType>();
    previousLayer.m_Layer->GetOutputSlot(0).SetTensorInfo(output);
    // Connect all outputs of the previous layer to inputs of tested layer.
    for (unsigned int i = 0; i < numIn; i++)
    {
        armnn::IOutputSlot& previousLayerOutputSlot = previousLayer.m_Layer->GetOutputSlot(0);
        armnn::IInputSlot& layerInputSlot = layer.m_Layer->GetInputSlot(i);
        previousLayerOutputSlot.Connect(layerInputSlot);
    }
    // Set outputs of tested layer to a dummy tensor.
    for (unsigned int i = 0; i < numOut; i++)
    {
        layer.m_Layer->GetOutputSlot(i).SetTensorInfo(output);
    }

    std::string layerName = LayerPolicy::NameStr;
    std::string reasonIfUnsupported;
    if (FactoryType::IsLayerSupported(*layer.m_Layer, DataType, reasonIfUnsupported))
    {
        std::string errorMsg = " layer expected support but found none.";
        try
        {
            bool retVal = LayerPolicy::MakeDummyWorkload(factory, numIn, numOut).get() != nullptr;
            BOOST_CHECK_MESSAGE(retVal, layerName << errorMsg);
            return retVal;
        }
        catch(const armnn::InvalidArgumentException& e)
        {
            IgnoreUnused(e);
            // This is ok since we throw InvalidArgumentException when creating the dummy workload.
            return true;
        }
        catch(const std::exception& e)
        {
            errorMsg = e.what();
            BOOST_TEST_ERROR(layerName << ": " << errorMsg);
            return false;
        }
        catch(...)
        {
            errorMsg = "Unexpected error while testing support for ";
            BOOST_TEST_ERROR(errorMsg << layerName);
            return false;
        }
    }
    else
    {
        std::string errorMsg = " layer expected no support (giving reason: " + reasonIfUnsupported + ") but found some.";
        try
        {
            bool retVal = LayerPolicy::MakeDummyWorkload(factory, numIn, numOut).get() == nullptr;
            BOOST_CHECK_MESSAGE(retVal, layerName << errorMsg);
            return retVal;
        }
        // These two exceptions are ok: for workloads that are partially supported, attempting to instantiate them
        // using parameters that make IsLayerSupported() return false should throw an
        // InvalidArgumentException or UnimplementedException.
        catch(const armnn::InvalidArgumentException& e)
        {
            IgnoreUnused(e);
            return true;
        }
        catch(const armnn::UnimplementedException& e)
        {
            IgnoreUnused(e);
            return true;
        }
        catch(const std::exception& e)
        {
            errorMsg = e.what();
            BOOST_TEST_ERROR(layerName << ": " << errorMsg);
            return false;
        }
        catch(...)
        {
            errorMsg = "Unexpected error while testing support for ";
            BOOST_TEST_ERROR(errorMsg << layerName);
            return false;
        }
    }
}

template<typename FactoryType, armnn::DataType DataType, armnn::LayerType Type>
bool IsLayerSupportedTest(FactoryType* factory, Tag<armnn::LayerType::Map>)
{
    IgnoreUnused(factory);
    return true;
}

template<typename FactoryType, armnn::DataType DataType, armnn::LayerType Type>
bool IsLayerSupportedTest(FactoryType* factory, Tag<armnn::LayerType::Unmap>)
{
    IgnoreUnused(factory);
    return true;
}

// Helper function to compute the next type in the LayerType enum.
constexpr armnn::LayerType NextType(armnn::LayerType type)
{
    return static_cast<armnn::LayerType>(static_cast<int>(type) + 1);
}
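
// For example, NextType(armnn::LayerType::FirstLayer) yields the second entry
// of the enum; the recursion below uses this to visit every LayerType in turn.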

// Termination function for determining the end of the LayerType enumeration.
template<typename FactoryType, armnn::DataType DataType, armnn::LayerType Type>
bool IsLayerSupportedTestsImpl(FactoryType* factory, Tag<armnn::LayerType::LastLayer>)
{
    return IsLayerSupportedTest<FactoryType, DataType, Type>(factory, Tag<Type>());
}

// Recursive function to test an entry in the LayerType enum and then iterate on the next entry.
template<typename FactoryType, armnn::DataType DataType, armnn::LayerType Type>
bool IsLayerSupportedTestsImpl(FactoryType* factory, Tag<Type>)
{
    bool v = IsLayerSupportedTest<FactoryType, DataType, Type>(factory, Tag<Type>());

    return v &&
           IsLayerSupportedTestsImpl<FactoryType, DataType, NextType(Type)>
               (factory, Tag<NextType(Type)>());
}

// Helper function to pass through to the test framework.
template<typename FactoryType, armnn::DataType DataType>
bool IsLayerSupportedTests(FactoryType* factory)
{
    return IsLayerSupportedTestsImpl<FactoryType, DataType>(factory, Tag<armnn::LayerType::FirstLayer>());
}
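
// Typical driver from a backend test suite (sketch; RefWorkloadFactory is one
// example of a FactoryType and is not required by this header):
//   armnn::RefWorkloadFactory factory;
//   BOOST_CHECK(IsLayerSupportedTests<armnn::RefWorkloadFactory,
//                                     armnn::DataType::Float32>(&factory));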

template<armnn::LayerType Type>
bool TestLayerTypeMatches()
{
    using LayerPolicy = LayerTypePolicy<Type, armnn::DataType::Float32>;
    using LayerType = typename LayerPolicy::Type;
    using LayerDesc = typename LayerPolicy::Desc;
    DummyLayer<LayerType, LayerDesc> layer;

    std::stringstream ss;
    ss << LayerPolicy::NameStr << " layer type mismatches expected layer type value.";
    bool v = Type == layer.m_Layer->GetType();
    BOOST_CHECK_MESSAGE(v, ss.str());
    return v;
}

template<armnn::LayerType Type>
bool LayerTypeMatchesTestImpl(Tag<armnn::LayerType::LastLayer>)
{
    return TestLayerTypeMatches<Type>();
}

template<armnn::LayerType Type>
bool LayerTypeMatchesTestImpl(Tag<Type>)
{
    return TestLayerTypeMatches<Type>() &&
           LayerTypeMatchesTestImpl<NextType(Type)>(Tag<NextType(Type)>());
}
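
// The matching driver walks the whole enum, e.g.:
//   bool ok = LayerTypeMatchesTestImpl<armnn::LayerType::FirstLayer>(
//       Tag<armnn::LayerType::FirstLayer>());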

template<typename FactoryType, typename LayerType, armnn::DataType InputDataType, armnn::DataType OutputDataType>
bool IsConvertLayerSupportedTests(std::string& reasonIfUnsupported)
{
    armnn::Graph graph;
    LayerType* const layer = graph.AddLayer<LayerType>("LayerName");

    armnn::Layer* const input = graph.AddLayer<armnn::InputLayer>(0, "input");
    armnn::Layer* const output = graph.AddLayer<armnn::OutputLayer>(0, "output");

    armnn::TensorInfo inputTensorInfo({1, 3, 2, 3}, InputDataType);
    armnn::TensorInfo outputTensorInfo({1, 3, 2, 3}, OutputDataType);

    input->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
    input->GetOutputHandler(0).SetTensorInfo(inputTensorInfo);
    layer->GetOutputSlot(0).Connect(output->GetInputSlot(0));
    layer->GetOutputHandler(0).SetTensorInfo(outputTensorInfo);

    bool result = FactoryType::IsLayerSupported(*layer, InputDataType, reasonIfUnsupported);

    return result;
}
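
// Example invocation (sketch), checking Fp16 -> Fp32 conversion support, where
// FactoryType is the workload factory of the backend under test:
//   std::string reason;
//   bool supported = IsConvertLayerSupportedTests<FactoryType,
//       armnn::ConvertFp16ToFp32Layer,
//       armnn::DataType::Float16, armnn::DataType::Float32>(reason);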

template<typename FactoryType, armnn::DataType InputDataType, armnn::DataType OutputDataType>
bool IsLogicalBinaryLayerSupportedTests(std::string& reasonIfUnsupported)
{
    armnn::Graph graph;
    armnn::LogicalBinaryDescriptor desc(armnn::LogicalBinaryOperation::LogicalOr);

    armnn::Layer* const input0 = graph.AddLayer<armnn::InputLayer>(0, "input0");
    armnn::Layer* const input1 = graph.AddLayer<armnn::InputLayer>(1, "input1");

    armnn::Layer* const layer = graph.AddLayer<armnn::LogicalBinaryLayer>(desc, "logicalOrLayer");

    armnn::Layer* const output = graph.AddLayer<armnn::OutputLayer>(0, "output1");

    armnn::TensorInfo inputTensorInfo0({1, 1, 1, 4}, InputDataType);
    armnn::TensorInfo inputTensorInfo1({1, 1, 1, 4}, InputDataType);

    armnn::TensorInfo outputTensorInfo({1, 1, 1, 4}, OutputDataType);

    input0->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
    input1->GetOutputSlot(0).Connect(layer->GetInputSlot(1));

    input0->GetOutputHandler(0).SetTensorInfo(inputTensorInfo0);
    input1->GetOutputHandler(0).SetTensorInfo(inputTensorInfo1);

    layer->GetOutputSlot(0).Connect(output->GetInputSlot(0));
    layer->GetOutputHandler(0).SetTensorInfo(outputTensorInfo);

    bool result = FactoryType::IsLayerSupported(*layer, InputDataType, reasonIfUnsupported);

    return result;
}

template<typename FactoryType, armnn::DataType InputDataType, armnn::DataType OutputDataType>
bool IsLogicalBinaryLayerBroadcastSupportedTests(std::string& reasonIfUnsupported)
{
    armnn::Graph graph;
    armnn::LogicalBinaryDescriptor desc(armnn::LogicalBinaryOperation::LogicalAnd);

    armnn::Layer* const input0 = graph.AddLayer<armnn::InputLayer>(0, "input0");
    armnn::Layer* const input1 = graph.AddLayer<armnn::InputLayer>(1, "input1");

    armnn::Layer* const layer = graph.AddLayer<armnn::LogicalBinaryLayer>(desc, "logicalAndLayer");

    armnn::Layer* const output = graph.AddLayer<armnn::OutputLayer>(0, "output2");

    armnn::TensorInfo inputTensorInfo0({1, 1, 1, 4}, InputDataType);
    armnn::TensorInfo inputTensorInfo1({1, 1, 1, 1}, InputDataType);

    armnn::TensorInfo outputTensorInfo({1, 1, 1, 4}, OutputDataType);

    input0->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
    input1->GetOutputSlot(0).Connect(layer->GetInputSlot(1));

    input0->GetOutputHandler(0).SetTensorInfo(inputTensorInfo0);
    input1->GetOutputHandler(0).SetTensorInfo(inputTensorInfo1);

    layer->GetOutputSlot(0).Connect(output->GetInputSlot(0));
    layer->GetOutputHandler(0).SetTensorInfo(outputTensorInfo);

    bool result = FactoryType::IsLayerSupported(*layer, InputDataType, reasonIfUnsupported);

    return result;
}

template<typename FactoryType, armnn::DataType InputDataType, armnn::DataType OutputDataType>
bool IsMeanLayerSupportedTests(std::string& reasonIfUnsupported)
{
    armnn::Graph graph;
    static const std::vector<unsigned int> axes = {1, 0};
    armnn::MeanDescriptor desc(axes, false);

    armnn::Layer* const layer = graph.AddLayer<armnn::MeanLayer>(desc, "LayerName");

    armnn::Layer* const input = graph.AddLayer<armnn::InputLayer>(0, "input");
    armnn::Layer* const output = graph.AddLayer<armnn::OutputLayer>(0, "output");

    armnn::TensorInfo inputTensorInfo({4, 3, 2}, InputDataType);
    armnn::TensorInfo outputTensorInfo({2}, OutputDataType);

    input->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
    input->GetOutputHandler(0).SetTensorInfo(inputTensorInfo);
    layer->GetOutputSlot(0).Connect(output->GetInputSlot(0));
    layer->GetOutputHandler(0).SetTensorInfo(outputTensorInfo);

    bool result = FactoryType::IsLayerSupported(*layer, InputDataType, reasonIfUnsupported);

    return result;
}

// Tests that IsMeanSupported fails when input tensor dimensions
// do not match output tensor dimensions when keepDims == true.
template<typename FactoryType, armnn::DataType InputDataType, armnn::DataType OutputDataType>
bool IsMeanLayerNotSupportedTests(std::string& reasonIfUnsupported)
{
    armnn::Graph graph;
    static const std::vector<unsigned int> axes = {};
    // Set keepDims == true.
    armnn::MeanDescriptor desc(axes, true);

    armnn::Layer* const layer = graph.AddLayer<armnn::MeanLayer>(desc, "LayerName");

    armnn::Layer* const input = graph.AddLayer<armnn::InputLayer>(0, "input");
    armnn::Layer* const output = graph.AddLayer<armnn::OutputLayer>(0, "output");

    // Mismatching number of tensor dimensions.
    armnn::TensorInfo inputTensorInfo({1, 1, 1, 1}, InputDataType);
    armnn::TensorInfo outputTensorInfo({1, 1}, OutputDataType);

    input->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
    input->GetOutputHandler(0).SetTensorInfo(inputTensorInfo);
    layer->GetOutputSlot(0).Connect(output->GetInputSlot(0));
    layer->GetOutputHandler(0).SetTensorInfo(outputTensorInfo);

    bool result = FactoryType::IsLayerSupported(*layer, InputDataType, reasonIfUnsupported);

    return result;
}

template<typename FactoryType, armnn::DataType OutputDataType>
bool IsConstantLayerSupportedTests(std::string& reasonIfUnsupported)
{
    armnn::Graph graph;

    armnn::Layer* const layer = graph.AddLayer<armnn::ConstantLayer>("ConstantLayerName");
    armnn::Layer* const output = graph.AddLayer<armnn::OutputLayer>(0, "OutputLayerName");

    armnn::TensorInfo outputTensorInfo({1, 1}, OutputDataType);

    layer->GetOutputSlot(0).Connect(output->GetInputSlot(0));
    layer->GetOutputHandler(0).SetTensorInfo(outputTensorInfo);

    bool result = FactoryType::IsLayerSupported(*layer, OutputDataType, reasonIfUnsupported);

    return result;
}

} // anonymous namespace