ArmNN 21.02
IsLayerSupportedTestImpl.hpp
//
// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once

#include <Graph.hpp>

#include <backendsCommon/MapWorkload.hpp>
#include <backendsCommon/UnmapWorkload.hpp>
#include <backendsCommon/WorkloadFactory.hpp>

#include <boost/test/unit_test.hpp>

namespace
{
armnn::Graph dummyGraph;

// Make a dummy TensorInfo object.
template<armnn::DataType DataType>
armnn::TensorInfo MakeDummyTensorInfo()
{
    return armnn::TensorInfo({2,2,2,2}, DataType, 1.0, 0);
}
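// The trailing 1.0 and 0 are TensorInfo's quantization scale and offset arguments; they are
// ignored for float types, but they let the same dummy info serve quantized DataType
// instantiations as well.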


// Make a dummy WorkloadInfo using a dummy TensorInfo.
template<armnn::DataType DataType>
armnn::WorkloadInfo MakeDummyWorkloadInfo(unsigned int numInputs, unsigned int numOutputs)
{
    armnn::WorkloadInfo info;

    for (unsigned int i = 0; i < numInputs; i++)
    {
        info.m_InputTensorInfos.push_back(MakeDummyTensorInfo<DataType>());
    }

    for (unsigned int o = 0; o < numOutputs; o++)
    {
        info.m_OutputTensorInfos.push_back(MakeDummyTensorInfo<DataType>());
    }

    return info;
}
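// e.g. MakeDummyWorkloadInfo<armnn::DataType::Float32>(2, 1) yields a WorkloadInfo holding two
// {2,2,2,2} Float32 entries in m_InputTensorInfos and one in m_OutputTensorInfos.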

// Template class to create a dummy layer (2 parameters).
template<typename LayerType, typename DescType = typename LayerType::DescriptorType>
struct DummyLayer
{
    DummyLayer()
    {
        m_Layer = dummyGraph.AddLayer<LayerType>(DescType(), "");
    }

    ~DummyLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }

    LayerType* m_Layer;
};

// Template class to create a dummy layer (1 parameter).
template<typename LayerType>
struct DummyLayer<LayerType, void>
{
    DummyLayer()
    {
        m_Layer = dummyGraph.AddLayer<LayerType>("");
    }

    ~DummyLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }

    LayerType* m_Layer;
};
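// Layers whose constructor takes only a name resolve to the <LayerType, void> partial
// specialization above; the explicit specializations below cover layers that additionally need
// constant members (weights, biases, anchors) or a non-default descriptor populated before
// IsLayerSupported can be queried meaningfully.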

template<>
struct DummyLayer<armnn::BatchNormalizationLayer>
{
    DummyLayer()
    {
        m_Layer = dummyGraph.AddLayer<armnn::BatchNormalizationLayer>(armnn::BatchNormalizationDescriptor(), "");
        m_Layer->m_Mean = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_Variance = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_Beta = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_Gamma = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
    }

    ~DummyLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }

    armnn::BatchNormalizationLayer* m_Layer;
};

template<>
struct DummyLayer<armnn::BatchToSpaceNdLayer>
{
    DummyLayer()
    {
        m_Layer = dummyGraph.AddLayer<armnn::BatchToSpaceNdLayer>(armnn::BatchToSpaceNdDescriptor(), "");
    }

    ~DummyLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }

    armnn::BatchToSpaceNdLayer* m_Layer;
};

template<>
struct DummyLayer<armnn::ConstantLayer, void>
{
    DummyLayer()
    {
        m_Layer = dummyGraph.AddLayer<armnn::ConstantLayer>("");
    }

    ~DummyLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }

    armnn::ConstantLayer* m_Layer;
};

template<>
struct DummyLayer<armnn::InputLayer, armnn::LayerBindingId>
{
    DummyLayer()
    {
        m_Layer = dummyGraph.AddLayer<armnn::InputLayer>(armnn::LayerBindingId(), "");
    }

    ~DummyLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }

    armnn::InputLayer* m_Layer;
};

template<>
struct DummyLayer<armnn::ConcatLayer>
{
    DummyLayer()
    {
        armnn::OriginsDescriptor desc(2);
        m_Layer = dummyGraph.AddLayer<armnn::ConcatLayer>(desc, "");
    }

    ~DummyLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }

    armnn::ConcatLayer* m_Layer;
};

template<>
struct DummyLayer<armnn::MapLayer, void>
{
    DummyLayer()
    {
        m_Layer = dummyGraph.AddLayer<armnn::MapLayer>("");
    }

    ~DummyLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }

    armnn::MapLayer* m_Layer;
};

template<>
struct DummyLayer<armnn::OutputLayer, armnn::LayerBindingId>
{
    DummyLayer()
    {
        m_Layer = dummyGraph.AddLayer<armnn::OutputLayer>(armnn::LayerBindingId(), "");
    }

    ~DummyLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }

    armnn::OutputLayer* m_Layer;
};

template<>
struct DummyLayer<armnn::SplitterLayer>
{
    DummyLayer()
    {
        armnn::ViewsDescriptor desc(1);
        m_Layer = dummyGraph.AddLayer<armnn::SplitterLayer>(desc, "");
    }

    ~DummyLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }

    armnn::SplitterLayer* m_Layer;
};

template<>
struct DummyLayer<armnn::UnmapLayer, void>
{
    DummyLayer()
    {
        m_Layer = dummyGraph.AddLayer<armnn::UnmapLayer>("");
    }

    ~DummyLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }

    armnn::UnmapLayer* m_Layer;
};

template <typename ConvolutionLayerType>
struct DummyConvolutionLayer
{
    DummyConvolutionLayer()
    {
        typename ConvolutionLayerType::DescriptorType desc;
        desc.m_StrideX = 1;
        desc.m_StrideY = 1;
        m_Layer = dummyGraph.AddLayer<ConvolutionLayerType>(desc, "");
        m_Layer->m_Weight = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_Bias = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
    }

    ~DummyConvolutionLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }

    ConvolutionLayerType* m_Layer;
};

template<>
struct DummyLayer<armnn::Convolution2dLayer>
    : public DummyConvolutionLayer<armnn::Convolution2dLayer>
{
};

template<>
struct DummyLayer<armnn::DepthwiseConvolution2dLayer>
    : public DummyConvolutionLayer<armnn::DepthwiseConvolution2dLayer>
{
};

template<>
struct DummyLayer<armnn::TransposeConvolution2dLayer>
    : public DummyConvolutionLayer<armnn::TransposeConvolution2dLayer>
{
};
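// All three convolution-style layers can share DummyConvolutionLayer because each exposes
// m_Weight, m_Bias, and the stride descriptor fields under the same member names.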

template<>
struct DummyLayer<armnn::DetectionPostProcessLayer>
{
    DummyLayer()
    {
        m_Layer = dummyGraph.AddLayer<armnn::DetectionPostProcessLayer>(armnn::DetectionPostProcessDescriptor(), "");
        m_Layer->m_Anchors = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
    }

    ~DummyLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }

    armnn::DetectionPostProcessLayer* m_Layer;
};

template <typename LstmLayerType>
struct DummyLstmLayer
{
    DummyLstmLayer()
    {
        typename LstmLayerType::DescriptorType desc;
        desc.m_CifgEnabled = false;

        m_Layer = dummyGraph.AddLayer<LstmLayerType>(armnn::LstmDescriptor(), "");
        m_Layer->m_BasicParameters.m_InputToForgetWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_BasicParameters.m_InputToCellWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_BasicParameters.m_InputToOutputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_BasicParameters.m_RecurrentToForgetWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_BasicParameters.m_RecurrentToCellWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_BasicParameters.m_RecurrentToOutputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_BasicParameters.m_ForgetGateBias = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_BasicParameters.m_CellBias = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_BasicParameters.m_OutputGateBias = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));

        m_Layer->m_CifgParameters.m_InputToInputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_CifgParameters.m_RecurrentToInputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_CifgParameters.m_InputGateBias = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
    }

    ~DummyLstmLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }

    armnn::LstmLayer* m_Layer;
};

template<>
struct DummyLayer<armnn::LstmLayer>
    : public DummyLstmLayer<armnn::LstmLayer>
{
};

template <typename QLstmLayerType>
struct DummyQLstmLayer
{
    DummyQLstmLayer()
    {
        typename QLstmLayerType::DescriptorType desc;
        desc.m_CifgEnabled = false;
        desc.m_PeepholeEnabled = true;
        desc.m_ProjectionEnabled = true;
        desc.m_LayerNormEnabled = true;

        m_Layer = dummyGraph.AddLayer<QLstmLayerType>(armnn::QLstmDescriptor(), "qLstm");

        // Basic params
        m_Layer->m_BasicParameters.m_InputToForgetWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QSymmS8));
        m_Layer->m_BasicParameters.m_InputToCellWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QSymmS8));
        m_Layer->m_BasicParameters.m_InputToOutputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QSymmS8));

        m_Layer->m_BasicParameters.m_RecurrentToForgetWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QSymmS8));
        m_Layer->m_BasicParameters.m_RecurrentToCellWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QSymmS8));
        m_Layer->m_BasicParameters.m_RecurrentToOutputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QSymmS8));

        m_Layer->m_BasicParameters.m_ForgetGateBias = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Signed32));
        m_Layer->m_BasicParameters.m_CellBias = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Signed32));
        m_Layer->m_BasicParameters.m_OutputGateBias = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Signed32));

        // CIFG optional params
        m_Layer->m_CifgParameters.m_InputToInputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QSymmS8));
        m_Layer->m_CifgParameters.m_RecurrentToInputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QSymmS8));
        m_Layer->m_CifgParameters.m_InputGateBias = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Signed32));

        // Projection optional params
        m_Layer->m_ProjectionParameters.m_ProjectionWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QSymmS8));
        m_Layer->m_ProjectionParameters.m_ProjectionBias = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Signed32));

        // Peephole optional params
        m_Layer->m_PeepholeParameters.m_CellToInputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QSymmS16));
        m_Layer->m_PeepholeParameters.m_CellToForgetWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QSymmS16));
        m_Layer->m_PeepholeParameters.m_CellToOutputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QSymmS16));

        // Layer normalization optional params
        m_Layer->m_LayerNormParameters.m_InputLayerNormWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QSymmS16));
        m_Layer->m_LayerNormParameters.m_ForgetLayerNormWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QSymmS16));
        m_Layer->m_LayerNormParameters.m_CellLayerNormWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QSymmS16));
        m_Layer->m_LayerNormParameters.m_OutputLayerNormWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QSymmS16));
    }

    ~DummyQLstmLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }

    armnn::QLstmLayer* m_Layer;
};

template<>
struct DummyLayer<armnn::QuantizedLstmLayer, void>
{
    DummyLayer()
    {
        m_Layer = dummyGraph.AddLayer<armnn::QuantizedLstmLayer>("");

        m_Layer->m_QuantizedLstmParameters.m_InputToInputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QAsymmU8));
        m_Layer->m_QuantizedLstmParameters.m_InputToForgetWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QAsymmU8));
        m_Layer->m_QuantizedLstmParameters.m_InputToCellWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QAsymmU8));
        m_Layer->m_QuantizedLstmParameters.m_InputToOutputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QAsymmU8));

        m_Layer->m_QuantizedLstmParameters.m_RecurrentToInputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QAsymmU8));
        m_Layer->m_QuantizedLstmParameters.m_RecurrentToForgetWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QAsymmU8));
        m_Layer->m_QuantizedLstmParameters.m_RecurrentToCellWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QAsymmU8));
        m_Layer->m_QuantizedLstmParameters.m_RecurrentToOutputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QAsymmU8));

        m_Layer->m_QuantizedLstmParameters.m_InputGateBias = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Signed32));
        m_Layer->m_QuantizedLstmParameters.m_ForgetGateBias = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Signed32));
        m_Layer->m_QuantizedLstmParameters.m_CellBias = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Signed32));
        m_Layer->m_QuantizedLstmParameters.m_OutputGateBias = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Signed32));
    }

    ~DummyLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }

    armnn::QuantizedLstmLayer* m_Layer;
};

template<>
struct DummyLayer<armnn::FullyConnectedLayer>
{
    DummyLayer()
    {
        armnn::FullyConnectedLayer::DescriptorType desc;
        m_Layer = dummyGraph.AddLayer<armnn::FullyConnectedLayer>(desc, "");
        m_Layer->m_Weight = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
    }

    ~DummyLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }

    armnn::FullyConnectedLayer* m_Layer;
};

// Tag for giving LayerType entries a unique strong type each.
template<armnn::LayerType>
struct Tag{};

#define DECLARE_LAYER_POLICY_CUSTOM_PARAM(name, descType) \
template<armnn::DataType DataType> \
struct LayerTypePolicy<armnn::LayerType::name, DataType> \
{ \
    using Type = armnn::name##Layer; \
    using Desc = descType; \
    using QueueDesc = armnn::name##QueueDescriptor; \
    constexpr static const char* NameStr = #name; \
    constexpr static const bool IsException = false; \
    \
    static std::unique_ptr<armnn::IWorkload> MakeDummyWorkload(armnn::IWorkloadFactory *factory, \
                                                               unsigned int nIn, unsigned int nOut) \
    { \
        QueueDesc desc; \
        armnn::WorkloadInfo info = MakeDummyWorkloadInfo<DataType>(nIn, nOut); \
        return factory->Create##name(desc, info); \
    } \
};

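// As an example, DECLARE_LAYER_POLICY_CUSTOM_PARAM(Input, armnn::LayerBindingId) generates a
// LayerTypePolicy whose MakeDummyWorkload default-constructs an armnn::InputQueueDescriptor
// and calls factory->CreateInput(desc, info).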
#define DECLARE_LAYER_POLICY_MAP_PARAM(name, descType) \
template<armnn::DataType DataType> \
struct LayerTypePolicy<armnn::LayerType::name, DataType> \
{ \
    using Type = armnn::name##Layer; \
    using Desc = descType; \
    using QueueDesc = armnn::name##QueueDescriptor; \
    using Workload = armnn::name##Workload; \
    constexpr static const char* NameStr = #name; \
    constexpr static const bool IsException = false; \
    \
    static std::unique_ptr<armnn::IWorkload> MakeDummyWorkload(armnn::IWorkloadFactory* factory, \
                                                               unsigned int nIn, unsigned int nOut) \
    { \
        IgnoreUnused(factory); \
        QueueDesc desc; \
        armnn::WorkloadInfo info = MakeDummyWorkloadInfo<DataType>(nIn, nOut); \
        return std::make_unique<armnn::name##Workload>(desc, info); \
    } \
};
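// Unlike the factory-based policies above, a map policy instantiates the workload directly
// (std::make_unique of armnn::MapWorkload or armnn::UnmapWorkload), so it does not depend on
// the backend's workload factory providing a Create function for the layer.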

// Define a layer policy specialization for use with the IsLayerSupported tests.
// Use this version for layers whose constructor takes 1 parameter (name).
#define DECLARE_LAYER_POLICY_1_PARAM(name) DECLARE_LAYER_POLICY_CUSTOM_PARAM(name, void)

// Define a layer policy specialization for use with the IsLayerSupported tests.
// Use this version for layers whose constructor takes 2 parameters (descriptor and name).
#define DECLARE_LAYER_POLICY_2_PARAM(name) DECLARE_LAYER_POLICY_CUSTOM_PARAM(name, armnn::name##Descriptor)


#define DECLARE_LAYER_POLICY_EXCEPTION(name, descType) \
template<armnn::DataType DataType> \
struct LayerTypePolicy<armnn::LayerType::name, DataType> \
{ \
    using Type = armnn::name##Layer; \
    using Desc = descType; \
    constexpr static const char* NameStr = #name; \
    constexpr static const bool IsException = true; \
    \
    static std::unique_ptr<armnn::IWorkload> MakeDummyWorkload(armnn::IWorkloadFactory *factory, \
                                                               unsigned int nIn, unsigned int nOut) \
    { \
        IgnoreUnused(factory, nIn, nOut); \
        return std::unique_ptr<armnn::IWorkload>(); \
    } \
};

#define DECLARE_LAYER_POLICY_EXCEPTION_1_PARAM(name) DECLARE_LAYER_POLICY_EXCEPTION(name, void)
#define DECLARE_LAYER_POLICY_EXCEPTION_2_PARAM(name) DECLARE_LAYER_POLICY_EXCEPTION(name, armnn::name##Descriptor)
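// An exception policy sets IsException = true and returns a null workload, which makes
// IsLayerSupportedTest below skip the layer entirely; StandIn is declared this way.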

// Layer policy template.
template<armnn::LayerType Type, armnn::DataType DataType>
struct LayerTypePolicy;

// Every entry in the armnn::LayerType enum must be accounted for below.
DECLARE_LAYER_POLICY_2_PARAM(Activation)

DECLARE_LAYER_POLICY_1_PARAM(Addition)

DECLARE_LAYER_POLICY_2_PARAM(ArgMinMax)

DECLARE_LAYER_POLICY_2_PARAM(BatchNormalization)

DECLARE_LAYER_POLICY_2_PARAM(BatchToSpaceNd)

DECLARE_LAYER_POLICY_2_PARAM(Comparison)

DECLARE_LAYER_POLICY_2_PARAM(Concat)

DECLARE_LAYER_POLICY_1_PARAM(Constant)

DECLARE_LAYER_POLICY_1_PARAM(ConvertBf16ToFp32)

DECLARE_LAYER_POLICY_1_PARAM(ConvertFp16ToFp32)

DECLARE_LAYER_POLICY_1_PARAM(ConvertFp32ToBf16)

DECLARE_LAYER_POLICY_1_PARAM(ConvertFp32ToFp16)

DECLARE_LAYER_POLICY_2_PARAM(Convolution2d)

DECLARE_LAYER_POLICY_1_PARAM(MemCopy)

DECLARE_LAYER_POLICY_1_PARAM(MemImport)

DECLARE_LAYER_POLICY_1_PARAM(Debug)

DECLARE_LAYER_POLICY_2_PARAM(DepthToSpace)

DECLARE_LAYER_POLICY_2_PARAM(DepthwiseConvolution2d)

DECLARE_LAYER_POLICY_1_PARAM(Dequantize)

DECLARE_LAYER_POLICY_2_PARAM(DetectionPostProcess)

DECLARE_LAYER_POLICY_2_PARAM(ElementwiseUnary)

DECLARE_LAYER_POLICY_2_PARAM(FakeQuantization)

DECLARE_LAYER_POLICY_2_PARAM(Fill)

DECLARE_LAYER_POLICY_1_PARAM(Floor)

DECLARE_LAYER_POLICY_2_PARAM(FullyConnected)

DECLARE_LAYER_POLICY_2_PARAM(Gather)

DECLARE_LAYER_POLICY_CUSTOM_PARAM(Input, armnn::LayerBindingId)

DECLARE_LAYER_POLICY_2_PARAM(InstanceNormalization)

DECLARE_LAYER_POLICY_2_PARAM(L2Normalization)

DECLARE_LAYER_POLICY_2_PARAM(LogicalBinary)

DECLARE_LAYER_POLICY_2_PARAM(LogSoftmax)

DECLARE_LAYER_POLICY_2_PARAM(Lstm)

DECLARE_LAYER_POLICY_MAP_PARAM(Map, void)

DECLARE_LAYER_POLICY_1_PARAM(Maximum)

DECLARE_LAYER_POLICY_2_PARAM(Mean)

DECLARE_LAYER_POLICY_1_PARAM(Merge)

DECLARE_LAYER_POLICY_1_PARAM(Minimum)

DECLARE_LAYER_POLICY_1_PARAM(Multiplication)

DECLARE_LAYER_POLICY_2_PARAM(Normalization)

DECLARE_LAYER_POLICY_CUSTOM_PARAM(Output, armnn::LayerBindingId)

DECLARE_LAYER_POLICY_2_PARAM(Pad)

DECLARE_LAYER_POLICY_1_PARAM(Quantize)

DECLARE_LAYER_POLICY_2_PARAM(Permute)

DECLARE_LAYER_POLICY_2_PARAM(Pooling2d)

DECLARE_LAYER_POLICY_2_PARAM(PreCompiled)

DECLARE_LAYER_POLICY_1_PARAM(Prelu)

DECLARE_LAYER_POLICY_2_PARAM(QLstm)

DECLARE_LAYER_POLICY_1_PARAM(QuantizedLstm)

DECLARE_LAYER_POLICY_1_PARAM(Division)

DECLARE_LAYER_POLICY_1_PARAM(Rank)

DECLARE_LAYER_POLICY_2_PARAM(Reduce)

DECLARE_LAYER_POLICY_2_PARAM(Reshape)

DECLARE_LAYER_POLICY_2_PARAM(Resize)

DECLARE_LAYER_POLICY_2_PARAM(Slice)

DECLARE_LAYER_POLICY_2_PARAM(Softmax)

DECLARE_LAYER_POLICY_2_PARAM(SpaceToBatchNd)

DECLARE_LAYER_POLICY_2_PARAM(SpaceToDepth)

DECLARE_LAYER_POLICY_2_PARAM(Splitter)

DECLARE_LAYER_POLICY_2_PARAM(Stack)

DECLARE_LAYER_POLICY_EXCEPTION_2_PARAM(StandIn)

DECLARE_LAYER_POLICY_2_PARAM(StridedSlice)

DECLARE_LAYER_POLICY_1_PARAM(Subtraction)

DECLARE_LAYER_POLICY_1_PARAM(Switch)

DECLARE_LAYER_POLICY_2_PARAM(Transpose)

DECLARE_LAYER_POLICY_2_PARAM(TransposeConvolution2d)

DECLARE_LAYER_POLICY_MAP_PARAM(Unmap, void)

// Generic implementation to get the number of input slots for a given layer type.
template<armnn::LayerType Type>
unsigned int GetNumInputs(const armnn::Layer& layer)
{
    return layer.GetNumInputSlots();
}

// Generic implementation to get the number of output slots for a given layer type.
template<armnn::LayerType Type>
unsigned int GetNumOutputs(const armnn::Layer& layer)
{
    return layer.GetNumOutputSlots();
}

template<>
unsigned int GetNumInputs<armnn::LayerType::Concat>(const armnn::Layer& layer)
{
    IgnoreUnused(layer);
    return 2;
}
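// The Concat dummy layer is created from an OriginsDescriptor with two origins (see
// DummyLayer<armnn::ConcatLayer> above), so its input count is pinned to 2 here.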

// Tests that the IsLayerSupported() function returns the correct value.
// We determine the expected value by *trying* to create the relevant workload and seeing
// whether the result matches what we expect.
// Returns true if expectations are met, otherwise returns false.
template<typename FactoryType, armnn::DataType DataType, armnn::LayerType Type>
bool IsLayerSupportedTest(FactoryType *factory, Tag<Type>)
{
    using LayerPolicy = LayerTypePolicy<Type, DataType>;
    using LayerType = typename LayerPolicy::Type;
    using LayerDesc = typename LayerPolicy::Desc;
    DummyLayer<LayerType, LayerDesc> layer;

    if (LayerPolicy::IsException) // Don't test exceptions to the rule.
    {
        return true;
    }

    unsigned int numIn = GetNumInputs<Type>(*layer.m_Layer);
    unsigned int numOut = GetNumOutputs<Type>(*layer.m_Layer);

    // Make another dummy layer just to make IsLayerSupported have valid inputs.
    DummyLayer<armnn::ConstantLayer, void> previousLayer;
    // Set output of the previous layer to a dummy tensor.
    armnn::TensorInfo output = MakeDummyTensorInfo<DataType>();
    previousLayer.m_Layer->GetOutputSlot(0).SetTensorInfo(output);
    // Connect all outputs of the previous layer to inputs of tested layer.
    for (unsigned int i = 0; i < numIn; i++)
    {
        armnn::IOutputSlot& previousLayerOutputSlot = previousLayer.m_Layer->GetOutputSlot(0);
        armnn::IInputSlot& layerInputSlot = layer.m_Layer->GetInputSlot(i);
        previousLayerOutputSlot.Connect(layerInputSlot);
    }
    // Set outputs of tested layer to a dummy tensor.
    for (unsigned int i = 0; i < numOut; i++)
    {
        layer.m_Layer->GetOutputSlot(0).SetTensorInfo(output);
    }

    std::string layerName = LayerPolicy::NameStr;
    std::string reasonIfUnsupported;
    if (FactoryType::IsLayerSupported(*layer.m_Layer, DataType, reasonIfUnsupported))
    {
        std::string errorMsg = " layer expected support but found none.";
        try
        {
            bool retVal = LayerPolicy::MakeDummyWorkload(factory, numIn, numOut).get() != nullptr;
            BOOST_CHECK_MESSAGE(retVal, layerName << errorMsg);
            return retVal;
        }
        catch(const armnn::InvalidArgumentException& e)
        {
            IgnoreUnused(e);
            // This is ok since we throw InvalidArgumentException when creating the dummy workload.
            return true;
        }
        catch(const std::exception& e)
        {
            errorMsg = e.what();
            BOOST_TEST_ERROR(layerName << ": " << errorMsg);
            return false;
        }
        catch(...)
        {
            errorMsg = "Unexpected error while testing support for ";
            BOOST_TEST_ERROR(errorMsg << layerName);
            return false;
        }
    }
    else
    {
        std::string errorMsg = " layer expected no support (giving reason: " + reasonIfUnsupported + ") but found some.";
        try
        {
            bool retVal = LayerPolicy::MakeDummyWorkload(factory, numIn, numOut).get() == nullptr;
            BOOST_CHECK_MESSAGE(retVal, layerName << errorMsg);
            return retVal;
        }
        // These two exceptions are ok: for workloads that are partially supported, attempting to instantiate them
        // using parameters that make IsLayerSupported() return false should throw an
        // InvalidArgumentException or an UnimplementedException.
        catch(const armnn::InvalidArgumentException& e)
        {
            IgnoreUnused(e);
            return true;
        }
        catch(const armnn::UnimplementedException& e)
        {
            IgnoreUnused(e);
            return true;
        }
        catch(const std::exception& e)
        {
            errorMsg = e.what();
            BOOST_TEST_ERROR(layerName << ": " << errorMsg);
            return false;
        }
        catch(...)
        {
            errorMsg = "Unexpected error while testing support for ";
            BOOST_TEST_ERROR(errorMsg << layerName);
            return false;
        }
    }
}

template<typename FactoryType, armnn::DataType DataType, armnn::LayerType Type>
bool IsLayerSupportedTest(FactoryType *factory, Tag<armnn::LayerType::Map>)
{
    IgnoreUnused(factory);
    return true;
}

template<typename FactoryType, armnn::DataType DataType, armnn::LayerType Type>
bool IsLayerSupportedTest(FactoryType *factory, Tag<armnn::LayerType::Unmap>)
{
    IgnoreUnused(factory);
    return true;
}

// Helper function to compute the next type in the LayerType enum.
constexpr armnn::LayerType NextType(armnn::LayerType type)
{
    return static_cast<armnn::LayerType>(static_cast<int>(type)+1);
}
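// Since LayerType values are contiguous, NextType simply steps to the enum entry declared
// immediately after the given one; the recursion below uses it to visit every layer type from
// FirstLayer to LastLayer at compile time.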

// Termination function for determining the end of the LayerType enumeration.
template<typename FactoryType, armnn::DataType DataType, armnn::LayerType Type>
bool IsLayerSupportedTestsImpl(FactoryType *factory, Tag<armnn::LayerType::LastLayer>)
{
    return IsLayerSupportedTest<FactoryType, DataType, Type>(factory, Tag<Type>());
}

// Recursive function to test an entry in the LayerType enum and then move on to the next entry.
template<typename FactoryType, armnn::DataType DataType, armnn::LayerType Type>
bool IsLayerSupportedTestsImpl(FactoryType *factory, Tag<Type>)
{
    bool v = IsLayerSupportedTest<FactoryType, DataType, Type>(factory, Tag<Type>());

    return v &&
           IsLayerSupportedTestsImpl<FactoryType, DataType, NextType(Type)>
               (factory, Tag<NextType(Type)>());
}

// Helper function to pass through to the test framework.
template<typename FactoryType, armnn::DataType DataType>
bool IsLayerSupportedTests(FactoryType *factory)
{
    return IsLayerSupportedTestsImpl<FactoryType, DataType>(factory, Tag<armnn::LayerType::FirstLayer>());
}
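// A typical call site in a backend test suite (sketch, assuming the reference backend):
//   armnn::RefWorkloadFactory factory;
//   IsLayerSupportedTests<armnn::RefWorkloadFactory, armnn::DataType::Float32>(&factory);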

template<armnn::LayerType Type>
bool TestLayerTypeMatches()
{
    using LayerPolicy = LayerTypePolicy<Type, armnn::DataType::Float32>;
    using LayerType = typename LayerPolicy::Type;
    using LayerDesc = typename LayerPolicy::Desc;
    DummyLayer<LayerType, LayerDesc> layer;

    std::stringstream ss;
    ss << LayerPolicy::NameStr << " layer type mismatches expected layer type value.";
    bool v = Type == layer.m_Layer->GetType();
    BOOST_CHECK_MESSAGE(v, ss.str());
    return v;
}

template<armnn::LayerType Type>
bool LayerTypeMatchesTestImpl(Tag<armnn::LayerType::LastLayer>)
{
    return TestLayerTypeMatches<Type>();
}

template<armnn::LayerType Type>
bool LayerTypeMatchesTestImpl(Tag<Type>)
{
    return TestLayerTypeMatches<Type>() &&
           LayerTypeMatchesTestImpl<NextType(Type)>(Tag<NextType(Type)>());
}

template<typename FactoryType, typename LayerType, armnn::DataType InputDataType, armnn::DataType OutputDataType>
bool IsConvertLayerSupportedTests(std::string& reasonIfUnsupported)
{
    armnn::Graph graph;
    LayerType* const layer = graph.AddLayer<LayerType>("LayerName");

    armnn::Layer* const input = graph.AddLayer<armnn::InputLayer>(0, "input");
    armnn::Layer* const output = graph.AddLayer<armnn::OutputLayer>(0, "output");

    armnn::TensorInfo inputTensorInfo({1, 3, 2, 3}, InputDataType);
    armnn::TensorInfo outputTensorInfo({1, 3, 2, 3}, OutputDataType);

    input->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
    input->GetOutputHandler(0).SetTensorInfo(inputTensorInfo);
    layer->GetOutputSlot(0).Connect(output->GetInputSlot(0));
    layer->GetOutputHandler(0).SetTensorInfo(outputTensorInfo);

    bool result = FactoryType::IsLayerSupported(*layer, InputDataType, reasonIfUnsupported);

    return result;
}
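// This helper covers layers whose support depends on both data types at once, e.g.
// ConvertFp16ToFp32Layer instantiated with InputDataType = Float16 and OutputDataType = Float32.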

template<typename FactoryType, armnn::DataType InputDataType, armnn::DataType OutputDataType>
bool IsLogicalBinaryLayerSupportedTests(std::string& reasonIfUnsupported)
{
    armnn::Graph graph;
    armnn::LogicalBinaryDescriptor desc(armnn::LogicalBinaryOperation::LogicalOr);

    armnn::Layer* const input0 = graph.AddLayer<armnn::InputLayer>(0, "input0");
    armnn::Layer* const input1 = graph.AddLayer<armnn::InputLayer>(1, "input1");

    armnn::Layer* const layer = graph.AddLayer<armnn::LogicalBinaryLayer>(desc, "logicalOrLayer");

    armnn::Layer* const output = graph.AddLayer<armnn::OutputLayer>(0, "output1");

    armnn::TensorInfo inputTensorInfo0({1, 1, 1, 4}, InputDataType);
    armnn::TensorInfo inputTensorInfo1({1, 1, 1, 4}, InputDataType);

    armnn::TensorInfo outputTensorInfo({1, 1, 1, 4}, OutputDataType);

    input0->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
    input1->GetOutputSlot(0).Connect(layer->GetInputSlot(1));

    input0->GetOutputHandler(0).SetTensorInfo(inputTensorInfo0);
    input1->GetOutputHandler(0).SetTensorInfo(inputTensorInfo1);

    layer->GetOutputSlot(0).Connect(output->GetInputSlot(0));
    layer->GetOutputHandler(0).SetTensorInfo(outputTensorInfo);

    bool result = FactoryType::IsLayerSupported(*layer, InputDataType, reasonIfUnsupported);

    return result;
}

template<typename FactoryType, armnn::DataType InputDataType, armnn::DataType OutputDataType>
bool IsLogicalBinaryLayerBroadcastSupportedTests(std::string& reasonIfUnsupported)
{
    armnn::Graph graph;
    armnn::LogicalBinaryDescriptor desc(armnn::LogicalBinaryOperation::LogicalAnd);

    armnn::Layer* const input0 = graph.AddLayer<armnn::InputLayer>(0, "input0");
    armnn::Layer* const input1 = graph.AddLayer<armnn::InputLayer>(1, "input1");

    armnn::Layer* const layer = graph.AddLayer<armnn::LogicalBinaryLayer>(desc, "logicalAndLayer");

    armnn::Layer* const output = graph.AddLayer<armnn::OutputLayer>(0, "output2");

    armnn::TensorInfo inputTensorInfo0({1, 1, 1, 4}, InputDataType);
    armnn::TensorInfo inputTensorInfo1({1, 1, 1, 1}, InputDataType);

    armnn::TensorInfo outputTensorInfo({1, 1, 1, 4}, OutputDataType);

    input0->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
    input1->GetOutputSlot(0).Connect(layer->GetInputSlot(1));

    input0->GetOutputHandler(0).SetTensorInfo(inputTensorInfo0);
    input1->GetOutputHandler(0).SetTensorInfo(inputTensorInfo1);

    layer->GetOutputSlot(0).Connect(output->GetInputSlot(0));
    layer->GetOutputHandler(0).SetTensorInfo(outputTensorInfo);

    bool result = FactoryType::IsLayerSupported(*layer, InputDataType, reasonIfUnsupported);

    return result;
}

template<typename FactoryType, armnn::DataType InputDataType, armnn::DataType OutputDataType>
bool IsMeanLayerSupportedTests(std::string& reasonIfUnsupported)
{
    armnn::Graph graph;
    static const std::vector<unsigned> axes = {1, 0};
    armnn::MeanDescriptor desc(axes, false);

    armnn::Layer* const layer = graph.AddLayer<armnn::MeanLayer>(desc, "LayerName");

    armnn::Layer* const input = graph.AddLayer<armnn::InputLayer>(0, "input");
    armnn::Layer* const output = graph.AddLayer<armnn::OutputLayer>(0, "output");

    armnn::TensorInfo inputTensorInfo({4, 3, 2}, InputDataType);
    armnn::TensorInfo outputTensorInfo({2}, OutputDataType);

    input->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
    input->GetOutputHandler(0).SetTensorInfo(inputTensorInfo);
    layer->GetOutputSlot(0).Connect(output->GetInputSlot(0));
    layer->GetOutputHandler(0).SetTensorInfo(outputTensorInfo);

    bool result = FactoryType::IsLayerSupported(*layer, InputDataType, reasonIfUnsupported);

    return result;
}

// Tests that IsMeanSupported fails when input tensor dimensions
// do not match output tensor dimensions when keepDims == true.
template<typename FactoryType, armnn::DataType InputDataType, armnn::DataType OutputDataType>
bool IsMeanLayerNotSupportedTests(std::string& reasonIfUnsupported)
{
    armnn::Graph graph;
    static const std::vector<unsigned> axes = {};
    // Set keepDims == true.
    armnn::MeanDescriptor desc(axes, true);

    armnn::Layer* const layer = graph.AddLayer<armnn::MeanLayer>(desc, "LayerName");

    armnn::Layer* const input = graph.AddLayer<armnn::InputLayer>(0, "input");
    armnn::Layer* const output = graph.AddLayer<armnn::OutputLayer>(0, "output");

    // Mismatching number of tensor dimensions.
    armnn::TensorInfo inputTensorInfo({1, 1, 1, 1}, InputDataType);
    armnn::TensorInfo outputTensorInfo({1, 1}, OutputDataType);

    input->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
    input->GetOutputHandler(0).SetTensorInfo(inputTensorInfo);
    layer->GetOutputSlot(0).Connect(output->GetInputSlot(0));
    layer->GetOutputHandler(0).SetTensorInfo(outputTensorInfo);

    bool result = FactoryType::IsLayerSupported(*layer, InputDataType, reasonIfUnsupported);

    return result;
}

template<typename FactoryType, armnn::DataType OutputDataType>
bool IsConstantLayerSupportedTests(std::string& reasonIfUnsupported)
{
    armnn::Graph graph;

    armnn::Layer* const layer = graph.AddLayer<armnn::ConstantLayer>("ConstantLayerName");
    armnn::Layer* const output = graph.AddLayer<armnn::OutputLayer>(0, "OutputLayerName");

    armnn::TensorInfo outputTensorInfo({1, 1}, OutputDataType);

    layer->GetOutputSlot(0).Connect(output->GetInputSlot(0));
    layer->GetOutputHandler(0).SetTensorInfo(outputTensorInfo);

    bool result = FactoryType::IsLayerSupported(*layer, OutputDataType, reasonIfUnsupported);

    return result;
}

} // anonymous namespace