ArmNN 20.08
IsLayerSupportedTestImpl.hpp
//
// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once

#include <Graph.hpp>

#include <backendsCommon/WorkloadFactory.hpp>

#include <armnn/utility/IgnoreUnused.hpp>

namespace
{
armnn::Graph dummyGraph;

// Make a dummy TensorInfo object.
template<armnn::DataType DataType>
armnn::TensorInfo MakeDummyTensorInfo()
{
    return armnn::TensorInfo({2,2,2,2}, DataType, 1.0, 0);
}

// Make a dummy WorkloadInfo using a dummy TensorInfo.
template<armnn::DataType DataType>
armnn::WorkloadInfo MakeDummyWorkloadInfo(unsigned int numInputs, unsigned int numOutputs)
{
    armnn::WorkloadInfo info;

    for (unsigned int i=0; i < numInputs; i++)
    {
        info.m_InputTensorInfos.push_back(MakeDummyTensorInfo<DataType>());
    }

    for (unsigned int o=0; o < numOutputs; o++)
    {
        info.m_OutputTensorInfos.push_back(MakeDummyTensorInfo<DataType>());
    }

    return info;
}
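
// Illustrative note (not part of the original file): the two helpers above are
// typically combined when faking a workload, e.g. for a layer with two inputs
// and one output, where every tensor gets shape {2,2,2,2}, scale 1.0, offset 0:
//
//     armnn::WorkloadInfo info = MakeDummyWorkloadInfo<armnn::DataType::Float32>(2, 1);
//     // info.m_InputTensorInfos.size() == 2, info.m_OutputTensorInfos.size() == 1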

// Template class to create a dummy layer (2 parameters).
template<typename LayerType, typename DescType = typename LayerType::DescriptorType>
struct DummyLayer
{
    DummyLayer()
    {
        m_Layer = dummyGraph.AddLayer<LayerType>(DescType(), "");
    }

    ~DummyLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }

    LayerType* m_Layer;
};
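
// Illustrative usage (an assumption, not in the original file): a DummyLayer is an
// RAII guard that adds the layer to dummyGraph and erases it again on scope exit:
//
//     {
//         DummyLayer<armnn::SoftmaxLayer> softmax;     // built with SoftmaxDescriptor()
//         DummyLayer<armnn::AdditionLayer, void> add;  // for layers without a descriptor
//     }   // both layers are erased from dummyGraph here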

// Template class to create a dummy layer (1 parameter).
template<typename LayerType>
struct DummyLayer<LayerType, void>
{
    DummyLayer()
    {
        m_Layer = dummyGraph.AddLayer<LayerType>("");
    }

    ~DummyLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }

    LayerType* m_Layer;
};

template<>
struct DummyLayer<armnn::BatchNormalizationLayer>
{
    DummyLayer()
    {
        m_Layer = dummyGraph.AddLayer<armnn::BatchNormalizationLayer>(
            armnn::BatchNormalizationDescriptor(), "");
        m_Layer->m_Mean = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_Variance = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_Beta = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_Gamma = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
    }

    ~DummyLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }

    armnn::BatchNormalizationLayer* m_Layer;
};

template<>
struct DummyLayer<armnn::BatchToSpaceNdLayer>
{
    DummyLayer()
    {
        m_Layer = dummyGraph.AddLayer<armnn::BatchToSpaceNdLayer>(
            armnn::BatchToSpaceNdDescriptor(), "");
    }

    ~DummyLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }

    armnn::BatchToSpaceNdLayer* m_Layer;
};

template<>
struct DummyLayer<armnn::ConstantLayer, void>
{
    DummyLayer()
    {
        m_Layer = dummyGraph.AddLayer<armnn::ConstantLayer>("");
    }

    ~DummyLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }

    armnn::ConstantLayer* m_Layer;
};

template<>
struct DummyLayer<armnn::InputLayer, armnn::LayerBindingId>
{
    DummyLayer()
    {
        m_Layer = dummyGraph.AddLayer<armnn::InputLayer>(armnn::LayerBindingId(), "");
    }

    ~DummyLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }

    armnn::InputLayer* m_Layer;
};

template<>
struct DummyLayer<armnn::ConcatLayer>
{
    DummyLayer()
    {
        armnn::OriginsDescriptor desc(2);
        m_Layer = dummyGraph.AddLayer<armnn::ConcatLayer>(desc, "");
    }

    ~DummyLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }

    armnn::ConcatLayer* m_Layer;
};

template<>
struct DummyLayer<armnn::OutputLayer, armnn::LayerBindingId>
{
    DummyLayer()
    {
        m_Layer = dummyGraph.AddLayer<armnn::OutputLayer>(armnn::LayerBindingId(), "");
    }

    ~DummyLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }

    armnn::OutputLayer* m_Layer;
};

template<>
struct DummyLayer<armnn::SplitterLayer>
{
    DummyLayer()
    {
        armnn::ViewsDescriptor desc(1);
        m_Layer = dummyGraph.AddLayer<armnn::SplitterLayer>(desc, "");
    }

    ~DummyLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }

    armnn::SplitterLayer* m_Layer;
};

template <typename ConvolutionLayerType>
struct DummyConvolutionLayer
{
    DummyConvolutionLayer()
    {
        typename ConvolutionLayerType::DescriptorType desc;
        desc.m_StrideX = 1;
        desc.m_StrideY = 1;
        m_Layer = dummyGraph.AddLayer<ConvolutionLayerType>(desc, "");
        m_Layer->m_Weight = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_Bias = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
    }

    ~DummyConvolutionLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }

    ConvolutionLayerType* m_Layer;
};

template<>
struct DummyLayer<armnn::Convolution2dLayer>
    : public DummyConvolutionLayer<armnn::Convolution2dLayer>
{
};

template<>
struct DummyLayer<armnn::DepthwiseConvolution2dLayer>
    : public DummyConvolutionLayer<armnn::DepthwiseConvolution2dLayer>
{
};

template<>
struct DummyLayer<armnn::TransposeConvolution2dLayer>
    : public DummyConvolutionLayer<armnn::TransposeConvolution2dLayer>
{
};

template<>
struct DummyLayer<armnn::DetectionPostProcessLayer>
{
    DummyLayer()
    {
        m_Layer = dummyGraph.AddLayer<armnn::DetectionPostProcessLayer>(
            armnn::DetectionPostProcessDescriptor(), "");
        m_Layer->m_Anchors = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
    }

    ~DummyLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }

    armnn::DetectionPostProcessLayer* m_Layer;
};

template <typename LstmLayerType>
struct DummyLstmLayer
{
    DummyLstmLayer()
    {
        typename LstmLayerType::DescriptorType desc;
        desc.m_CifgEnabled = false;

        m_Layer = dummyGraph.AddLayer<LstmLayerType>(armnn::LstmDescriptor(), "");
        m_Layer->m_BasicParameters.m_InputToForgetWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_BasicParameters.m_InputToCellWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_BasicParameters.m_InputToOutputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_BasicParameters.m_RecurrentToForgetWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_BasicParameters.m_RecurrentToCellWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_BasicParameters.m_RecurrentToOutputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_BasicParameters.m_ForgetGateBias = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_BasicParameters.m_CellBias = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_BasicParameters.m_OutputGateBias = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));

        m_Layer->m_CifgParameters.m_InputToInputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_CifgParameters.m_RecurrentToInputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_CifgParameters.m_InputGateBias = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
    }

    ~DummyLstmLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }

    armnn::LstmLayer* m_Layer;
};

template<>
struct DummyLayer<armnn::LstmLayer>
    : public DummyLstmLayer<armnn::LstmLayer>
{
};

template <typename QLstmLayerType>
struct DummyQLstmLayer
{
    DummyQLstmLayer()
    {
        typename QLstmLayerType::DescriptorType desc;
        desc.m_CifgEnabled = false;
        desc.m_PeepholeEnabled = true;
        desc.m_ProjectionEnabled = true;
        desc.m_LayerNormEnabled = true;

        m_Layer = dummyGraph.AddLayer<QLstmLayerType>(armnn::QLstmDescriptor(), "qLstm");

        // Basic params
        m_Layer->m_BasicParameters.m_InputToForgetWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QSymmS8));
        m_Layer->m_BasicParameters.m_InputToCellWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QSymmS8));
        m_Layer->m_BasicParameters.m_InputToOutputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QSymmS8));

        m_Layer->m_BasicParameters.m_RecurrentToForgetWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QSymmS8));
        m_Layer->m_BasicParameters.m_RecurrentToCellWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QSymmS8));
        m_Layer->m_BasicParameters.m_RecurrentToOutputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QSymmS8));

        m_Layer->m_BasicParameters.m_ForgetGateBias = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Signed32));
        m_Layer->m_BasicParameters.m_CellBias = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Signed32));
        m_Layer->m_BasicParameters.m_OutputGateBias = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Signed32));

        // CIFG optional params
        m_Layer->m_CifgParameters.m_InputToInputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QSymmS8));
        m_Layer->m_CifgParameters.m_RecurrentToInputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QSymmS8));
        m_Layer->m_CifgParameters.m_InputGateBias = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Signed32));

        // Projection optional params
        m_Layer->m_ProjectionParameters.m_ProjectionWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QSymmS8));
        m_Layer->m_ProjectionParameters.m_ProjectionBias = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Signed32));

        // Peephole optional params
        m_Layer->m_PeepholeParameters.m_CellToInputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QSymmS16));
        m_Layer->m_PeepholeParameters.m_CellToForgetWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QSymmS16));
        m_Layer->m_PeepholeParameters.m_CellToOutputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QSymmS16));

        // Layer normalization optional params
        m_Layer->m_LayerNormParameters.m_InputLayerNormWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QSymmS16));
        m_Layer->m_LayerNormParameters.m_ForgetLayerNormWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QSymmS16));
        m_Layer->m_LayerNormParameters.m_CellLayerNormWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QSymmS16));
        m_Layer->m_LayerNormParameters.m_OutputLayerNormWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QSymmS16));
    }

    ~DummyQLstmLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }

    armnn::QLstmLayer* m_Layer;
};

template<>
struct DummyLayer<armnn::QuantizedLstmLayer, void>
{
    DummyLayer()
    {
        m_Layer = dummyGraph.AddLayer<armnn::QuantizedLstmLayer>("");

        m_Layer->m_QuantizedLstmParameters.m_InputToInputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QAsymmU8));
        m_Layer->m_QuantizedLstmParameters.m_InputToForgetWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QAsymmU8));
        m_Layer->m_QuantizedLstmParameters.m_InputToCellWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QAsymmU8));
        m_Layer->m_QuantizedLstmParameters.m_InputToOutputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QAsymmU8));

        m_Layer->m_QuantizedLstmParameters.m_RecurrentToInputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QAsymmU8));
        m_Layer->m_QuantizedLstmParameters.m_RecurrentToForgetWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QAsymmU8));
        m_Layer->m_QuantizedLstmParameters.m_RecurrentToCellWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QAsymmU8));
        m_Layer->m_QuantizedLstmParameters.m_RecurrentToOutputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QAsymmU8));

        m_Layer->m_QuantizedLstmParameters.m_InputGateBias = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Signed32));
        m_Layer->m_QuantizedLstmParameters.m_ForgetGateBias = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Signed32));
        m_Layer->m_QuantizedLstmParameters.m_CellBias = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Signed32));
        m_Layer->m_QuantizedLstmParameters.m_OutputGateBias = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Signed32));
    }

    ~DummyLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }

    armnn::QuantizedLstmLayer* m_Layer;
};

template<>
struct DummyLayer<armnn::FullyConnectedLayer>
{
    DummyLayer()
    {
        armnn::FullyConnectedLayer::DescriptorType desc;
        m_Layer = dummyGraph.AddLayer<armnn::FullyConnectedLayer>(desc, "");
        m_Layer->m_Weight = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
    }

    ~DummyLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }

    armnn::FullyConnectedLayer* m_Layer;
};

// Tag for giving LayerType entries a unique strong type each.
template<armnn::LayerType>
struct Tag{};
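
// Illustrative note (not part of the original file): Tag<> enables tag dispatch,
// giving each enum value its own overload, e.g.
//
//     bool IsLayerSupportedTest(FactoryType* factory, Tag<armnn::LayerType::Activation>);
//
// which is how the recursive walk below can terminate on Tag<armnn::LayerType::LastLayer>.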

#define DECLARE_LAYER_POLICY_CUSTOM_PARAM(name, descType) \
template<armnn::DataType DataType> \
struct LayerTypePolicy<armnn::LayerType::name, DataType> \
{ \
    using Type = armnn::name##Layer; \
    using Desc = descType; \
    using QueueDesc = armnn::name##QueueDescriptor; \
    constexpr static const char* NameStr = #name; \
    constexpr static const bool IsException = false; \
    \
    static std::unique_ptr<armnn::IWorkload> MakeDummyWorkload(armnn::IWorkloadFactory *factory, \
        unsigned int nIn, unsigned int nOut) \
    { \
        QueueDesc desc; \
        armnn::WorkloadInfo info = MakeDummyWorkloadInfo<DataType>(nIn, nOut); \
        return factory->Create##name(desc, info); \
    } \
};
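
// For illustration (a sketch of the expansion, not part of the file):
// DECLARE_LAYER_POLICY_2_PARAM(Activation) generates roughly
//
//     template<armnn::DataType DataType>
//     struct LayerTypePolicy<armnn::LayerType::Activation, DataType>
//     {
//         using Type      = armnn::ActivationLayer;
//         using Desc      = armnn::ActivationDescriptor;
//         using QueueDesc = armnn::ActivationQueueDescriptor;
//         // ... and MakeDummyWorkload() calls factory->CreateActivation(desc, info);
//     };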

// Define a layer policy specialization for use with the IsLayerSupported tests.
// Use this version for layers whose constructor takes 1 parameter (name).
#define DECLARE_LAYER_POLICY_1_PARAM(name) DECLARE_LAYER_POLICY_CUSTOM_PARAM(name, void)

// Define a layer policy specialization for use with the IsLayerSupported tests.
// Use this version for layers whose constructor takes 2 parameters (descriptor and name).
#define DECLARE_LAYER_POLICY_2_PARAM(name) DECLARE_LAYER_POLICY_CUSTOM_PARAM(name, armnn::name##Descriptor)

#define DECLARE_LAYER_POLICY_EXCEPTION(name, descType) \
template<armnn::DataType DataType> \
struct LayerTypePolicy<armnn::LayerType::name, DataType> \
{ \
    using Type = armnn::name##Layer; \
    using Desc = descType; \
    constexpr static const char* NameStr = #name; \
    constexpr static const bool IsException = true; \
    \
    static std::unique_ptr<armnn::IWorkload> MakeDummyWorkload(armnn::IWorkloadFactory *factory, \
        unsigned int nIn, unsigned int nOut) \
    { \
        IgnoreUnused(factory, nIn, nOut); \
        return std::unique_ptr<armnn::IWorkload>(); \
    } \
};

#define DECLARE_LAYER_POLICY_EXCEPTION_1_PARAM(name) DECLARE_LAYER_POLICY_EXCEPTION(name, void)
#define DECLARE_LAYER_POLICY_EXCEPTION_2_PARAM(name) DECLARE_LAYER_POLICY_EXCEPTION(name, armnn::name##Descriptor)
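
// Illustrative note (an assumption about intent): layers that have no workload to
// create, such as StandIn below, are declared with the exception macros. Their
// policy sets IsException = true, so IsLayerSupportedTest() skips them.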
// Layer policy template.
template<armnn::LayerType Type, armnn::DataType DataType>
struct LayerTypePolicy;

// Every entry in the armnn::LayerType enum must be accounted for below.
DECLARE_LAYER_POLICY_2_PARAM(Activation)

DECLARE_LAYER_POLICY_1_PARAM(Addition)

DECLARE_LAYER_POLICY_2_PARAM(ArgMinMax)

DECLARE_LAYER_POLICY_2_PARAM(BatchNormalization)

DECLARE_LAYER_POLICY_2_PARAM(BatchToSpaceNd)

DECLARE_LAYER_POLICY_2_PARAM(Comparison)

DECLARE_LAYER_POLICY_2_PARAM(Concat)

DECLARE_LAYER_POLICY_1_PARAM(Constant)

DECLARE_LAYER_POLICY_1_PARAM(ConvertBf16ToFp32)

DECLARE_LAYER_POLICY_1_PARAM(ConvertFp16ToFp32)

DECLARE_LAYER_POLICY_1_PARAM(ConvertFp32ToBf16)

DECLARE_LAYER_POLICY_1_PARAM(ConvertFp32ToFp16)

DECLARE_LAYER_POLICY_2_PARAM(Convolution2d)

DECLARE_LAYER_POLICY_1_PARAM(MemCopy)

DECLARE_LAYER_POLICY_1_PARAM(MemImport)

DECLARE_LAYER_POLICY_1_PARAM(Debug)

DECLARE_LAYER_POLICY_2_PARAM(DepthToSpace)

DECLARE_LAYER_POLICY_2_PARAM(DepthwiseConvolution2d)

DECLARE_LAYER_POLICY_1_PARAM(Dequantize)

DECLARE_LAYER_POLICY_2_PARAM(DetectionPostProcess)

DECLARE_LAYER_POLICY_2_PARAM(ElementwiseUnary)

DECLARE_LAYER_POLICY_2_PARAM(FakeQuantization)

DECLARE_LAYER_POLICY_2_PARAM(Fill)

DECLARE_LAYER_POLICY_1_PARAM(Floor)

DECLARE_LAYER_POLICY_2_PARAM(FullyConnected)

DECLARE_LAYER_POLICY_2_PARAM(Gather)

DECLARE_LAYER_POLICY_CUSTOM_PARAM(Input, armnn::LayerBindingId)

DECLARE_LAYER_POLICY_2_PARAM(InstanceNormalization)

DECLARE_LAYER_POLICY_2_PARAM(L2Normalization)

DECLARE_LAYER_POLICY_2_PARAM(LogSoftmax)

DECLARE_LAYER_POLICY_2_PARAM(Lstm)

DECLARE_LAYER_POLICY_1_PARAM(Maximum)

DECLARE_LAYER_POLICY_2_PARAM(Mean)

DECLARE_LAYER_POLICY_1_PARAM(Merge)

DECLARE_LAYER_POLICY_1_PARAM(Minimum)

DECLARE_LAYER_POLICY_1_PARAM(Multiplication)

DECLARE_LAYER_POLICY_2_PARAM(Normalization)

DECLARE_LAYER_POLICY_CUSTOM_PARAM(Output, armnn::LayerBindingId)

DECLARE_LAYER_POLICY_2_PARAM(Pad)

DECLARE_LAYER_POLICY_2_PARAM(Permute)

DECLARE_LAYER_POLICY_2_PARAM(Pooling2d)

DECLARE_LAYER_POLICY_1_PARAM(Prelu)

DECLARE_LAYER_POLICY_2_PARAM(PreCompiled)

DECLARE_LAYER_POLICY_2_PARAM(QLstm)

DECLARE_LAYER_POLICY_1_PARAM(Quantize)

DECLARE_LAYER_POLICY_1_PARAM(QuantizedLstm)

DECLARE_LAYER_POLICY_1_PARAM(Division)

DECLARE_LAYER_POLICY_1_PARAM(Rank)

DECLARE_LAYER_POLICY_2_PARAM(Reshape)

DECLARE_LAYER_POLICY_2_PARAM(Resize)

DECLARE_LAYER_POLICY_2_PARAM(Slice)

DECLARE_LAYER_POLICY_2_PARAM(Softmax)

DECLARE_LAYER_POLICY_2_PARAM(SpaceToBatchNd)

DECLARE_LAYER_POLICY_2_PARAM(SpaceToDepth)

DECLARE_LAYER_POLICY_2_PARAM(Splitter)

DECLARE_LAYER_POLICY_2_PARAM(Stack)

DECLARE_LAYER_POLICY_EXCEPTION_2_PARAM(StandIn)

DECLARE_LAYER_POLICY_2_PARAM(StridedSlice)

DECLARE_LAYER_POLICY_1_PARAM(Subtraction)

DECLARE_LAYER_POLICY_1_PARAM(Switch)

DECLARE_LAYER_POLICY_2_PARAM(Transpose)

DECLARE_LAYER_POLICY_2_PARAM(TransposeConvolution2d)

// Generic implementation to get the number of input slots for a given layer type.
template<armnn::LayerType Type>
unsigned int GetNumInputs(const armnn::Layer& layer)
{
    return layer.GetNumInputSlots();
}

// Generic implementation to get the number of output slots for a given layer type.
template<armnn::LayerType Type>
unsigned int GetNumOutputs(const armnn::Layer& layer)
{
    return layer.GetNumOutputSlots();
}
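
// The dummy ConcatLayer above is always built with an OriginsDescriptor of two
// views (see DummyLayer<armnn::ConcatLayer>), so report two inputs for Concat.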
template<>
unsigned int GetNumInputs<armnn::LayerType::Concat>(const armnn::Layer& layer)
{
    IgnoreUnused(layer);
    return 2;
}

// Tests that the IsLayerSupported() function returns the correct value.
// We determined the correct value by *trying* to create the relevant workload and seeing if it matches what we expect.
// Returns true if expectations are met, otherwise returns false.
template<typename FactoryType, armnn::DataType DataType, armnn::LayerType Type>
bool IsLayerSupportedTest(FactoryType *factory, Tag<Type>)
{
    using LayerPolicy = LayerTypePolicy<Type, DataType>;
    using LayerType = typename LayerPolicy::Type;
    using LayerDesc = typename LayerPolicy::Desc;
    DummyLayer<LayerType, LayerDesc> layer;

    if (LayerPolicy::IsException) // Don't test exceptions to the rule.
    {
        return true;
    }

    unsigned int numIn = GetNumInputs<Type>(*layer.m_Layer);
    unsigned int numOut = GetNumOutputs<Type>(*layer.m_Layer);

    // Make another dummy layer just to make IsLayerSupported have valid inputs.
    DummyLayer<armnn::ConstantLayer, void> previousLayer;
    // Set output of the previous layer to a dummy tensor.
    armnn::TensorInfo output = MakeDummyTensorInfo<DataType>();
    previousLayer.m_Layer->GetOutputSlot(0).SetTensorInfo(output);
    // Connect all outputs of the previous layer to inputs of tested layer.
    for (unsigned int i = 0; i < numIn; i++)
    {
        armnn::IOutputSlot& previousLayerOutputSlot = previousLayer.m_Layer->GetOutputSlot(0);
        armnn::IInputSlot& layerInputSlot = layer.m_Layer->GetInputSlot(i);
        previousLayerOutputSlot.Connect(layerInputSlot);
    }
    // Set outputs of tested layer to a dummy tensor.
    for (unsigned int i = 0; i < numOut; i++)
    {
        layer.m_Layer->GetOutputSlot(i).SetTensorInfo(output);
    }

    std::string layerName = LayerPolicy::NameStr;
    std::string reasonIfUnsupported;
    if (FactoryType::IsLayerSupported(*layer.m_Layer, DataType, reasonIfUnsupported))
    {
        std::string errorMsg = " layer expected support but found none.";
        try
        {
            bool retVal = LayerPolicy::MakeDummyWorkload(factory, numIn, numOut).get() != nullptr;
            BOOST_CHECK_MESSAGE(retVal, layerName << errorMsg);
            return retVal;
        }
        catch(const armnn::InvalidArgumentException& e)
        {
            IgnoreUnused(e);
            // This is ok since we throw InvalidArgumentException when creating the dummy workload.
            return true;
        }
        catch(const std::exception& e)
        {
            errorMsg = e.what();
            BOOST_TEST_ERROR(layerName << ": " << errorMsg);
            return false;
        }
        catch(...)
        {
            errorMsg = "Unexpected error while testing support for ";
            BOOST_TEST_ERROR(errorMsg << layerName);
            return false;
        }
    }
    else
    {
        std::string errorMsg = " layer expected no support (giving reason: " + reasonIfUnsupported + ") but found some.";
        try
        {
            bool retVal = LayerPolicy::MakeDummyWorkload(factory, numIn, numOut).get() == nullptr;
            BOOST_CHECK_MESSAGE(retVal, layerName << errorMsg);
            return retVal;
        }
        // These two exceptions are ok: For workloads that are partially supported, attempting to instantiate them
        // using parameters that make IsLayerSupported() return false should throw an
        // InvalidArgumentException or UnimplementedException.
        catch(const armnn::InvalidArgumentException& e)
        {
            IgnoreUnused(e);
            return true;
        }
        catch(const armnn::UnimplementedException& e)
        {
            IgnoreUnused(e);
            return true;
        }
        catch(const std::exception& e)
        {
            errorMsg = e.what();
            BOOST_TEST_ERROR(layerName << ": " << errorMsg);
            return false;
        }
        catch(...)
        {
            errorMsg = "Unexpected error while testing support for ";
            BOOST_TEST_ERROR(errorMsg << layerName);
            return false;
        }
    }
}

// Helper function to compute the next type in the LayerType enum.
constexpr armnn::LayerType NextType(armnn::LayerType type)
{
    return static_cast<armnn::LayerType>(static_cast<int>(type)+1);
}
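
// Illustrative sketch (not part of the original file): NextType is what lets the
// recursive templates below walk the whole enum at compile time.
//
//     constexpr auto first  = armnn::LayerType::FirstLayer;
//     constexpr auto second = NextType(first);   // the entry right after FirstLayer
//     static_assert(second != first, "NextType must advance the enum");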

// Termination function for determining the end of the LayerType enumeration.
template<typename FactoryType, armnn::DataType DataType, armnn::LayerType Type>
bool IsLayerSupportedTestsImpl(FactoryType *factory, Tag<armnn::LayerType::LastLayer>)
{
    return IsLayerSupportedTest<FactoryType, DataType, Type>(factory, Tag<Type>());
}

// Recursive function to test an entry in the LayerType enum and then iterate on to the next entry.
template<typename FactoryType, armnn::DataType DataType, armnn::LayerType Type>
bool IsLayerSupportedTestsImpl(FactoryType *factory, Tag<Type>)
{
    bool v = IsLayerSupportedTest<FactoryType, DataType, Type>(factory, Tag<Type>());

    return v &&
           IsLayerSupportedTestsImpl<FactoryType, DataType, NextType(Type)>
               (factory, Tag<NextType(Type)>());
}

// Helper function to pass through to the test framework.
template<typename FactoryType, armnn::DataType DataType>
bool IsLayerSupportedTests(FactoryType *factory)
{
    return IsLayerSupportedTestsImpl<FactoryType, DataType>(factory, Tag<armnn::LayerType::FirstLayer>());
}
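
// Illustrative usage (an assumption about the calling test, not part of this file):
// a backend test case runs the whole sweep for one data type roughly like so.
//
//     armnn::RefWorkloadFactory factory;
//     BOOST_CHECK((IsLayerSupportedTests<armnn::RefWorkloadFactory,
//                                        armnn::DataType::Float32>(&factory)));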

template<armnn::LayerType Type>
bool TestLayerTypeMatches()
{
    using LayerPolicy = LayerTypePolicy<Type, armnn::DataType::Float32>;
    using LayerType = typename LayerPolicy::Type;
    using LayerDesc = typename LayerPolicy::Desc;
    DummyLayer<LayerType, LayerDesc> layer;

    std::stringstream ss;
    ss << LayerPolicy::NameStr << " layer type mismatches expected layer type value.";
    bool v = Type == layer.m_Layer->GetType();
    BOOST_CHECK_MESSAGE(v, ss.str());
    return v;
}

template<armnn::LayerType Type>
bool LayerTypeMatchesTestImpl(Tag<armnn::LayerType::LastLayer>)
{
    return TestLayerTypeMatches<Type>();
}

template<armnn::LayerType Type>
bool LayerTypeMatchesTestImpl(Tag<Type>)
{
    return TestLayerTypeMatches<Type>() &&
           LayerTypeMatchesTestImpl<NextType(Type)>(Tag<NextType(Type)>());
}
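
// Illustrative entry point (an assumption): a test would start the walk at the
// first enum entry, e.g.
//
//     BOOST_CHECK(LayerTypeMatchesTestImpl<armnn::LayerType::FirstLayer>(
//                     Tag<armnn::LayerType::FirstLayer>()));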

template<typename FactoryType, typename LayerType, armnn::DataType InputDataType, armnn::DataType OutputDataType>
bool IsConvertLayerSupportedTests(std::string& reasonIfUnsupported)
{
    armnn::Graph graph;
    LayerType* const layer = graph.AddLayer<LayerType>("LayerName");

    armnn::Layer* const input = graph.AddLayer<armnn::InputLayer>(0, "input");
    armnn::Layer* const output = graph.AddLayer<armnn::OutputLayer>(0, "output");

    armnn::TensorInfo inputTensorInfo({1, 3, 2, 3}, InputDataType);
    armnn::TensorInfo outputTensorInfo({1, 3, 2, 3}, OutputDataType);

    input->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
    input->GetOutputHandler(0).SetTensorInfo(inputTensorInfo);
    layer->GetOutputSlot(0).Connect(output->GetInputSlot(0));
    layer->GetOutputHandler(0).SetTensorInfo(outputTensorInfo);

    bool result = FactoryType::IsLayerSupported(*layer, InputDataType, reasonIfUnsupported);

    return result;
}
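
// Illustrative usage (an assumption): checking the Fp16 to Fp32 conversion layer
// on the reference backend.
//
//     std::string reason;
//     bool supported = IsConvertLayerSupportedTests<armnn::RefWorkloadFactory,
//                                                   armnn::ConvertFp16ToFp32Layer,
//                                                   armnn::DataType::Float16,
//                                                   armnn::DataType::Float32>(reason);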

template<typename FactoryType, armnn::DataType InputDataType, armnn::DataType OutputDataType>
bool IsMeanLayerSupportedTests(std::string& reasonIfUnsupported)
{
    armnn::Graph graph;
    static const std::vector<unsigned> axes = {1, 0};
    armnn::MeanDescriptor desc(axes, false);

    armnn::Layer* const layer = graph.AddLayer<armnn::MeanLayer>(desc, "LayerName");

    armnn::Layer* const input = graph.AddLayer<armnn::InputLayer>(0, "input");
    armnn::Layer* const output = graph.AddLayer<armnn::OutputLayer>(0, "output");

    armnn::TensorInfo inputTensorInfo({4, 3, 2}, InputDataType);
    armnn::TensorInfo outputTensorInfo({2}, OutputDataType);

    input->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
    input->GetOutputHandler(0).SetTensorInfo(inputTensorInfo);
    layer->GetOutputSlot(0).Connect(output->GetInputSlot(0));
    layer->GetOutputHandler(0).SetTensorInfo(outputTensorInfo);

    bool result = FactoryType::IsLayerSupported(*layer, InputDataType, reasonIfUnsupported);

    return result;
}

// Tests that IsMeanSupported fails when input tensor dimensions
// do not match output tensor dimensions when keepDims == true.
template<typename FactoryType, armnn::DataType InputDataType, armnn::DataType OutputDataType>
bool IsMeanLayerNotSupportedTests(std::string& reasonIfUnsupported)
{
    armnn::Graph graph;
    static const std::vector<unsigned> axes = {};
    // Set keepDims == true.
    armnn::MeanDescriptor desc(axes, true);

    armnn::Layer* const layer = graph.AddLayer<armnn::MeanLayer>(desc, "LayerName");

    armnn::Layer* const input = graph.AddLayer<armnn::InputLayer>(0, "input");
    armnn::Layer* const output = graph.AddLayer<armnn::OutputLayer>(0, "output");

    // Mismatching number of tensor dimensions.
    armnn::TensorInfo inputTensorInfo({1, 1, 1, 1}, InputDataType);
    armnn::TensorInfo outputTensorInfo({1, 1}, OutputDataType);

    input->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
    input->GetOutputHandler(0).SetTensorInfo(inputTensorInfo);
    layer->GetOutputSlot(0).Connect(output->GetInputSlot(0));
    layer->GetOutputHandler(0).SetTensorInfo(outputTensorInfo);

    bool result = FactoryType::IsLayerSupported(*layer, InputDataType, reasonIfUnsupported);

    return result;
}

template<typename FactoryType, armnn::DataType OutputDataType>
bool IsConstantLayerSupportedTests(std::string& reasonIfUnsupported)
{
    armnn::Graph graph;

    armnn::Layer* const layer = graph.AddLayer<armnn::ConstantLayer>("ConstantLayerName");
    armnn::Layer* const output = graph.AddLayer<armnn::OutputLayer>(0, "OutputLayerName");

    armnn::TensorInfo outputTensorInfo({1, 1}, OutputDataType);

    layer->GetOutputSlot(0).Connect(output->GetInputSlot(0));
    layer->GetOutputHandler(0).SetTensorInfo(outputTensorInfo);

    bool result = FactoryType::IsLayerSupported(*layer, OutputDataType, reasonIfUnsupported);

    return result;
}

} // anonymous namespace