ArmNN 20.05
IsLayerSupportedTestImpl.hpp
//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once

#include <Graph.hpp>

#include <backendsCommon/WorkloadFactory.hpp>

#include <armnn/utility/IgnoreUnused.hpp>

namespace
{
armnn::Graph dummyGraph;

// Make a dummy TensorInfo object.
template<armnn::DataType DataType>
armnn::TensorInfo MakeDummyTensorInfo()
{
    return armnn::TensorInfo({2,2,2,2}, DataType, 1.0, 0);
}
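
// Note (illustrative, not part of the original file): the scale/offset arguments
// matter for quantized DataTypes; scale 1.0 and offset 0 keep this dummy TensorInfo
// valid for e.g. armnn::DataType::QAsymmU8 as well as for float types.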

// Make a dummy WorkloadInfo using a dummy TensorInfo.
template<armnn::DataType DataType>
armnn::WorkloadInfo MakeDummyWorkloadInfo(unsigned int numInputs, unsigned int numOutputs)
{
    armnn::WorkloadInfo info;

    for (unsigned int i=0; i < numInputs; i++)
    {
        info.m_InputTensorInfos.push_back(MakeDummyTensorInfo<DataType>());
    }

    for (unsigned int o=0; o < numOutputs; o++)
    {
        info.m_OutputTensorInfos.push_back(MakeDummyTensorInfo<DataType>());
    }

    return info;
}
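
// Example (illustrative, not part of the original file):
//
//     armnn::WorkloadInfo info = MakeDummyWorkloadInfo<armnn::DataType::Float32>(2, 1);
//     // info.m_InputTensorInfos.size() == 2 and info.m_OutputTensorInfos.size() == 1,
//     // each entry describing a Float32 tensor of shape {2,2,2,2}.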

// Template class to create a dummy layer (2 parameters).
template<typename LayerType, typename DescType = typename LayerType::DescriptorType>
struct DummyLayer
{
    DummyLayer()
    {
        m_Layer = dummyGraph.AddLayer<LayerType>(DescType(), "");
    }

    ~DummyLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }

    LayerType* m_Layer;
};
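
// Usage sketch (illustrative): DummyLayer is an RAII helper. Constructing one adds a
// layer built from a default descriptor to dummyGraph; destroying it erases the layer:
//
//     {
//         DummyLayer<armnn::SoftmaxLayer> softmax; // AddLayer(SoftmaxDescriptor(), "")
//         // ... inspect softmax.m_Layer ...
//     } // EraseLayer(softmax.m_Layer) runs here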

// Template class to create a dummy layer (1 parameter).
template<typename LayerType>
struct DummyLayer<LayerType, void>
{
    DummyLayer()
    {
        m_Layer = dummyGraph.AddLayer<LayerType>("");
    }

    ~DummyLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }

    LayerType* m_Layer;
};

template<>
struct DummyLayer<armnn::BatchNormalizationLayer>
{
    DummyLayer()
    {
        m_Layer = dummyGraph.AddLayer<armnn::BatchNormalizationLayer>(armnn::BatchNormalizationDescriptor(), "");
        m_Layer->m_Mean = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_Variance = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_Beta = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_Gamma = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
    }

    ~DummyLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }

    armnn::BatchNormalizationLayer* m_Layer;
};

template<>
struct DummyLayer<armnn::BatchToSpaceNdLayer>
{
    DummyLayer()
    {
        m_Layer = dummyGraph.AddLayer<armnn::BatchToSpaceNdLayer>(armnn::BatchToSpaceNdDescriptor(), "");
    }

    ~DummyLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }

    armnn::BatchToSpaceNdLayer* m_Layer;
};

template<>
struct DummyLayer<armnn::ConstantLayer, void>
{
    DummyLayer()
    {
        m_Layer = dummyGraph.AddLayer<armnn::ConstantLayer>("");
    }

    ~DummyLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }

    armnn::ConstantLayer* m_Layer;
};

template<>
struct DummyLayer<armnn::InputLayer, armnn::LayerBindingId>
{
    DummyLayer()
    {
        m_Layer = dummyGraph.AddLayer<armnn::InputLayer>(armnn::LayerBindingId(), "");
    }

    ~DummyLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }

    armnn::InputLayer* m_Layer;
};

template<>
struct DummyLayer<armnn::ConcatLayer>
{
    DummyLayer()
    {
        armnn::OriginsDescriptor desc(2);
        m_Layer = dummyGraph.AddLayer<armnn::ConcatLayer>(desc, "");
    }

    ~DummyLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }

    armnn::ConcatLayer* m_Layer;
};

template<>
struct DummyLayer<armnn::OutputLayer, armnn::LayerBindingId>
{
    DummyLayer()
    {
        m_Layer = dummyGraph.AddLayer<armnn::OutputLayer>(armnn::LayerBindingId(), "");
    }

    ~DummyLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }

    armnn::OutputLayer* m_Layer;
};

template<>
struct DummyLayer<armnn::SplitterLayer>
{
    DummyLayer()
    {
        armnn::ViewsDescriptor desc(1);
        m_Layer = dummyGraph.AddLayer<armnn::SplitterLayer>(desc, "");
    }

    ~DummyLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }

    armnn::SplitterLayer* m_Layer;
};

template <typename ConvolutionLayerType>
struct DummyConvolutionLayer
{
    DummyConvolutionLayer()
    {
        typename ConvolutionLayerType::DescriptorType desc;
        desc.m_StrideX = 1;
        desc.m_StrideY = 1;
        m_Layer = dummyGraph.AddLayer<ConvolutionLayerType>(desc, "");
        m_Layer->m_Weight = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_Bias = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
    }

    ~DummyConvolutionLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }

    ConvolutionLayerType* m_Layer;
};

template<>
struct DummyLayer<armnn::Convolution2dLayer>
    : public DummyConvolutionLayer<armnn::Convolution2dLayer>
{
};

template<>
struct DummyLayer<armnn::DepthwiseConvolution2dLayer>
    : public DummyConvolutionLayer<armnn::DepthwiseConvolution2dLayer>
{
};

template<>
struct DummyLayer<armnn::TransposeConvolution2dLayer>
    : public DummyConvolutionLayer<armnn::TransposeConvolution2dLayer>
{
};

241 template<>
242 struct DummyLayer<armnn::DetectionPostProcessLayer>
243 {
244  DummyLayer()
245  {
247  m_Layer->m_Anchors = std::make_unique<armnn::ScopedCpuTensorHandle>(
249  }
250 
251  ~DummyLayer()
252  {
253  dummyGraph.EraseLayer(m_Layer);
254  }
255 
257 };
258 
259 template <typename LstmLayerType>
260 struct DummyLstmLayer
261 {
262  DummyLstmLayer()
263  {
264  typename LstmLayerType::DescriptorType desc;
265  desc.m_CifgEnabled = false;
266 
267  m_Layer = dummyGraph.AddLayer<LstmLayerType>(armnn::LstmDescriptor(), "");
268  m_Layer->m_BasicParameters.m_InputToForgetWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
270  m_Layer->m_BasicParameters.m_InputToCellWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
272  m_Layer->m_BasicParameters.m_InputToOutputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
274  m_Layer->m_BasicParameters.m_RecurrentToForgetWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
276  m_Layer->m_BasicParameters.m_RecurrentToCellWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
278  m_Layer->m_BasicParameters.m_RecurrentToOutputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
280  m_Layer->m_BasicParameters.m_ForgetGateBias = std::make_unique<armnn::ScopedCpuTensorHandle>(
282  m_Layer->m_BasicParameters.m_CellBias = std::make_unique<armnn::ScopedCpuTensorHandle>(
284  m_Layer->m_BasicParameters.m_OutputGateBias = std::make_unique<armnn::ScopedCpuTensorHandle>(
286 
287  m_Layer->m_CifgParameters.m_InputToInputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
289  m_Layer->m_CifgParameters.m_RecurrentToInputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
291  m_Layer->m_CifgParameters.m_InputGateBias = std::make_unique<armnn::ScopedCpuTensorHandle>(
293  }
294 
295  ~DummyLstmLayer()
296  {
297  dummyGraph.EraseLayer(m_Layer);
298  }
299 
301 };

template<>
struct DummyLayer<armnn::LstmLayer>
    : public DummyLstmLayer<armnn::LstmLayer>
{
};

309 template <typename QLstmLayerType>
310 struct DummyQLstmLayer
311 {
312  DummyQLstmLayer()
313  {
314  typename QLstmLayerType::DescriptorType desc;
315  desc.m_CifgEnabled = false;
316  desc.m_PeepholeEnabled = true;
317  desc.m_ProjectionEnabled = true;
318  desc.m_LayerNormEnabled = true;
319 
320  m_Layer = dummyGraph.AddLayer<QLstmLayerType>(armnn::QLstmDescriptor(), "qLstm");
321 
322  // Basic params
323  m_Layer->m_BasicParameters.m_InputToForgetWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
325  m_Layer->m_BasicParameters.m_InputToCellWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
327  m_Layer->m_BasicParameters.m_InputToOutputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
329 
330  m_Layer->m_BasicParameters.m_RecurrentToForgetWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
332  m_Layer->m_BasicParameters.m_RecurrentToCellWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
334  m_Layer->m_BasicParameters.m_RecurrentToOutputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
336 
337  m_Layer->m_BasicParameters.m_ForgetGateBias = std::make_unique<armnn::ScopedCpuTensorHandle>(
339  m_Layer->m_BasicParameters.m_CellBias = std::make_unique<armnn::ScopedCpuTensorHandle>(
341  m_Layer->m_BasicParameters.m_OutputGateBias = std::make_unique<armnn::ScopedCpuTensorHandle>(
343 
344  // CIFG optional params
345  m_Layer->m_CifgParameters.m_InputToInputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
347  m_Layer->m_CifgParameters.m_RecurrentToInputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
349  m_Layer->m_CifgParameters.m_InputGateBias = std::make_unique<armnn::ScopedCpuTensorHandle>(
351 
352  // Projection optional params
353  m_Layer->m_ProjectionParameters.m_ProjectionWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
355  m_Layer->m_ProjectionParameters.m_ProjectionBias = std::make_unique<armnn::ScopedCpuTensorHandle>(
357 
358  // Peephole optional params
359  m_Layer->m_PeepholeParameters.m_CellToInputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
361  m_Layer->m_PeepholeParameters.m_CellToForgetWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
363  m_Layer->m_PeepholeParameters.m_CellToOutputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
365 
366  // Layer normalization optional params
367  m_Layer->m_LayerNormParameters.m_InputLayerNormWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
369  m_Layer->m_LayerNormParameters.m_ForgetLayerNormWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
371  m_Layer->m_LayerNormParameters.m_CellLayerNormWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
373  m_Layer->m_LayerNormParameters.m_OutputLayerNormWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
375  }
376 
377  ~DummyQLstmLayer()
378  {
379  dummyGraph.EraseLayer(m_Layer);
380  }
381 
383 };

template<>
struct DummyLayer<armnn::QuantizedLstmLayer, void>
{
    DummyLayer()
    {
        m_Layer = dummyGraph.AddLayer<armnn::QuantizedLstmLayer>("");

        m_Layer->m_QuantizedLstmParameters.m_InputToInputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1}), armnn::DataType::QAsymmU8));
        m_Layer->m_QuantizedLstmParameters.m_InputToForgetWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1}), armnn::DataType::QAsymmU8));
        m_Layer->m_QuantizedLstmParameters.m_InputToCellWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1}), armnn::DataType::QAsymmU8));
        m_Layer->m_QuantizedLstmParameters.m_InputToOutputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1}), armnn::DataType::QAsymmU8));

        m_Layer->m_QuantizedLstmParameters.m_RecurrentToInputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1}), armnn::DataType::QAsymmU8));
        m_Layer->m_QuantizedLstmParameters.m_RecurrentToForgetWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1}), armnn::DataType::QAsymmU8));
        m_Layer->m_QuantizedLstmParameters.m_RecurrentToCellWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1}), armnn::DataType::QAsymmU8));
        m_Layer->m_QuantizedLstmParameters.m_RecurrentToOutputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1}), armnn::DataType::QAsymmU8));

        m_Layer->m_QuantizedLstmParameters.m_InputGateBias = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1}), armnn::DataType::Signed32));
        m_Layer->m_QuantizedLstmParameters.m_ForgetGateBias = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1}), armnn::DataType::Signed32));
        m_Layer->m_QuantizedLstmParameters.m_CellBias = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1}), armnn::DataType::Signed32));
        m_Layer->m_QuantizedLstmParameters.m_OutputGateBias = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1}), armnn::DataType::Signed32));
    }

    ~DummyLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }

    armnn::QuantizedLstmLayer* m_Layer;
};

template<>
struct DummyLayer<armnn::FullyConnectedLayer>
{
    DummyLayer()
    {
        armnn::FullyConnectedLayer::DescriptorType desc;
        m_Layer = dummyGraph.AddLayer<armnn::FullyConnectedLayer>(desc, "");
        m_Layer->m_Weight = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
    }

    ~DummyLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }

    armnn::FullyConnectedLayer* m_Layer;
};

// Tag for giving LayerType entries a unique strong type each.
template<armnn::LayerType>
struct Tag{};

#define DECLARE_LAYER_POLICY_CUSTOM_PARAM(name, descType) \
template<armnn::DataType DataType> \
struct LayerTypePolicy<armnn::LayerType::name, DataType> \
{ \
    using Type = armnn::name##Layer; \
    using Desc = descType; \
    using QueueDesc = armnn::name##QueueDescriptor; \
    constexpr static const char* NameStr = #name; \
    constexpr static const bool IsException = false; \
    \
    static std::unique_ptr<armnn::IWorkload> MakeDummyWorkload(armnn::IWorkloadFactory *factory, \
        unsigned int nIn, unsigned int nOut) \
    { \
        QueueDesc desc; \
        armnn::WorkloadInfo info = MakeDummyWorkloadInfo<DataType>(nIn, nOut); \
        return factory->Create##name(desc, info); \
    } \
};

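// For reference (illustrative, not part of the original file):
// DECLARE_LAYER_POLICY_CUSTOM_PARAM(Activation, armnn::ActivationDescriptor)
// expands to roughly:
//
//     template<armnn::DataType DataType>
//     struct LayerTypePolicy<armnn::LayerType::Activation, DataType>
//     {
//         using Type = armnn::ActivationLayer;
//         using Desc = armnn::ActivationDescriptor;
//         using QueueDesc = armnn::ActivationQueueDescriptor;
//         // ... MakeDummyWorkload calls factory->CreateActivation(desc, info).
//     };
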
// Define a layer policy specialization for use with the IsLayerSupported tests.
// Use this version for layers whose constructor takes 1 parameter (name).
#define DECLARE_LAYER_POLICY_1_PARAM(name) DECLARE_LAYER_POLICY_CUSTOM_PARAM(name, void)

// Define a layer policy specialization for use with the IsLayerSupported tests.
// Use this version for layers whose constructor takes 2 parameters (descriptor and name).
#define DECLARE_LAYER_POLICY_2_PARAM(name) DECLARE_LAYER_POLICY_CUSTOM_PARAM(name, armnn::name##Descriptor)


#define DECLARE_LAYER_POLICY_EXCEPTION(name, descType) \
template<armnn::DataType DataType> \
struct LayerTypePolicy<armnn::LayerType::name, DataType> \
{ \
    using Type = armnn::name##Layer; \
    using Desc = descType; \
    constexpr static const char* NameStr = #name; \
    constexpr static const bool IsException = true; \
    \
    static std::unique_ptr<armnn::IWorkload> MakeDummyWorkload(armnn::IWorkloadFactory *factory, \
        unsigned int nIn, unsigned int nOut) \
    { \
        IgnoreUnused(factory, nIn, nOut); \
        return std::unique_ptr<armnn::IWorkload>(); \
    } \
};

#define DECLARE_LAYER_POLICY_EXCEPTION_1_PARAM(name) DECLARE_LAYER_POLICY_EXCEPTION(name, void)
#define DECLARE_LAYER_POLICY_EXCEPTION_2_PARAM(name) DECLARE_LAYER_POLICY_EXCEPTION(name, armnn::name##Descriptor)

// Layer policy template.
template<armnn::LayerType Type, armnn::DataType DataType>
struct LayerTypePolicy;

// Every entry in the armnn::LayerType enum must be accounted for below.
DECLARE_LAYER_POLICY_2_PARAM(Activation)

DECLARE_LAYER_POLICY_1_PARAM(Addition)

DECLARE_LAYER_POLICY_2_PARAM(ArgMinMax)

DECLARE_LAYER_POLICY_2_PARAM(BatchNormalization)

DECLARE_LAYER_POLICY_2_PARAM(BatchToSpaceNd)

DECLARE_LAYER_POLICY_2_PARAM(Comparison)

DECLARE_LAYER_POLICY_2_PARAM(Concat)

DECLARE_LAYER_POLICY_1_PARAM(Constant)

DECLARE_LAYER_POLICY_1_PARAM(ConvertBf16ToFp32)

DECLARE_LAYER_POLICY_1_PARAM(ConvertFp16ToFp32)

DECLARE_LAYER_POLICY_1_PARAM(ConvertFp32ToBf16)

DECLARE_LAYER_POLICY_1_PARAM(ConvertFp32ToFp16)

DECLARE_LAYER_POLICY_2_PARAM(Convolution2d)

DECLARE_LAYER_POLICY_1_PARAM(MemCopy)

DECLARE_LAYER_POLICY_1_PARAM(MemImport)

DECLARE_LAYER_POLICY_1_PARAM(Debug)

DECLARE_LAYER_POLICY_2_PARAM(DepthToSpace)

DECLARE_LAYER_POLICY_2_PARAM(DepthwiseConvolution2d)

DECLARE_LAYER_POLICY_1_PARAM(Dequantize)

DECLARE_LAYER_POLICY_2_PARAM(DetectionPostProcess)

DECLARE_LAYER_POLICY_2_PARAM(ElementwiseUnary)

DECLARE_LAYER_POLICY_2_PARAM(FakeQuantization)

DECLARE_LAYER_POLICY_1_PARAM(Floor)

DECLARE_LAYER_POLICY_2_PARAM(FullyConnected)

DECLARE_LAYER_POLICY_1_PARAM(Gather)

DECLARE_LAYER_POLICY_CUSTOM_PARAM(Input, armnn::LayerBindingId)

DECLARE_LAYER_POLICY_2_PARAM(InstanceNormalization)

DECLARE_LAYER_POLICY_2_PARAM(L2Normalization)

DECLARE_LAYER_POLICY_2_PARAM(LogSoftmax)

DECLARE_LAYER_POLICY_2_PARAM(Lstm)

DECLARE_LAYER_POLICY_1_PARAM(Maximum)

DECLARE_LAYER_POLICY_2_PARAM(Mean)

DECLARE_LAYER_POLICY_1_PARAM(Merge)

DECLARE_LAYER_POLICY_1_PARAM(Minimum)

DECLARE_LAYER_POLICY_1_PARAM(Multiplication)

DECLARE_LAYER_POLICY_2_PARAM(Normalization)

DECLARE_LAYER_POLICY_CUSTOM_PARAM(Output, armnn::LayerBindingId)

DECLARE_LAYER_POLICY_2_PARAM(Pad)

DECLARE_LAYER_POLICY_2_PARAM(Permute)

DECLARE_LAYER_POLICY_2_PARAM(Pooling2d)

DECLARE_LAYER_POLICY_1_PARAM(Prelu)

DECLARE_LAYER_POLICY_2_PARAM(PreCompiled)

DECLARE_LAYER_POLICY_2_PARAM(QLstm)

DECLARE_LAYER_POLICY_1_PARAM(Quantize)

DECLARE_LAYER_POLICY_1_PARAM(QuantizedLstm)

DECLARE_LAYER_POLICY_1_PARAM(Division)

DECLARE_LAYER_POLICY_2_PARAM(Resize)

DECLARE_LAYER_POLICY_2_PARAM(Reshape)

DECLARE_LAYER_POLICY_2_PARAM(Slice)

DECLARE_LAYER_POLICY_2_PARAM(Softmax)

DECLARE_LAYER_POLICY_2_PARAM(SpaceToBatchNd)

DECLARE_LAYER_POLICY_2_PARAM(SpaceToDepth)

DECLARE_LAYER_POLICY_2_PARAM(Splitter)

DECLARE_LAYER_POLICY_2_PARAM(Stack)

DECLARE_LAYER_POLICY_EXCEPTION_2_PARAM(StandIn)

DECLARE_LAYER_POLICY_2_PARAM(StridedSlice)

DECLARE_LAYER_POLICY_1_PARAM(Subtraction)

DECLARE_LAYER_POLICY_1_PARAM(Switch)

DECLARE_LAYER_POLICY_2_PARAM(Transpose)

DECLARE_LAYER_POLICY_2_PARAM(TransposeConvolution2d)

// Generic implementation to get the number of input slots for a given layer type.
template<armnn::LayerType Type>
unsigned int GetNumInputs(const armnn::Layer& layer)
{
    return layer.GetNumInputSlots();
}

// Generic implementation to get the number of output slots for a given layer type.
template<armnn::LayerType Type>
unsigned int GetNumOutputs(const armnn::Layer& layer)
{
    return layer.GetNumOutputSlots();
}

template<>
unsigned int GetNumInputs<armnn::LayerType::Concat>(const armnn::Layer& layer)
{
    IgnoreUnused(layer);
    return 2;
}

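// Note (illustrative, not part of the original file): the hard-coded 2 matches
// DummyLayer<armnn::ConcatLayer> above, which constructs the layer with
// armnn::OriginsDescriptor(2), i.e. two input views to be concatenated.
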
// Tests that the IsLayerSupported() function returns the correct value.
// We determine the correct value by *trying* to create the relevant workload and checking that the outcome
// matches our expectation. Returns true if expectations are met, otherwise returns false.
template<typename FactoryType, armnn::DataType DataType, armnn::LayerType Type>
bool IsLayerSupportedTest(FactoryType *factory, Tag<Type>)
{
    using LayerPolicy = LayerTypePolicy<Type, DataType>;
    using LayerType = typename LayerPolicy::Type;
    using LayerDesc = typename LayerPolicy::Desc;
    DummyLayer<LayerType, LayerDesc> layer;

    if (LayerPolicy::IsException) // Don't test exceptions to the rule.
    {
        return true;
    }

    unsigned int numIn = GetNumInputs<Type>(*layer.m_Layer);
    unsigned int numOut = GetNumOutputs<Type>(*layer.m_Layer);

    // Make another dummy layer just to make IsLayerSupported have valid inputs.
    DummyLayer<armnn::ConstantLayer, void> previousLayer;
    // Set output of the previous layer to a dummy tensor.
    armnn::TensorInfo output = MakeDummyTensorInfo<DataType>();
    previousLayer.m_Layer->GetOutputSlot(0).SetTensorInfo(output);
    // Connect all outputs of the previous layer to inputs of tested layer.
    for (unsigned int i = 0; i < numIn; i++)
    {
        armnn::IOutputSlot& previousLayerOutputSlot = previousLayer.m_Layer->GetOutputSlot(0);
        armnn::IInputSlot& layerInputSlot = layer.m_Layer->GetInputSlot(i);
        previousLayerOutputSlot.Connect(layerInputSlot);
    }
    // Set outputs of tested layer to a dummy tensor.
    for (unsigned int i = 0; i < numOut; i++)
    {
        layer.m_Layer->GetOutputSlot(i).SetTensorInfo(output);
    }

    std::string layerName = LayerPolicy::NameStr;
    std::string reasonIfUnsupported;
    if (FactoryType::IsLayerSupported(*layer.m_Layer, DataType, reasonIfUnsupported))
    {
        std::string errorMsg = " layer expected support but found none.";
        try
        {
            bool retVal = LayerPolicy::MakeDummyWorkload(factory, numIn, numOut).get() != nullptr;
            BOOST_CHECK_MESSAGE(retVal, layerName << errorMsg);
            return retVal;
        }
        catch(const armnn::InvalidArgumentException& e)
        {
            IgnoreUnused(e);
            // This is ok since we throw InvalidArgumentException when creating the dummy workload.
            return true;
        }
        catch(const std::exception& e)
        {
            errorMsg = e.what();
            BOOST_TEST_ERROR(layerName << ": " << errorMsg);
            return false;
        }
        catch(...)
        {
            errorMsg = "Unexpected error while testing support for ";
            BOOST_TEST_ERROR(errorMsg << layerName);
            return false;
        }
    }
    else
    {
        std::string errorMsg = " layer expected no support (giving reason: " + reasonIfUnsupported + ") but found some.";
        try
        {
            bool retVal = LayerPolicy::MakeDummyWorkload(factory, numIn, numOut).get() == nullptr;
            BOOST_CHECK_MESSAGE(retVal, layerName << errorMsg);
            return retVal;
        }
        // These two exceptions are ok: for workloads that are partially supported, attempting to instantiate them
        // using parameters that make IsLayerSupported() return false should throw an
        // InvalidArgumentException or UnimplementedException.
        catch(const armnn::InvalidArgumentException& e)
        {
            IgnoreUnused(e);
            return true;
        }
        catch(const armnn::UnimplementedException& e)
        {
            IgnoreUnused(e);
            return true;
        }
        catch(const std::exception& e)
        {
            errorMsg = e.what();
            BOOST_TEST_ERROR(layerName << ": " << errorMsg);
            return false;
        }
        catch(...)
        {
            errorMsg = "Unexpected error while testing support for ";
            BOOST_TEST_ERROR(errorMsg << layerName);
            return false;
        }
    }
}

// Helper function to compute the next type in the LayerType enum.
constexpr armnn::LayerType NextType(armnn::LayerType type)
{
    return static_cast<armnn::LayerType>(static_cast<int>(type)+1);
}

// Termination function for determining the end of the LayerType enumeration.
template<typename FactoryType, armnn::DataType DataType, armnn::LayerType Type>
bool IsLayerSupportedTestsImpl(FactoryType *factory, Tag<armnn::LayerType::LastLayer>)
{
    return IsLayerSupportedTest<FactoryType, DataType, Type>(factory, Tag<Type>());
}

// Recursive function to test an entry in the LayerType enum and then iterate to the next entry.
template<typename FactoryType, armnn::DataType DataType, armnn::LayerType Type>
bool IsLayerSupportedTestsImpl(FactoryType *factory, Tag<Type>)
{
    bool v = IsLayerSupportedTest<FactoryType, DataType, Type>(factory, Tag<Type>());

    return v &&
           IsLayerSupportedTestsImpl<FactoryType, DataType, NextType(Type)>
               (factory, Tag<NextType(Type)>());
}
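
// Illustrative note (not part of the original file): the recursion unrolls at compile
// time into one IsLayerSupportedTest() call per enum entry, short-circuiting on the
// first failure:
//
//     IsLayerSupportedTest<FactoryType, DataType, LayerType::Activation>(...)
//  && IsLayerSupportedTest<FactoryType, DataType, LayerType::Addition>(...)
//  && ... && IsLayerSupportedTest<FactoryType, DataType, LayerType::LastLayer>(...)
//
// The Tag<LastLayer> overload above is the better match for the final step, which
// ends the recursion.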

// Helper function to pass through to the test framework.
template<typename FactoryType, armnn::DataType DataType>
bool IsLayerSupportedTests(FactoryType *factory)
{
    return IsLayerSupportedTestsImpl<FactoryType, DataType>(factory, Tag<armnn::LayerType::FirstLayer>());
}
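
// Usage sketch (illustrative; assumes a backend test using, e.g., the reference
// backend's armnn::RefWorkloadFactory):
//
//     armnn::RefWorkloadFactory factory;
//     bool allPassed = IsLayerSupportedTests<armnn::RefWorkloadFactory,
//                                            armnn::DataType::Float32>(&factory);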

template<armnn::LayerType Type>
bool TestLayerTypeMatches()
{
    using LayerPolicy = LayerTypePolicy<Type, armnn::DataType::Float32>;
    using LayerType = typename LayerPolicy::Type;
    using LayerDesc = typename LayerPolicy::Desc;
    DummyLayer<LayerType, LayerDesc> layer;

    std::stringstream ss;
    ss << LayerPolicy::NameStr << " layer type does not match the expected layer type value.";
    bool v = Type == layer.m_Layer->GetType();
    BOOST_CHECK_MESSAGE(v, ss.str());
    return v;
}

template<armnn::LayerType Type>
bool LayerTypeMatchesTestImpl(Tag<armnn::LayerType::LastLayer>)
{
    return TestLayerTypeMatches<Type>();
}

template<armnn::LayerType Type>
bool LayerTypeMatchesTestImpl(Tag<Type>)
{
    return TestLayerTypeMatches<Type>() &&
           LayerTypeMatchesTestImpl<NextType(Type)>(Tag<NextType(Type)>());
}

template<typename FactoryType, typename LayerType, armnn::DataType InputDataType, armnn::DataType OutputDataType>
bool IsConvertLayerSupportedTests(std::string& reasonIfUnsupported)
{
    armnn::Graph graph;
    LayerType* const layer = graph.AddLayer<LayerType>("LayerName");

    armnn::Layer* const input = graph.AddLayer<armnn::InputLayer>(0, "input");
    armnn::Layer* const output = graph.AddLayer<armnn::OutputLayer>(0, "output");

    armnn::TensorInfo inputTensorInfo({1, 3, 2, 3}, InputDataType);
    armnn::TensorInfo outputTensorInfo({1, 3, 2, 3}, OutputDataType);

    input->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
    input->GetOutputHandler(0).SetTensorInfo(inputTensorInfo);
    layer->GetOutputSlot(0).Connect(output->GetInputSlot(0));
    layer->GetOutputHandler(0).SetTensorInfo(outputTensorInfo);

    bool result = FactoryType::IsLayerSupported(*layer, InputDataType, reasonIfUnsupported);

    return result;
}
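
// Usage sketch (illustrative): a backend test might call this as
//
//     std::string reasonIfUnsupported;
//     bool supported = IsConvertLayerSupportedTests<armnn::RefWorkloadFactory,
//                                                   armnn::ConvertFp16ToFp32Layer,
//                                                   armnn::DataType::Float16,
//                                                   armnn::DataType::Float32>(reasonIfUnsupported);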

template<typename FactoryType, armnn::DataType InputDataType, armnn::DataType OutputDataType>
bool IsMeanLayerSupportedTests(std::string& reasonIfUnsupported)
{
    armnn::Graph graph;
    static const std::vector<unsigned> axes = {1, 0};
    armnn::MeanDescriptor desc(axes, false);

    armnn::Layer* const layer = graph.AddLayer<armnn::MeanLayer>(desc, "LayerName");

    armnn::Layer* const input = graph.AddLayer<armnn::InputLayer>(0, "input");
    armnn::Layer* const output = graph.AddLayer<armnn::OutputLayer>(0, "output");

    armnn::TensorInfo inputTensorInfo({4, 3, 2}, InputDataType);
    armnn::TensorInfo outputTensorInfo({2}, OutputDataType);

    input->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
    input->GetOutputHandler(0).SetTensorInfo(inputTensorInfo);
    layer->GetOutputSlot(0).Connect(output->GetInputSlot(0));
    layer->GetOutputHandler(0).SetTensorInfo(outputTensorInfo);

    bool result = FactoryType::IsLayerSupported(*layer, InputDataType, reasonIfUnsupported);

    return result;
}

// Tests that IsMeanSupported fails when input tensor dimensions
// do not match output tensor dimensions when keepDims == true.
template<typename FactoryType, armnn::DataType InputDataType, armnn::DataType OutputDataType>
bool IsMeanLayerNotSupportedTests(std::string& reasonIfUnsupported)
{
    armnn::Graph graph;
    static const std::vector<unsigned> axes = {};
    // Set keepDims == true.
    armnn::MeanDescriptor desc(axes, true);

    armnn::Layer* const layer = graph.AddLayer<armnn::MeanLayer>(desc, "LayerName");

    armnn::Layer* const input = graph.AddLayer<armnn::InputLayer>(0, "input");
    armnn::Layer* const output = graph.AddLayer<armnn::OutputLayer>(0, "output");

    // Mismatching number of tensor dimensions.
    armnn::TensorInfo inputTensorInfo({1, 1, 1, 1}, InputDataType);
    armnn::TensorInfo outputTensorInfo({1, 1}, OutputDataType);

    input->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
    input->GetOutputHandler(0).SetTensorInfo(inputTensorInfo);
    layer->GetOutputSlot(0).Connect(output->GetInputSlot(0));
    layer->GetOutputHandler(0).SetTensorInfo(outputTensorInfo);

    bool result = FactoryType::IsLayerSupported(*layer, InputDataType, reasonIfUnsupported);

    return result;
}

template<typename FactoryType, armnn::DataType OutputDataType>
bool IsConstantLayerSupportedTests(std::string& reasonIfUnsupported)
{
    armnn::Graph graph;

    armnn::Layer* const layer = graph.AddLayer<armnn::ConstantLayer>("ConstantLayerName");
    armnn::Layer* const output = graph.AddLayer<armnn::OutputLayer>(0, "OutputLayerName");

    armnn::TensorInfo outputTensorInfo({1, 1}, OutputDataType);

    layer->GetOutputSlot(0).Connect(output->GetInputSlot(0));
    layer->GetOutputHandler(0).SetTensorInfo(outputTensorInfo);

    bool result = FactoryType::IsLayerSupported(*layer, OutputDataType, reasonIfUnsupported);

    return result;
}

} // anonymous namespace