ArmNN 20.02
IsLayerSupportedTestImpl.hpp
//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once

#include <Graph.hpp>

#include <armnn/utility/IgnoreUnused.hpp>

#include <backendsCommon/WorkloadFactory.hpp>

namespace
{
armnn::Graph dummyGraph;

// Make a dummy TensorInfo object.
template<armnn::DataType DataType>
armnn::TensorInfo MakeDummyTensorInfo()
{
    // Scale 1.0 and offset 0 keep the TensorInfo valid for quantized data types as well.
    return armnn::TensorInfo({2,2,2,2}, DataType, 1.0, 0);
}

// Make a dummy WorkloadInfo using a dummy TensorInfo.
template<armnn::DataType DataType>
armnn::WorkloadInfo MakeDummyWorkloadInfo(unsigned int numInputs, unsigned int numOutputs)
{
    armnn::WorkloadInfo info;

    for (unsigned int i = 0; i < numInputs; i++)
    {
        info.m_InputTensorInfos.push_back(MakeDummyTensorInfo<DataType>());
    }

    for (unsigned int o = 0; o < numOutputs; o++)
    {
        info.m_OutputTensorInfos.push_back(MakeDummyTensorInfo<DataType>());
    }

    return info;
}
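
// For instance, MakeDummyWorkloadInfo<armnn::DataType::Float32>(2, 1) yields a WorkloadInfo
// whose m_InputTensorInfos holds two {2,2,2,2} Float32 entries and whose m_OutputTensorInfos
// holds one.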

// Template class to create a dummy layer (2 parameters).
template<typename LayerType, typename DescType = typename LayerType::DescriptorType>
struct DummyLayer
{
    DummyLayer()
    {
        m_Layer = dummyGraph.AddLayer<LayerType>(DescType(), "");
    }

    ~DummyLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }

    LayerType* m_Layer;
};
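
// For example, DummyLayer<armnn::ActivationLayer> adds an ActivationLayer with a
// default-constructed ActivationDescriptor to dummyGraph, and erases it again when the
// DummyLayer goes out of scope.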

// Template class to create a dummy layer (1 parameter).
template<typename LayerType>
struct DummyLayer<LayerType, void>
{
    DummyLayer()
    {
        m_Layer = dummyGraph.AddLayer<LayerType>("");
    }

    ~DummyLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }

    LayerType* m_Layer;
};

template<>
struct DummyLayer<armnn::BatchNormalizationLayer>
{
    DummyLayer()
    {
        m_Layer = dummyGraph.AddLayer<armnn::BatchNormalizationLayer>(armnn::BatchNormalizationDescriptor(), "");
        m_Layer->m_Mean = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_Variance = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_Beta = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_Gamma = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
    }

    ~DummyLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }

    armnn::BatchNormalizationLayer* m_Layer;
};

template<>
struct DummyLayer<armnn::BatchToSpaceNdLayer>
{
    DummyLayer()
    {
        m_Layer = dummyGraph.AddLayer<armnn::BatchToSpaceNdLayer>(armnn::BatchToSpaceNdDescriptor(), "");
    }

    ~DummyLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }

    armnn::BatchToSpaceNdLayer* m_Layer;
};

template<>
struct DummyLayer<armnn::ConstantLayer, void>
{
    DummyLayer()
    {
        m_Layer = dummyGraph.AddLayer<armnn::ConstantLayer>("");
    }

    ~DummyLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }

    armnn::ConstantLayer* m_Layer;
};

template<>
struct DummyLayer<armnn::InputLayer, armnn::LayerBindingId>
{
    DummyLayer()
    {
        m_Layer = dummyGraph.AddLayer<armnn::InputLayer>(armnn::LayerBindingId(), "");
    }

    ~DummyLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }

    armnn::InputLayer* m_Layer;
};

template<>
struct DummyLayer<armnn::ConcatLayer>
{
    DummyLayer()
    {
        armnn::OriginsDescriptor desc(2);
        m_Layer = dummyGraph.AddLayer<armnn::ConcatLayer>(desc, "");
    }

    ~DummyLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }

    armnn::ConcatLayer* m_Layer;
};

template<>
struct DummyLayer<armnn::OutputLayer, armnn::LayerBindingId>
{
    DummyLayer()
    {
        m_Layer = dummyGraph.AddLayer<armnn::OutputLayer>(armnn::LayerBindingId(), "");
    }

    ~DummyLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }

    armnn::OutputLayer* m_Layer;
};

template<>
struct DummyLayer<armnn::SplitterLayer>
{
    DummyLayer()
    {
        armnn::ViewsDescriptor desc(1);
        m_Layer = dummyGraph.AddLayer<armnn::SplitterLayer>(desc, "");
    }

    ~DummyLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }

    armnn::SplitterLayer* m_Layer;
};

template <typename ConvolutionLayerType>
struct DummyConvolutionLayer
{
    DummyConvolutionLayer()
    {
        typename ConvolutionLayerType::DescriptorType desc;
        desc.m_StrideX = 1;
        desc.m_StrideY = 1;
        m_Layer = dummyGraph.AddLayer<ConvolutionLayerType>(desc, "");
        m_Layer->m_Weight = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_Bias = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
    }

    ~DummyConvolutionLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }

    ConvolutionLayerType* m_Layer;
};

template<>
struct DummyLayer<armnn::Convolution2dLayer>
    : public DummyConvolutionLayer<armnn::Convolution2dLayer>
{
};

template<>
struct DummyLayer<armnn::DepthwiseConvolution2dLayer>
    : public DummyConvolutionLayer<armnn::DepthwiseConvolution2dLayer>
{
};

template<>
struct DummyLayer<armnn::TransposeConvolution2dLayer>
    : public DummyConvolutionLayer<armnn::TransposeConvolution2dLayer>
{
};

template<>
struct DummyLayer<armnn::DetectionPostProcessLayer>
{
    DummyLayer()
    {
        m_Layer = dummyGraph.AddLayer<armnn::DetectionPostProcessLayer>(armnn::DetectionPostProcessDescriptor(), "");
        m_Layer->m_Anchors = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
    }

    ~DummyLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }

    armnn::DetectionPostProcessLayer* m_Layer;
};

template <typename LstmLayerType>
struct DummyLstmLayer
{
    DummyLstmLayer()
    {
        typename LstmLayerType::DescriptorType desc;
        desc.m_CifgEnabled = false;

        m_Layer = dummyGraph.AddLayer<LstmLayerType>(desc, "");
        m_Layer->m_BasicParameters.m_InputToForgetWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_BasicParameters.m_InputToCellWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_BasicParameters.m_InputToOutputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_BasicParameters.m_RecurrentToForgetWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_BasicParameters.m_RecurrentToCellWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_BasicParameters.m_RecurrentToOutputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_BasicParameters.m_ForgetGateBias = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_BasicParameters.m_CellBias = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_BasicParameters.m_OutputGateBias = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));

        // CIFG is disabled, so the input-gate parameters are needed as well.
        m_Layer->m_CifgParameters.m_InputToInputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_CifgParameters.m_RecurrentToInputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_CifgParameters.m_CellToInputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_CifgParameters.m_InputGateBias = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
    }

    ~DummyLstmLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }

    LstmLayerType* m_Layer;
};

template<>
struct DummyLayer<armnn::LstmLayer>
    : public DummyLstmLayer<armnn::LstmLayer>
{
};

template<>
struct DummyLayer<armnn::QuantizedLstmLayer, void>
{
    DummyLayer()
    {
        m_Layer = dummyGraph.AddLayer<armnn::QuantizedLstmLayer>("");

        // QuantizedLstm uses 8-bit weights and 32-bit biases.
        m_Layer->m_QuantizedLstmParameters.m_InputToInputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QAsymmU8));
        m_Layer->m_QuantizedLstmParameters.m_InputToForgetWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QAsymmU8));
        m_Layer->m_QuantizedLstmParameters.m_InputToCellWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QAsymmU8));
        m_Layer->m_QuantizedLstmParameters.m_InputToOutputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QAsymmU8));

        m_Layer->m_QuantizedLstmParameters.m_RecurrentToInputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QAsymmU8));
        m_Layer->m_QuantizedLstmParameters.m_RecurrentToForgetWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QAsymmU8));
        m_Layer->m_QuantizedLstmParameters.m_RecurrentToCellWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QAsymmU8));
        m_Layer->m_QuantizedLstmParameters.m_RecurrentToOutputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QAsymmU8));

        m_Layer->m_QuantizedLstmParameters.m_InputGateBias = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Signed32));
        m_Layer->m_QuantizedLstmParameters.m_ForgetGateBias = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Signed32));
        m_Layer->m_QuantizedLstmParameters.m_CellBias = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Signed32));
        m_Layer->m_QuantizedLstmParameters.m_OutputGateBias = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Signed32));
    }

    ~DummyLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }

    armnn::QuantizedLstmLayer* m_Layer;
};

template<>
struct DummyLayer<armnn::FullyConnectedLayer>
{
    DummyLayer()
    {
        armnn::FullyConnectedLayer::DescriptorType desc;
        m_Layer = dummyGraph.AddLayer<armnn::FullyConnectedLayer>(desc, "");
        m_Layer->m_Weight = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
    }

    ~DummyLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }

    armnn::FullyConnectedLayer* m_Layer;
};

// Tag that gives each LayerType entry its own strong type.
template<armnn::LayerType>
struct Tag{};

#define DECLARE_LAYER_POLICY_CUSTOM_PARAM(name, descType) \
template<armnn::DataType DataType> \
struct LayerTypePolicy<armnn::LayerType::name, DataType> \
{ \
    using Type = armnn::name##Layer; \
    using Desc = descType; \
    using QueueDesc = armnn::name##QueueDescriptor; \
    constexpr static const char* NameStr = #name; \
    constexpr static const bool IsException = false; \
    \
    static std::unique_ptr<armnn::IWorkload> MakeDummyWorkload(armnn::IWorkloadFactory* factory, \
                                                               unsigned int nIn, unsigned int nOut) \
    { \
        QueueDesc desc; \
        armnn::WorkloadInfo info = MakeDummyWorkloadInfo<DataType>(nIn, nOut); \
        return factory->Create##name(desc, info); \
    } \
};
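
// For illustration: DECLARE_LAYER_POLICY_2_PARAM(Activation), invoked further below,
// expands to roughly the following specialization (a sketch, not part of the build):
//
//   template<armnn::DataType DataType>
//   struct LayerTypePolicy<armnn::LayerType::Activation, DataType>
//   {
//       using Type = armnn::ActivationLayer;
//       using Desc = armnn::ActivationDescriptor;
//       using QueueDesc = armnn::ActivationQueueDescriptor;
//       constexpr static const char* NameStr = "Activation";
//       constexpr static const bool IsException = false;
//
//       static std::unique_ptr<armnn::IWorkload> MakeDummyWorkload(armnn::IWorkloadFactory* factory,
//                                                                  unsigned int nIn, unsigned int nOut)
//       {
//           QueueDesc desc;
//           armnn::WorkloadInfo info = MakeDummyWorkloadInfo<DataType>(nIn, nOut);
//           return factory->CreateActivation(desc, info);
//       }
//   };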

// Define a layer policy specialization for use with the IsLayerSupported tests.
// Use this version for layers whose constructor takes one parameter (the name).
#define DECLARE_LAYER_POLICY_1_PARAM(name) DECLARE_LAYER_POLICY_CUSTOM_PARAM(name, void)

// Define a layer policy specialization for use with the IsLayerSupported tests.
// Use this version for layers whose constructor takes two parameters (the descriptor and the name).
#define DECLARE_LAYER_POLICY_2_PARAM(name) DECLARE_LAYER_POLICY_CUSTOM_PARAM(name, armnn::name##Descriptor)

#define DECLARE_LAYER_POLICY_EXCEPTION(name, descType) \
template<armnn::DataType DataType> \
struct LayerTypePolicy<armnn::LayerType::name, DataType> \
{ \
    using Type = armnn::name##Layer; \
    using Desc = descType; \
    constexpr static const char* NameStr = #name; \
    constexpr static const bool IsException = true; \
    \
    static std::unique_ptr<armnn::IWorkload> MakeDummyWorkload(armnn::IWorkloadFactory* factory, \
                                                               unsigned int nIn, unsigned int nOut) \
    { \
        IgnoreUnused(factory, nIn, nOut); \
        return std::unique_ptr<armnn::IWorkload>(); \
    } \
};

#define DECLARE_LAYER_POLICY_EXCEPTION_1_PARAM(name) DECLARE_LAYER_POLICY_EXCEPTION(name, void)
#define DECLARE_LAYER_POLICY_EXCEPTION_2_PARAM(name) DECLARE_LAYER_POLICY_EXCEPTION(name, armnn::name##Descriptor)

// Layer policy template.
template<armnn::LayerType Type, armnn::DataType DataType>
struct LayerTypePolicy;

// Every entry in the armnn::LayerType enum must be accounted for below.
DECLARE_LAYER_POLICY_2_PARAM(Activation)

DECLARE_LAYER_POLICY_1_PARAM(Addition)

DECLARE_LAYER_POLICY_2_PARAM(ArgMinMax)

DECLARE_LAYER_POLICY_2_PARAM(BatchNormalization)

DECLARE_LAYER_POLICY_2_PARAM(BatchToSpaceNd)

DECLARE_LAYER_POLICY_2_PARAM(Comparison)

DECLARE_LAYER_POLICY_2_PARAM(Concat)

DECLARE_LAYER_POLICY_1_PARAM(Constant)

DECLARE_LAYER_POLICY_1_PARAM(ConvertFp16ToFp32)

DECLARE_LAYER_POLICY_1_PARAM(ConvertFp32ToFp16)

DECLARE_LAYER_POLICY_2_PARAM(Convolution2d)

DECLARE_LAYER_POLICY_1_PARAM(MemCopy)

DECLARE_LAYER_POLICY_1_PARAM(MemImport)

DECLARE_LAYER_POLICY_1_PARAM(Debug)

DECLARE_LAYER_POLICY_2_PARAM(DepthToSpace)

DECLARE_LAYER_POLICY_2_PARAM(DepthwiseConvolution2d)

DECLARE_LAYER_POLICY_1_PARAM(Dequantize)

DECLARE_LAYER_POLICY_2_PARAM(DetectionPostProcess)

DECLARE_LAYER_POLICY_2_PARAM(ElementwiseUnary)

DECLARE_LAYER_POLICY_2_PARAM(FakeQuantization)

DECLARE_LAYER_POLICY_1_PARAM(Floor)

DECLARE_LAYER_POLICY_2_PARAM(FullyConnected)

DECLARE_LAYER_POLICY_1_PARAM(Gather)

DECLARE_LAYER_POLICY_CUSTOM_PARAM(Input, armnn::LayerBindingId)

DECLARE_LAYER_POLICY_2_PARAM(InstanceNormalization)

DECLARE_LAYER_POLICY_2_PARAM(L2Normalization)

DECLARE_LAYER_POLICY_2_PARAM(LogSoftmax)

DECLARE_LAYER_POLICY_2_PARAM(Lstm)

DECLARE_LAYER_POLICY_1_PARAM(Maximum)

DECLARE_LAYER_POLICY_2_PARAM(Mean)

DECLARE_LAYER_POLICY_1_PARAM(Merge)

DECLARE_LAYER_POLICY_1_PARAM(Minimum)

DECLARE_LAYER_POLICY_1_PARAM(Multiplication)

DECLARE_LAYER_POLICY_2_PARAM(Normalization)

DECLARE_LAYER_POLICY_CUSTOM_PARAM(Output, armnn::LayerBindingId)

DECLARE_LAYER_POLICY_2_PARAM(Pad)

DECLARE_LAYER_POLICY_1_PARAM(Quantize)

DECLARE_LAYER_POLICY_2_PARAM(Permute)

DECLARE_LAYER_POLICY_2_PARAM(Pooling2d)

DECLARE_LAYER_POLICY_2_PARAM(PreCompiled)

DECLARE_LAYER_POLICY_1_PARAM(Prelu)

DECLARE_LAYER_POLICY_1_PARAM(QuantizedLstm)

DECLARE_LAYER_POLICY_1_PARAM(Division)

DECLARE_LAYER_POLICY_2_PARAM(Resize)

DECLARE_LAYER_POLICY_2_PARAM(Reshape)

DECLARE_LAYER_POLICY_2_PARAM(Slice)

DECLARE_LAYER_POLICY_2_PARAM(Softmax)

DECLARE_LAYER_POLICY_2_PARAM(SpaceToBatchNd)

DECLARE_LAYER_POLICY_2_PARAM(SpaceToDepth)

DECLARE_LAYER_POLICY_2_PARAM(Splitter)

DECLARE_LAYER_POLICY_2_PARAM(Stack)

DECLARE_LAYER_POLICY_EXCEPTION_2_PARAM(StandIn)

DECLARE_LAYER_POLICY_2_PARAM(StridedSlice)

DECLARE_LAYER_POLICY_1_PARAM(Subtraction)

DECLARE_LAYER_POLICY_1_PARAM(Switch)

DECLARE_LAYER_POLICY_2_PARAM(Transpose)

DECLARE_LAYER_POLICY_2_PARAM(TransposeConvolution2d)

// Generic implementation to get the number of input slots for a given layer type.
template<armnn::LayerType Type>
unsigned int GetNumInputs(const armnn::Layer& layer)
{
    return layer.GetNumInputSlots();
}

// Generic implementation to get the number of output slots for a given layer type.
template<armnn::LayerType Type>
unsigned int GetNumOutputs(const armnn::Layer& layer)
{
    return layer.GetNumOutputSlots();
}

// The dummy ConcatLayer is constructed with an OriginsDescriptor covering two views,
// so it always presents two inputs.
template<>
unsigned int GetNumInputs<armnn::LayerType::Concat>(const armnn::Layer& layer)
{
    IgnoreUnused(layer);
    return 2;
}

// Tests that the IsLayerSupported() function returns the correct value.
// We determine the correct value by *trying* to create the relevant workload and seeing if it matches what we expect.
// Returns true if expectations are met, otherwise returns false.
template<typename FactoryType, armnn::DataType DataType, armnn::LayerType Type>
bool IsLayerSupportedTest(FactoryType *factory, Tag<Type>)
{
    using LayerPolicy = LayerTypePolicy<Type, DataType>;
    using LayerType = typename LayerPolicy::Type;
    using LayerDesc = typename LayerPolicy::Desc;
    DummyLayer<LayerType, LayerDesc> layer;

    if (LayerPolicy::IsException) // Don't test exceptions to the rule.
    {
        return true;
    }

    unsigned int numIn = GetNumInputs<Type>(*layer.m_Layer);
    unsigned int numOut = GetNumOutputs<Type>(*layer.m_Layer);

    // Make another dummy layer just to make IsLayerSupported have valid inputs.
    DummyLayer<armnn::ConstantLayer, void> previousLayer;
    // Set output of the previous layer to a dummy tensor.
    armnn::TensorInfo output = MakeDummyTensorInfo<DataType>();
    previousLayer.m_Layer->GetOutputSlot(0).SetTensorInfo(output);
    // Connect all outputs of the previous layer to inputs of tested layer.
    for (unsigned int i = 0; i < numIn; i++)
    {
        armnn::IOutputSlot& previousLayerOutputSlot = previousLayer.m_Layer->GetOutputSlot(0);
        armnn::IInputSlot& layerInputSlot = layer.m_Layer->GetInputSlot(i);
        previousLayerOutputSlot.Connect(layerInputSlot);
    }
    // Set outputs of tested layer to a dummy tensor.
    for (unsigned int i = 0; i < numOut; i++)
    {
        layer.m_Layer->GetOutputSlot(i).SetTensorInfo(output);
    }

    std::string layerName = LayerPolicy::NameStr;
    std::string reasonIfUnsupported;
    if (FactoryType::IsLayerSupported(*layer.m_Layer, DataType, reasonIfUnsupported))
    {
        std::string errorMsg = " layer expected support but found none.";
        try
        {
            bool retVal = LayerPolicy::MakeDummyWorkload(factory, numIn, numOut).get() != nullptr;
            BOOST_CHECK_MESSAGE(retVal, layerName << errorMsg);
            return retVal;
        }
        catch(const armnn::InvalidArgumentException& e)
        {
            IgnoreUnused(e);
            // This is ok since we throw InvalidArgumentException when creating the dummy workload.
            return true;
        }
        catch(const std::exception& e)
        {
            errorMsg = e.what();
            BOOST_TEST_ERROR(layerName << ": " << errorMsg);
            return false;
        }
        catch(...)
        {
            errorMsg = "Unexpected error while testing support for ";
            BOOST_TEST_ERROR(errorMsg << layerName);
            return false;
        }
    }
    else
    {
        std::string errorMsg = " layer expected no support (giving reason: " + reasonIfUnsupported + ") but found some.";
        try
        {
            bool retVal = LayerPolicy::MakeDummyWorkload(factory, numIn, numOut).get() == nullptr;
            BOOST_CHECK_MESSAGE(retVal, layerName << errorMsg);
            return retVal;
        }
        // These two exceptions are ok: for workloads that are partially supported, attempting to instantiate them
        // using parameters that make IsLayerSupported() return false should throw an
        // InvalidArgumentException or an UnimplementedException.
        catch(const armnn::InvalidArgumentException& e)
        {
            IgnoreUnused(e);
            return true;
        }
        catch(const armnn::UnimplementedException& e)
        {
            IgnoreUnused(e);
            return true;
        }
        catch(const std::exception& e)
        {
            errorMsg = e.what();
            BOOST_TEST_ERROR(layerName << ": " << errorMsg);
            return false;
        }
        catch(...)
        {
            errorMsg = "Unexpected error while testing support for ";
            BOOST_TEST_ERROR(errorMsg << layerName);
            return false;
        }
    }
}

// Helper function to compute the next type in the LayerType enum.
constexpr armnn::LayerType NextType(armnn::LayerType type)
{
    return static_cast<armnn::LayerType>(static_cast<int>(type) + 1);
}

// Termination function for the recursion: tests the last entry in the LayerType enum.
template<typename FactoryType, armnn::DataType DataType, armnn::LayerType Type>
bool IsLayerSupportedTestsImpl(FactoryType *factory, Tag<armnn::LayerType::LastLayer>)
{
    return IsLayerSupportedTest<FactoryType, DataType, Type>(factory, Tag<Type>());
}

// Recursive function to test an entry in the LayerType enum and then iterate on the next entry.
template<typename FactoryType, armnn::DataType DataType, armnn::LayerType Type>
bool IsLayerSupportedTestsImpl(FactoryType *factory, Tag<Type>)
{
    bool v = IsLayerSupportedTest<FactoryType, DataType, Type>(factory, Tag<Type>());

    return v &&
           IsLayerSupportedTestsImpl<FactoryType, DataType, NextType(Type)>
               (factory, Tag<NextType(Type)>());
}

// Helper function to pass through to the test framework.
template<typename FactoryType, armnn::DataType DataType>
bool IsLayerSupportedTests(FactoryType *factory)
{
    return IsLayerSupportedTestsImpl<FactoryType, DataType>(factory, Tag<armnn::LayerType::FirstLayer>());
}
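
// A minimal usage sketch, for illustration only. It assumes the reference backend's
// RefWorkloadFactory and the Boost.Test macros already used in this file:
//
//   armnn::RefWorkloadFactory factory;
//   BOOST_CHECK(IsLayerSupportedTests<armnn::RefWorkloadFactory, armnn::DataType::Float32>(&factory));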

template<armnn::LayerType Type>
bool TestLayerTypeMatches()
{
    using LayerPolicy = LayerTypePolicy<Type, armnn::DataType::Float32>;
    using LayerType = typename LayerPolicy::Type;
    using LayerDesc = typename LayerPolicy::Desc;
    DummyLayer<LayerType, LayerDesc> layer;

    std::stringstream ss;
    ss << LayerPolicy::NameStr << " layer type does not match the expected layer type value.";
    bool v = Type == layer.m_Layer->GetType();
    BOOST_CHECK_MESSAGE(v, ss.str());
    return v;
}

template<armnn::LayerType Type>
bool LayerTypeMatchesTestImpl(Tag<armnn::LayerType::LastLayer>)
{
    return TestLayerTypeMatches<Type>();
}

template<armnn::LayerType Type>
bool LayerTypeMatchesTestImpl(Tag<Type>)
{
    return TestLayerTypeMatches<Type>() &&
           LayerTypeMatchesTestImpl<NextType(Type)>(Tag<NextType(Type)>());
}

template<typename FactoryType, typename LayerType, armnn::DataType InputDataType, armnn::DataType OutputDataType>
bool IsConvertLayerSupportedTests(std::string& reasonIfUnsupported)
{
    armnn::Graph graph;
    LayerType* const layer = graph.AddLayer<LayerType>("LayerName");

    armnn::Layer* const input = graph.AddLayer<armnn::InputLayer>(0, "input");
    armnn::Layer* const output = graph.AddLayer<armnn::OutputLayer>(0, "output");

    armnn::TensorInfo inputTensorInfo({1, 3, 2, 3}, InputDataType);
    armnn::TensorInfo outputTensorInfo({1, 3, 2, 3}, OutputDataType);

    input->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
    input->GetOutputHandler(0).SetTensorInfo(inputTensorInfo);
    layer->GetOutputSlot(0).Connect(output->GetInputSlot(0));
    layer->GetOutputHandler(0).SetTensorInfo(outputTensorInfo);

    bool result = FactoryType::IsLayerSupported(*layer, InputDataType, reasonIfUnsupported);

    return result;
}
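
// Illustrative call (a sketch, assuming the reference backend's RefWorkloadFactory;
// not part of this header):
//
//   std::string reason;
//   bool ok = IsConvertLayerSupportedTests<armnn::RefWorkloadFactory,
//                                          armnn::ConvertFp16ToFp32Layer,
//                                          armnn::DataType::Float16,
//                                          armnn::DataType::Float32>(reason);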

template<typename FactoryType, armnn::DataType InputDataType, armnn::DataType OutputDataType>
bool IsMeanLayerSupportedTests(std::string& reasonIfUnsupported)
{
    armnn::Graph graph;
    static const std::vector<unsigned> axes = {1, 0};
    armnn::MeanDescriptor desc(axes, false);

    armnn::Layer* const layer = graph.AddLayer<armnn::MeanLayer>(desc, "LayerName");

    armnn::Layer* const input = graph.AddLayer<armnn::InputLayer>(0, "input");
    armnn::Layer* const output = graph.AddLayer<armnn::OutputLayer>(0, "output");

    armnn::TensorInfo inputTensorInfo({4, 3, 2}, InputDataType);
    armnn::TensorInfo outputTensorInfo({2}, OutputDataType);

    input->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
    input->GetOutputHandler(0).SetTensorInfo(inputTensorInfo);
    layer->GetOutputSlot(0).Connect(output->GetInputSlot(0));
    layer->GetOutputHandler(0).SetTensorInfo(outputTensorInfo);

    bool result = FactoryType::IsLayerSupported(*layer, InputDataType, reasonIfUnsupported);

    return result;
}

// Tests that IsMeanSupported fails when input tensor dimensions
// do not match output tensor dimensions when keepDims == true.
template<typename FactoryType, armnn::DataType InputDataType, armnn::DataType OutputDataType>
bool IsMeanLayerNotSupportedTests(std::string& reasonIfUnsupported)
{
    armnn::Graph graph;
    static const std::vector<unsigned> axes = {};
    // Set keepDims == true.
    armnn::MeanDescriptor desc(axes, true);

    armnn::Layer* const layer = graph.AddLayer<armnn::MeanLayer>(desc, "LayerName");

    armnn::Layer* const input = graph.AddLayer<armnn::InputLayer>(0, "input");
    armnn::Layer* const output = graph.AddLayer<armnn::OutputLayer>(0, "output");

    // Mismatching number of tensor dimensions.
    armnn::TensorInfo inputTensorInfo({1, 1, 1, 1}, InputDataType);
    armnn::TensorInfo outputTensorInfo({1, 1}, OutputDataType);

    input->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
    input->GetOutputHandler(0).SetTensorInfo(inputTensorInfo);
    layer->GetOutputSlot(0).Connect(output->GetInputSlot(0));
    layer->GetOutputHandler(0).SetTensorInfo(outputTensorInfo);

    bool result = FactoryType::IsLayerSupported(*layer, InputDataType, reasonIfUnsupported);

    return result;
}

} // anonymous namespace