IsLayerSupportedTestImpl.hpp
//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once

#include <Graph.hpp>

#include <backendsCommon/CpuTensorHandle.hpp>

#include <boost/core/ignore_unused.hpp>

namespace
{
armnn::Graph dummyGraph;

// Make a dummy TensorInfo object.
template<armnn::DataType DataType>
armnn::TensorInfo MakeDummyTensorInfo()
{
    return armnn::TensorInfo({2,2,2,2}, DataType, 1.0f, 0);
}

// Make a dummy WorkloadInfo using a dummy TensorInfo.
template<armnn::DataType DataType>
armnn::WorkloadInfo MakeDummyWorkloadInfo(unsigned int numInputs, unsigned int numOutputs)
{
    armnn::WorkloadInfo info;

    for (unsigned int i = 0; i < numInputs; i++)
    {
        info.m_InputTensorInfos.push_back(MakeDummyTensorInfo<DataType>());
    }

    for (unsigned int o = 0; o < numOutputs; o++)
    {
        info.m_OutputTensorInfos.push_back(MakeDummyTensorInfo<DataType>());
    }

    return info;
}

// Template class to create a dummy layer (2 parameters).
template<typename LayerType, typename DescType = typename LayerType::DescriptorType>
struct DummyLayer
{
    DummyLayer()
    {
        m_Layer = dummyGraph.AddLayer<LayerType>(DescType(), "");
    }

    ~DummyLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }

    LayerType* m_Layer;
};

// Template class to create a dummy layer (1 parameter).
template<typename LayerType>
struct DummyLayer<LayerType, void>
{
    DummyLayer()
    {
        m_Layer = dummyGraph.AddLayer<LayerType>("");
    }

    ~DummyLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }

    LayerType* m_Layer;
};

template<>
struct DummyLayer<armnn::BatchNormalizationLayer>
{
    DummyLayer()
    {
        m_Layer = dummyGraph.AddLayer<armnn::BatchNormalizationLayer>(
            armnn::BatchNormalizationDescriptor(), "");
        m_Layer->m_Mean = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_Variance = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_Beta = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_Gamma = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
    }

    ~DummyLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }

    armnn::BatchNormalizationLayer* m_Layer;
};

template<>
struct DummyLayer<armnn::BatchToSpaceNdLayer>
{
    DummyLayer()
    {
        m_Layer = dummyGraph.AddLayer<armnn::BatchToSpaceNdLayer>(armnn::BatchToSpaceNdDescriptor(), "");
    }

    ~DummyLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }

    armnn::BatchToSpaceNdLayer* m_Layer;
};

template<>
struct DummyLayer<armnn::ConstantLayer, void>
{
    DummyLayer()
    {
        m_Layer = dummyGraph.AddLayer<armnn::ConstantLayer>("");
    }

    ~DummyLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }

    armnn::ConstantLayer* m_Layer;
};

template<>
struct DummyLayer<armnn::InputLayer, armnn::LayerBindingId>
{
    DummyLayer()
    {
        m_Layer = dummyGraph.AddLayer<armnn::InputLayer>(armnn::LayerBindingId(), "");
    }

    ~DummyLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }

    armnn::InputLayer* m_Layer;
};

template<>
struct DummyLayer<armnn::ConcatLayer>
{
    DummyLayer()
    {
        armnn::OriginsDescriptor desc(2);
        m_Layer = dummyGraph.AddLayer<armnn::ConcatLayer>(desc, "");
    }

    ~DummyLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }

    armnn::ConcatLayer* m_Layer;
};

template<>
struct DummyLayer<armnn::OutputLayer, armnn::LayerBindingId>
{
    DummyLayer()
    {
        m_Layer = dummyGraph.AddLayer<armnn::OutputLayer>(armnn::LayerBindingId(), "");
    }

    ~DummyLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }

    armnn::OutputLayer* m_Layer;
};

template<>
struct DummyLayer<armnn::SplitterLayer>
{
    DummyLayer()
    {
        armnn::ViewsDescriptor desc(1);
        m_Layer = dummyGraph.AddLayer<armnn::SplitterLayer>(desc, "");
    }

    ~DummyLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }

    armnn::SplitterLayer* m_Layer;
};

// Template class to create a dummy convolution layer with dummy weights and bias.
template <typename ConvolutionLayerType>
struct DummyConvolutionLayer
{
    DummyConvolutionLayer()
    {
        typename ConvolutionLayerType::DescriptorType desc;
        desc.m_StrideX = 1;
        desc.m_StrideY = 1;
        m_Layer = dummyGraph.AddLayer<ConvolutionLayerType>(desc, "");
        m_Layer->m_Weight = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_Bias = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
    }

    ~DummyConvolutionLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }

    ConvolutionLayerType* m_Layer;
};

template<>
struct DummyLayer<armnn::Convolution2dLayer>
    : public DummyConvolutionLayer<armnn::Convolution2dLayer>
{
};

template<>
struct DummyLayer<armnn::DepthwiseConvolution2dLayer>
    : public DummyConvolutionLayer<armnn::DepthwiseConvolution2dLayer>
{
};

template<>
struct DummyLayer<armnn::TransposeConvolution2dLayer>
    : public DummyConvolutionLayer<armnn::TransposeConvolution2dLayer>
{
};

template<>
struct DummyLayer<armnn::DetectionPostProcessLayer>
{
    DummyLayer()
    {
        m_Layer = dummyGraph.AddLayer<armnn::DetectionPostProcessLayer>(
            armnn::DetectionPostProcessDescriptor(), "");
        m_Layer->m_Anchors = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
    }

    ~DummyLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }

    armnn::DetectionPostProcessLayer* m_Layer;
};

template <typename LstmLayerType>
struct DummyLstmLayer
{
    DummyLstmLayer()
    {
        typename LstmLayerType::DescriptorType desc;
        desc.m_CifgEnabled = false;

        m_Layer = dummyGraph.AddLayer<LstmLayerType>(desc, "");
        m_Layer->m_BasicParameters.m_InputToForgetWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_BasicParameters.m_InputToCellWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_BasicParameters.m_InputToOutputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_BasicParameters.m_RecurrentToForgetWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_BasicParameters.m_RecurrentToCellWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_BasicParameters.m_RecurrentToOutputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_BasicParameters.m_ForgetGateBias = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_BasicParameters.m_CellBias = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_BasicParameters.m_OutputGateBias = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));

        // CIFG is disabled above, so the input gate parameters are required as well.
        m_Layer->m_CifgParameters.m_InputToInputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_CifgParameters.m_RecurrentToInputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_CifgParameters.m_CellToInputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_CifgParameters.m_InputGateBias = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
    }

    ~DummyLstmLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }

    LstmLayerType* m_Layer;
};

template<>
struct DummyLayer<armnn::LstmLayer>
    : public DummyLstmLayer<armnn::LstmLayer>
{
};

template<>
struct DummyLayer<armnn::QuantizedLstmLayer, void>
{
    DummyLayer()
    {
        m_Layer = dummyGraph.AddLayer<armnn::QuantizedLstmLayer>("");

        m_Layer->m_QuantizedLstmParameters.m_InputToInputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QAsymmU8));
        m_Layer->m_QuantizedLstmParameters.m_InputToForgetWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QAsymmU8));
        m_Layer->m_QuantizedLstmParameters.m_InputToCellWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QAsymmU8));
        m_Layer->m_QuantizedLstmParameters.m_InputToOutputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QAsymmU8));

        m_Layer->m_QuantizedLstmParameters.m_RecurrentToInputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QAsymmU8));
        m_Layer->m_QuantizedLstmParameters.m_RecurrentToForgetWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QAsymmU8));
        m_Layer->m_QuantizedLstmParameters.m_RecurrentToCellWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QAsymmU8));
        m_Layer->m_QuantizedLstmParameters.m_RecurrentToOutputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QAsymmU8));

        m_Layer->m_QuantizedLstmParameters.m_InputGateBias = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Signed32));
        m_Layer->m_QuantizedLstmParameters.m_ForgetGateBias = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Signed32));
        m_Layer->m_QuantizedLstmParameters.m_CellBias = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Signed32));
        m_Layer->m_QuantizedLstmParameters.m_OutputGateBias = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Signed32));
    }

    ~DummyLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }

    armnn::QuantizedLstmLayer* m_Layer;
};

template<>
struct DummyLayer<armnn::FullyConnectedLayer>
{
    DummyLayer()
    {
        armnn::FullyConnectedLayer::DescriptorType desc;
        m_Layer = dummyGraph.AddLayer<armnn::FullyConnectedLayer>(desc, "");
        m_Layer->m_Weight = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
    }

    ~DummyLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }

    armnn::FullyConnectedLayer* m_Layer;
};

// Tag for giving LayerType entries a unique strong type each.
template<armnn::LayerType>
struct Tag{};

#define DECLARE_LAYER_POLICY_CUSTOM_PARAM(name, descType) \
template<armnn::DataType DataType> \
struct LayerTypePolicy<armnn::LayerType::name, DataType> \
{ \
    using Type = armnn::name##Layer; \
    using Desc = descType; \
    using QueueDesc = armnn::name##QueueDescriptor; \
    constexpr static const char* NameStr = #name; \
    constexpr static const bool IsException = false; \
    \
    static std::unique_ptr<armnn::IWorkload> MakeDummyWorkload(armnn::IWorkloadFactory* factory, \
                                                               unsigned int nIn, unsigned int nOut) \
    { \
        QueueDesc desc; \
        armnn::WorkloadInfo info = MakeDummyWorkloadInfo<DataType>(nIn, nOut); \
        return factory->Create##name(desc, info); \
    } \
};

// Define a layer policy specialization for use with the IsLayerSupported tests.
// Use this version for layers whose constructor takes one parameter (the name).
#define DECLARE_LAYER_POLICY_1_PARAM(name) DECLARE_LAYER_POLICY_CUSTOM_PARAM(name, void)

// Define a layer policy specialization for use with the IsLayerSupported tests.
// Use this version for layers whose constructor takes two parameters (a descriptor and the name).
#define DECLARE_LAYER_POLICY_2_PARAM(name) DECLARE_LAYER_POLICY_CUSTOM_PARAM(name, armnn::name##Descriptor)
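
// As an illustration of the macros above, DECLARE_LAYER_POLICY_2_PARAM(Softmax)
// expands to a specialization equivalent to:
//
//   template<armnn::DataType DataType>
//   struct LayerTypePolicy<armnn::LayerType::Softmax, DataType>
//   {
//       using Type = armnn::SoftmaxLayer;
//       using Desc = armnn::SoftmaxDescriptor;
//       using QueueDesc = armnn::SoftmaxQueueDescriptor;
//       constexpr static const char* NameStr = "Softmax";
//       constexpr static const bool IsException = false;
//
//       static std::unique_ptr<armnn::IWorkload> MakeDummyWorkload(armnn::IWorkloadFactory* factory,
//                                                                  unsigned int nIn, unsigned int nOut)
//       {
//           QueueDesc desc;
//           armnn::WorkloadInfo info = MakeDummyWorkloadInfo<DataType>(nIn, nOut);
//           return factory->CreateSoftmax(desc, info);
//       }
//   };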

#define DECLARE_LAYER_POLICY_EXCEPTION(name, descType) \
template<armnn::DataType DataType> \
struct LayerTypePolicy<armnn::LayerType::name, DataType> \
{ \
    using Type = armnn::name##Layer; \
    using Desc = descType; \
    constexpr static const char* NameStr = #name; \
    constexpr static const bool IsException = true; \
    \
    static std::unique_ptr<armnn::IWorkload> MakeDummyWorkload(armnn::IWorkloadFactory* factory, \
                                                               unsigned int nIn, unsigned int nOut) \
    { \
        boost::ignore_unused(factory, nIn, nOut); \
        return std::unique_ptr<armnn::IWorkload>(); \
    } \
};

#define DECLARE_LAYER_POLICY_EXCEPTION_1_PARAM(name) DECLARE_LAYER_POLICY_EXCEPTION(name, void)
#define DECLARE_LAYER_POLICY_EXCEPTION_2_PARAM(name) DECLARE_LAYER_POLICY_EXCEPTION(name, armnn::name##Descriptor)

// Layer policy template.
template<armnn::LayerType Type, armnn::DataType DataType>
struct LayerTypePolicy;

// Every entry in the armnn::LayerType enum must be accounted for below.
DECLARE_LAYER_POLICY_2_PARAM(Activation)

DECLARE_LAYER_POLICY_1_PARAM(Addition)

DECLARE_LAYER_POLICY_2_PARAM(ArgMinMax)

DECLARE_LAYER_POLICY_2_PARAM(BatchNormalization)

DECLARE_LAYER_POLICY_2_PARAM(BatchToSpaceNd)

DECLARE_LAYER_POLICY_2_PARAM(Comparison)

DECLARE_LAYER_POLICY_2_PARAM(Concat)

DECLARE_LAYER_POLICY_1_PARAM(Constant)

DECLARE_LAYER_POLICY_1_PARAM(ConvertFp16ToFp32)

DECLARE_LAYER_POLICY_1_PARAM(ConvertFp32ToFp16)

DECLARE_LAYER_POLICY_2_PARAM(Convolution2d)

DECLARE_LAYER_POLICY_1_PARAM(MemCopy)

DECLARE_LAYER_POLICY_1_PARAM(MemImport)

DECLARE_LAYER_POLICY_1_PARAM(Debug)

DECLARE_LAYER_POLICY_2_PARAM(DepthToSpace)

DECLARE_LAYER_POLICY_2_PARAM(DepthwiseConvolution2d)

DECLARE_LAYER_POLICY_1_PARAM(Dequantize)

DECLARE_LAYER_POLICY_2_PARAM(DetectionPostProcess)

DECLARE_LAYER_POLICY_2_PARAM(ElementwiseUnary)

DECLARE_LAYER_POLICY_2_PARAM(FakeQuantization)

DECLARE_LAYER_POLICY_1_PARAM(Floor)

DECLARE_LAYER_POLICY_2_PARAM(FullyConnected)

DECLARE_LAYER_POLICY_1_PARAM(Gather)

DECLARE_LAYER_POLICY_CUSTOM_PARAM(Input, armnn::LayerBindingId)

DECLARE_LAYER_POLICY_2_PARAM(InstanceNormalization)

DECLARE_LAYER_POLICY_2_PARAM(L2Normalization)

DECLARE_LAYER_POLICY_2_PARAM(LogSoftmax)

DECLARE_LAYER_POLICY_2_PARAM(Lstm)

DECLARE_LAYER_POLICY_1_PARAM(Maximum)

DECLARE_LAYER_POLICY_2_PARAM(Mean)

DECLARE_LAYER_POLICY_1_PARAM(Merge)

DECLARE_LAYER_POLICY_1_PARAM(Minimum)

DECLARE_LAYER_POLICY_1_PARAM(Multiplication)

DECLARE_LAYER_POLICY_2_PARAM(Normalization)

DECLARE_LAYER_POLICY_CUSTOM_PARAM(Output, armnn::LayerBindingId)

DECLARE_LAYER_POLICY_2_PARAM(Pad)

DECLARE_LAYER_POLICY_2_PARAM(Permute)

DECLARE_LAYER_POLICY_2_PARAM(Pooling2d)

DECLARE_LAYER_POLICY_1_PARAM(Prelu)

DECLARE_LAYER_POLICY_2_PARAM(PreCompiled)

DECLARE_LAYER_POLICY_1_PARAM(Quantize)

DECLARE_LAYER_POLICY_1_PARAM(QuantizedLstm)

DECLARE_LAYER_POLICY_1_PARAM(Division)

DECLARE_LAYER_POLICY_2_PARAM(Reshape)

DECLARE_LAYER_POLICY_2_PARAM(Resize)

DECLARE_LAYER_POLICY_2_PARAM(Slice)

DECLARE_LAYER_POLICY_2_PARAM(Softmax)

DECLARE_LAYER_POLICY_2_PARAM(SpaceToBatchNd)

DECLARE_LAYER_POLICY_2_PARAM(SpaceToDepth)

DECLARE_LAYER_POLICY_2_PARAM(Splitter)

DECLARE_LAYER_POLICY_2_PARAM(Stack)

DECLARE_LAYER_POLICY_EXCEPTION_2_PARAM(StandIn)

DECLARE_LAYER_POLICY_2_PARAM(StridedSlice)

DECLARE_LAYER_POLICY_1_PARAM(Subtraction)

DECLARE_LAYER_POLICY_1_PARAM(Switch)

DECLARE_LAYER_POLICY_2_PARAM(TransposeConvolution2d)

// Generic implementation to get the number of input slots for a given layer type.
template<armnn::LayerType Type>
unsigned int GetNumInputs(const armnn::Layer& layer)
{
    return layer.GetNumInputSlots();
}

// Generic implementation to get the number of output slots for a given layer type.
template<armnn::LayerType Type>
unsigned int GetNumOutputs(const armnn::Layer& layer)
{
    return layer.GetNumOutputSlots();
}

// The dummy Concat layer is created with an OriginsDescriptor for two inputs, so report two inputs here.
template<>
unsigned int GetNumInputs<armnn::LayerType::Concat>(const armnn::Layer& layer)
{
    boost::ignore_unused(layer);
    return 2;
}

// Tests that the IsLayerSupported() function returns the correct value.
// We determine the correct value by *trying* to create the relevant workload and seeing if it matches what we expect.
// Returns true if expectations are met, otherwise returns false.
template<typename FactoryType, armnn::DataType DataType, armnn::LayerType Type>
bool IsLayerSupportedTest(FactoryType *factory, Tag<Type>)
{
    using LayerPolicy = LayerTypePolicy<Type, DataType>;
    using LayerType = typename LayerPolicy::Type;
    using LayerDesc = typename LayerPolicy::Desc;
    DummyLayer<LayerType, LayerDesc> layer;

    if (LayerPolicy::IsException) // Don't test exceptions to the rule.
    {
        return true;
    }

    unsigned int numIn = GetNumInputs<Type>(*layer.m_Layer);
    unsigned int numOut = GetNumOutputs<Type>(*layer.m_Layer);

    // Make another dummy layer just to make IsLayerSupported have valid inputs.
    DummyLayer<armnn::ConstantLayer, void> previousLayer;
    // Set output of the previous layer to a dummy tensor.
    armnn::TensorInfo output = MakeDummyTensorInfo<DataType>();
    previousLayer.m_Layer->GetOutputSlot(0).SetTensorInfo(output);
    // Connect all outputs of the previous layer to inputs of tested layer.
    for (unsigned int i = 0; i < numIn; i++)
    {
        armnn::IOutputSlot& previousLayerOutputSlot = previousLayer.m_Layer->GetOutputSlot(0);
        armnn::IInputSlot& layerInputSlot = layer.m_Layer->GetInputSlot(i);
        previousLayerOutputSlot.Connect(layerInputSlot);
    }
    // Set all outputs of the tested layer to a dummy tensor.
    for (unsigned int i = 0; i < numOut; i++)
    {
        layer.m_Layer->GetOutputSlot(i).SetTensorInfo(output);
    }

    std::string layerName = LayerPolicy::NameStr;
    std::string reasonIfUnsupported;
    if (FactoryType::IsLayerSupported(*layer.m_Layer, DataType, reasonIfUnsupported))
    {
        std::string errorMsg = " layer expected support but found none.";
        try
        {
            bool retVal = LayerPolicy::MakeDummyWorkload(factory, numIn, numOut).get() != nullptr;
            BOOST_CHECK_MESSAGE(retVal, layerName << errorMsg);
            return retVal;
        }
        catch(const armnn::InvalidArgumentException& e)
        {
            boost::ignore_unused(e);
            // This is ok since we throw InvalidArgumentException when creating the dummy workload.
            return true;
        }
        catch(const std::exception& e)
        {
            errorMsg = e.what();
            BOOST_TEST_ERROR(layerName << ": " << errorMsg);
            return false;
        }
        catch(...)
        {
            errorMsg = "Unexpected error while testing support for ";
            BOOST_TEST_ERROR(errorMsg << layerName);
            return false;
        }
    }
    else
    {
        std::string errorMsg = " layer expected no support (giving reason: " + reasonIfUnsupported + ") but found some.";
        try
        {
            bool retVal = LayerPolicy::MakeDummyWorkload(factory, numIn, numOut).get() == nullptr;
            BOOST_CHECK_MESSAGE(retVal, layerName << errorMsg);
            return retVal;
        }
        // These two exceptions are ok: for workloads that are partially supported, attempting to instantiate them
        // using parameters that make IsLayerSupported() return false should throw an
        // InvalidArgumentException or UnimplementedException.
        catch(const armnn::InvalidArgumentException& e)
        {
            boost::ignore_unused(e);
            return true;
        }
        catch(const armnn::UnimplementedException& e)
        {
            boost::ignore_unused(e);
            return true;
        }
        catch(const std::exception& e)
        {
            errorMsg = e.what();
            BOOST_TEST_ERROR(layerName << ": " << errorMsg);
            return false;
        }
        catch(...)
        {
            errorMsg = "Unexpected error while testing support for ";
            BOOST_TEST_ERROR(errorMsg << layerName);
            return false;
        }
    }
}
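
// A single layer type can also be checked in isolation. A minimal sketch, assuming
// the reference backend's armnn::RefWorkloadFactory inside a Boost.Test case:
//
//   armnn::RefWorkloadFactory factory;
//   bool ok = IsLayerSupportedTest<armnn::RefWorkloadFactory,
//                                  armnn::DataType::Float32,
//                                  armnn::LayerType::Softmax>(&factory, Tag<armnn::LayerType::Softmax>());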

// Helper function to compute the next type in the LayerType enum.
constexpr armnn::LayerType NextType(armnn::LayerType type)
{
    return static_cast<armnn::LayerType>(static_cast<int>(type) + 1);
}

// Termination function for determining the end of the LayerType enumeration.
template<typename FactoryType, armnn::DataType DataType, armnn::LayerType Type>
bool IsLayerSupportedTestsImpl(FactoryType *factory, Tag<armnn::LayerType::LastLayer>)
{
    return IsLayerSupportedTest<FactoryType, DataType, Type>(factory, Tag<Type>());
}

// Recursive function to test an entry in the LayerType enum and then move on to the next entry.
template<typename FactoryType, armnn::DataType DataType, armnn::LayerType Type>
bool IsLayerSupportedTestsImpl(FactoryType *factory, Tag<Type>)
{
    bool v = IsLayerSupportedTest<FactoryType, DataType, Type>(factory, Tag<Type>());

    return v &&
           IsLayerSupportedTestsImpl<FactoryType, DataType, NextType(Type)>
               (factory, Tag<NextType(Type)>());
}
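
// For example, instantiating IsLayerSupportedTestsImpl for armnn::LayerType::FirstLayer
// tests the first entry and then recurses via Tag<NextType(Type)>; once NextType(Type)
// reaches armnn::LayerType::LastLayer, overload resolution selects the termination
// overload above and the recursion stops.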

// Helper function to pass through to the test framework.
template<typename FactoryType, armnn::DataType DataType>
bool IsLayerSupportedTests(FactoryType *factory)
{
    return IsLayerSupportedTestsImpl<FactoryType, DataType>(factory, Tag<armnn::LayerType::FirstLayer>());
}
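
// A minimal sketch of driving the whole sweep, assuming the reference backend's
// armnn::RefWorkloadFactory and a Boost.Test case body:
//
//   armnn::RefWorkloadFactory factory;
//   BOOST_CHECK(IsLayerSupportedTests<armnn::RefWorkloadFactory, armnn::DataType::Float32>(&factory));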

template<armnn::LayerType Type>
bool TestLayerTypeMatches()
{
    using LayerPolicy = LayerTypePolicy<Type, armnn::DataType::Float32>;
    using LayerType = typename LayerPolicy::Type;
    using LayerDesc = typename LayerPolicy::Desc;
    DummyLayer<LayerType, LayerDesc> layer;

    std::stringstream ss;
    ss << LayerPolicy::NameStr << " layer type mismatches expected layer type value.";
    bool v = Type == layer.m_Layer->GetType();
    BOOST_CHECK_MESSAGE(v, ss.str());
    return v;
}

template<armnn::LayerType Type>
bool LayerTypeMatchesTestImpl(Tag<armnn::LayerType::LastLayer>)
{
    return TestLayerTypeMatches<Type>();
}

template<armnn::LayerType Type>
bool LayerTypeMatchesTestImpl(Tag<Type>)
{
    return TestLayerTypeMatches<Type>() &&
           LayerTypeMatchesTestImpl<NextType(Type)>(Tag<NextType(Type)>());
}
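
// The matching sweep is likewise started from the first enum entry, e.g.:
//
//   BOOST_CHECK(LayerTypeMatchesTestImpl<armnn::LayerType::FirstLayer>(
//       Tag<armnn::LayerType::FirstLayer>()));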

template<typename FactoryType, typename LayerType, armnn::DataType InputDataType, armnn::DataType OutputDataType>
bool IsConvertLayerSupportedTests(std::string& reasonIfUnsupported)
{
    armnn::Graph graph;
    LayerType* const layer = graph.AddLayer<LayerType>("LayerName");

    armnn::Layer* const input = graph.AddLayer<armnn::InputLayer>(0, "input");
    armnn::Layer* const output = graph.AddLayer<armnn::OutputLayer>(0, "output");

    armnn::TensorInfo inputTensorInfo({1, 3, 2, 3}, InputDataType);
    armnn::TensorInfo outputTensorInfo({1, 3, 2, 3}, OutputDataType);

    input->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
    input->GetOutputHandler(0).SetTensorInfo(inputTensorInfo);
    layer->GetOutputSlot(0).Connect(output->GetInputSlot(0));
    layer->GetOutputHandler(0).SetTensorInfo(outputTensorInfo);

    bool result = FactoryType::IsLayerSupported(*layer, InputDataType, reasonIfUnsupported);

    return result;
}
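
// For example, support for the Fp16 -> Fp32 conversion layer could be queried as
// follows (again assuming the reference backend's factory):
//
//   std::string reason;
//   bool supported = IsConvertLayerSupportedTests<armnn::RefWorkloadFactory,
//                                                 armnn::ConvertFp16ToFp32Layer,
//                                                 armnn::DataType::Float16,
//                                                 armnn::DataType::Float32>(reason);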

template<typename FactoryType, armnn::DataType InputDataType, armnn::DataType OutputDataType>
bool IsMeanLayerSupportedTests(std::string& reasonIfUnsupported)
{
    armnn::Graph graph;
    static const std::vector<unsigned> axes = {1, 0};
    armnn::MeanDescriptor desc(axes, false);

    armnn::Layer* const layer = graph.AddLayer<armnn::MeanLayer>(desc, "LayerName");

    armnn::Layer* const input = graph.AddLayer<armnn::InputLayer>(0, "input");
    armnn::Layer* const output = graph.AddLayer<armnn::OutputLayer>(0, "output");

    armnn::TensorInfo inputTensorInfo({4, 3, 2}, InputDataType);
    armnn::TensorInfo outputTensorInfo({2}, OutputDataType);

    input->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
    input->GetOutputHandler(0).SetTensorInfo(inputTensorInfo);
    layer->GetOutputSlot(0).Connect(output->GetInputSlot(0));
    layer->GetOutputHandler(0).SetTensorInfo(outputTensorInfo);

    bool result = FactoryType::IsLayerSupported(*layer, InputDataType, reasonIfUnsupported);

    return result;
}

// Tests that IsMeanSupported fails when input tensor dimensions
// do not match output tensor dimensions when keepDims == true.
template<typename FactoryType, armnn::DataType InputDataType, armnn::DataType OutputDataType>
bool IsMeanLayerNotSupportedTests(std::string& reasonIfUnsupported)
{
    armnn::Graph graph;
    static const std::vector<unsigned> axes = {};
    // Set keepDims == true.
    armnn::MeanDescriptor desc(axes, true);

    armnn::Layer* const layer = graph.AddLayer<armnn::MeanLayer>(desc, "LayerName");

    armnn::Layer* const input = graph.AddLayer<armnn::InputLayer>(0, "input");
    armnn::Layer* const output = graph.AddLayer<armnn::OutputLayer>(0, "output");

    // Mismatching number of tensor dimensions.
    armnn::TensorInfo inputTensorInfo({1, 1, 1, 1}, InputDataType);
    armnn::TensorInfo outputTensorInfo({1, 1}, OutputDataType);

    input->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
    input->GetOutputHandler(0).SetTensorInfo(inputTensorInfo);
    layer->GetOutputSlot(0).Connect(output->GetInputSlot(0));
    layer->GetOutputHandler(0).SetTensorInfo(outputTensorInfo);

    bool result = FactoryType::IsLayerSupported(*layer, InputDataType, reasonIfUnsupported);

    return result;
}

} // anonymous namespace