ArmNN 22.02 - BackendHelper.cpp
1 //
2 // Copyright © 2017 Arm Ltd. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
6 #include <armnn/BackendHelper.hpp>
7 #include <armnn/BackendRegistry.hpp>
8 #include <armnn/Logging.hpp>
9 
10 #include <armnn/backends/IBackendInternal.hpp>
11 
12 namespace armnn
13 {
14 
15 // Return LayerSupportHandle instead of the previous pointer to ILayerSupport.
16 LayerSupportHandle GetILayerSupportByBackendId(const armnn::BackendId& backend)
17 {
18  BackendRegistry& backendRegistry = armnn::BackendRegistryInstance();
19 
20  if (!backendRegistry.IsBackendRegistered(backend))
21  {
22  return LayerSupportHandle(nullptr);
23  }
24 
25  auto factoryFunc = backendRegistry.GetFactory(backend);
26  auto backendObject = factoryFunc();
27  return LayerSupportHandle(backendObject->GetLayerSupport(), backend);
28 }
29 
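// Usage sketch: a minimal example of querying the handle returned above. This is not part
// of the original BackendHelper.cpp; the backend id "CpuRef" and the helper name
// ExampleIsReluSupportedOnCpuRef are illustrative assumptions.
bool ExampleIsReluSupportedOnCpuRef()
{
    LayerSupportHandle handle = GetILayerSupportByBackendId("CpuRef");
    if (!handle.IsBackendRegistered())
    {
        // The backend was not registered, so the handle wraps a nullptr.
        return false;
    }

    TensorInfo tensor({1, 16}, DataType::Float32);
    ActivationDescriptor relu;
    relu.m_Function = ActivationFunction::ReLu;

    std::string reason;
    bool supported = handle.IsActivationSupported(tensor, tensor, relu, reason);
    if (!supported)
    {
        ARMNN_LOG(info) << "ReLu not supported on CpuRef: " << reason;
    }
    return supported;
}
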
30 Optional<const BackendOptions::BackendOption> GetCapability(const std::string& backendCapabilityName,
31  const BackendCapabilities& capabilities)
32 {
33  for (size_t i=0; i < capabilities.GetOptionCount(); i++)
34  {
35  const auto& capability = capabilities.GetOption(i);
36  if (backendCapabilityName == capability.GetName())
37  {
38  return capability;
39  }
40  }
41  return EmptyOptional();
42 }
43 
44 Optional<const BackendOptions::BackendOption> GetCapability(const std::string& backendCapabilityName,
45  const armnn::BackendId& backend)
46 {
47  auto const& backendRegistry = armnn::BackendRegistryInstance();
48  if (backendRegistry.IsBackendRegistered(backend))
49  {
50  auto factoryFunc = backendRegistry.GetFactory(backend);
51  auto backendObject = factoryFunc();
52  auto capabilities = backendObject->GetCapabilities();
53  return GetCapability(backendCapabilityName, capabilities);
54  }
55  return EmptyOptional();
56 }
57 
58 bool HasCapability(const std::string& name, const BackendCapabilities& capabilities)
59 {
60  return GetCapability(name, capabilities).has_value();
61 }
62 
63 bool HasCapability(const std::string& name, const armnn::BackendId& backend)
64 {
65  return GetCapability(name, backend).has_value();
66 }
67 
68 bool HasCapability(const BackendOptions::BackendOption& capability, const BackendCapabilities& capabilities)
69 {
70  for (size_t i=0; i < capabilities.GetOptionCount(); i++)
71  {
72  const auto& backendCapability = capabilities.GetOption(i);
73  if (capability.GetName() == backendCapability.GetName())
74  {
75  if (capability.GetValue().IsBool() && backendCapability.GetValue().IsBool())
76  {
77  return capability.GetValue().AsBool() == backendCapability.GetValue().AsBool();
78  }
79  else if(capability.GetValue().IsFloat() && backendCapability.GetValue().IsFloat())
80  {
81  return capability.GetValue().AsFloat() == backendCapability.GetValue().AsFloat();
82  }
83  else if(capability.GetValue().IsInt() && backendCapability.GetValue().IsInt())
84  {
85  return capability.GetValue().AsInt() == backendCapability.GetValue().AsInt();
86  }
87  else if(capability.GetValue().IsString() && backendCapability.GetValue().IsString())
88  {
89  return capability.GetValue().AsString() == backendCapability.GetValue().AsString();
90  }
91  else if(capability.GetValue().IsUnsignedInt() && backendCapability.GetValue().IsUnsignedInt())
92  {
93  return capability.GetValue().AsUnsignedInt() == backendCapability.GetValue().AsUnsignedInt();
94  }
95  }
96  }
97  return false;
98 }
99 
100 bool HasCapability(const BackendOptions::BackendOption& backendOption, const armnn::BackendId& backend)
101 {
102  auto const& backendRegistry = armnn::BackendRegistryInstance();
103  if (backendRegistry.IsBackendRegistered(backend))
104  {
105  auto factoryFunc = backendRegistry.GetFactory(backend);
106  auto backendObject = factoryFunc();
107  auto capabilities = backendObject->GetCapabilities();
108  return HasCapability(backendOption, capabilities);
109  }
110  return false;
111 }
112 
113 /// Convenience function to check a capability on a backend
114 bool IsCapabilitySupported(const armnn::BackendId& backend, armnn::BackendCapability capability)
115 {
116  bool hasCapability = false;
117  auto const& backendRegistry = armnn::BackendRegistryInstance();
118  if (backendRegistry.IsBackendRegistered(backend))
119  {
120  auto factoryFunc = backendRegistry.GetFactory(backend);
121  auto backendObject = factoryFunc();
122  ARMNN_NO_DEPRECATE_WARN_BEGIN
123  hasCapability = backendObject->HasCapability(capability);
124  ARMNN_NO_DEPRECATE_WARN_END
125  }
126  return hasCapability;
127 }
128 
129 unsigned int GetNumberOfCacheFiles(const armnn::BackendId& backend)
130 {
131  auto const& backendRegistry = armnn::BackendRegistryInstance();
132  if (backendRegistry.IsBackendRegistered(backend))
133  {
134  auto factoryFunc = backendRegistry.GetFactory(backend);
135  auto backendObject = factoryFunc();
136  return backendObject->GetNumberOfCacheFiles();
137  }
138  return 0;
139 }
140 
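// Usage sketch: a minimal example of the capability helpers above. This is not part of the
// original BackendHelper.cpp; the capability names, the backend id in the example call and
// the helper name ExampleLogBackendCapabilities are illustrative assumptions.
void ExampleLogBackendCapabilities(const BackendId& backend)
{
    // Check a boolean capability by name and expected value.
    BackendOptions::BackendOption asyncExec("AsyncExecution", true);
    if (HasCapability(asyncExec, backend))
    {
        ARMNN_LOG(info) << backend.Get() << " supports asynchronous execution.";
    }

    // Raw lookup: an EmptyOptional() is returned when the capability is not listed.
    auto nonConstWeights = GetCapability("NonConstWeights", backend);
    if (nonConstWeights.has_value() && nonConstWeights.value().GetValue().AsBool())
    {
        ARMNN_LOG(info) << backend.Get() << " accepts non-constant weights.";
    }

    ARMNN_LOG(info) << backend.Get() << " uses " << GetNumberOfCacheFiles(backend)
                    << " cache file(s).";
}
// Example call (assumed backend id): ExampleLogBackendCapabilities("GpuAcc");
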
141 bool LayerSupportHandle::IsBackendRegistered() const
142 {
143  if (m_LayerSupport)
144  {
145  return true;
146  }
147 
148  return false;
149 }
150 
151 using TensorInfos = std::vector<TensorInfo>;
152 
153 bool LayerSupportHandle::IsActivationSupported(const TensorInfo& input,
154  const TensorInfo& output,
155  const ActivationDescriptor& descriptor,
156  Optional<std::string&> reasonIfUnsupported)
157 {
158  TensorInfos infos{input, output};
159 
160  return m_LayerSupport->IsLayerSupported(LayerType::Activation,
161  infos,
162  descriptor,
163  EmptyOptional(),
164  EmptyOptional(),
165  reasonIfUnsupported);
166 }
167 
168 bool LayerSupportHandle::IsAdditionSupported(const TensorInfo& input0,
169  const TensorInfo& input1,
170  const TensorInfo& output,
171  Optional<std::string&> reasonIfUnsupported)
172 {
173  TensorInfos infos{input0, input1, output};
174 
175  return m_LayerSupport->IsLayerSupported(LayerType::Addition,
176  infos,
177  BaseDescriptor(),
178  EmptyOptional(),
179  EmptyOptional(),
180  reasonIfUnsupported);
181 }
182 
183 bool LayerSupportHandle::IsArgMinMaxSupported(const TensorInfo& input,
184  const TensorInfo& output,
185  const ArgMinMaxDescriptor& descriptor,
186  Optional<std::string&> reasonIfUnsupported)
187 {
188  TensorInfos infos{input, output};
189 
190  return m_LayerSupport->IsLayerSupported(LayerType::ArgMinMax,
191  infos,
192  descriptor,
193  EmptyOptional(),
194  EmptyOptional(),
195  reasonIfUnsupported);
196 }
197 
198 bool LayerSupportHandle::IsBatchNormalizationSupported(const TensorInfo& input,
199  const TensorInfo& output,
200  const TensorInfo& mean,
201  const TensorInfo& var,
202  const TensorInfo& beta,
203  const TensorInfo& gamma,
204  const BatchNormalizationDescriptor& descriptor,
205  Optional<std::string&> reasonIfUnsupported)
206 {
207  TensorInfos infos{input, output, mean, var, beta, gamma};
208 
209  return m_LayerSupport->IsLayerSupported(LayerType::BatchNormalization,
210  infos,
211  descriptor,
212  EmptyOptional(),
213  EmptyOptional(),
214  reasonIfUnsupported);
215 }
216 
217 bool LayerSupportHandle::IsBatchToSpaceNdSupported(const TensorInfo& input,
218  const TensorInfo& output,
219  const BatchToSpaceNdDescriptor& descriptor,
220  Optional<std::string&> reasonIfUnsupported)
221 {
222  TensorInfos infos{input, output};
223 
224  return m_LayerSupport->IsLayerSupported(LayerType::BatchToSpaceNd,
225  infos,
226  descriptor,
227  EmptyOptional(),
228  EmptyOptional(),
229  reasonIfUnsupported);
230 }
231 
232 bool LayerSupportHandle::IsCastSupported(const TensorInfo& input,
233  const TensorInfo& output,
234  Optional<std::string&> reasonIfUnsupported)
235 {
236  TensorInfos infos{input, output};
237 
238  return m_LayerSupport->IsLayerSupported(LayerType::Cast,
239  infos,
240  BaseDescriptor(),
241  EmptyOptional(),
242  EmptyOptional(),
243  reasonIfUnsupported);
244 }
245 
246 bool LayerSupportHandle::IsChannelShuffleSupported(const TensorInfo &input,
247  const TensorInfo &output,
248  const ChannelShuffleDescriptor &descriptor,
249  Optional<std::string &> reasonIfUnsupported)
250 {
251  TensorInfos infos{input, output};
252 
253  return m_LayerSupport->IsLayerSupported(LayerType::ChannelShuffle,
254  infos,
255  descriptor,
256  EmptyOptional(),
257  EmptyOptional(),
258  reasonIfUnsupported);
259 }
260 
261 bool LayerSupportHandle::IsComparisonSupported(const TensorInfo& input0,
262  const TensorInfo& input1,
263  const TensorInfo& output,
264  const ComparisonDescriptor& descriptor,
265  Optional<std::string&> reasonIfUnsupported)
266 {
267  TensorInfos infos{input0, input1, output};
268 
269  return m_LayerSupport->IsLayerSupported(LayerType::Comparison,
270  infos,
271  descriptor,
272  EmptyOptional(),
273  EmptyOptional(),
274  reasonIfUnsupported);
275 }
276 
277 bool LayerSupportHandle::IsConcatSupported(const std::vector<const TensorInfo*> inputs,
278  const TensorInfo& output,
279  const OriginsDescriptor& descriptor,
280  Optional<std::string&> reasonIfUnsupported)
281 {
282  TensorInfos infos;
283  for (const TensorInfo* inputInfo : inputs)
284  {
285  infos.push_back(*inputInfo);
286  }
287  infos.push_back(output);
288 
289  return m_LayerSupport->IsLayerSupported(LayerType::Concat,
290  infos,
291  descriptor,
292  EmptyOptional(),
293  EmptyOptional(),
294  reasonIfUnsupported);
295 }
296 
297 bool LayerSupportHandle::IsConstantSupported(const TensorInfo& output,
298  Optional<std::string&> reasonIfUnsupported)
299 {
300  TensorInfos infos{output};
301 
302  return m_LayerSupport->IsLayerSupported(LayerType::Constant,
303  infos,
304  BaseDescriptor(),
305  EmptyOptional(),
306  EmptyOptional(),
307  reasonIfUnsupported);
308 }
309 
310 bool LayerSupportHandle::IsConvertBf16ToFp32Supported(const TensorInfo& input,
311  const TensorInfo& output,
312  Optional<std::string&> reasonIfUnsupported)
313 {
314  TensorInfos infos{input, output};
315 
316  return m_LayerSupport->IsLayerSupported(LayerType::ConvertBf16ToFp32,
317  infos,
318  BaseDescriptor(),
319  EmptyOptional(),
320  EmptyOptional(),
321  reasonIfUnsupported);
322 }
323 
324 bool LayerSupportHandle::IsConvertFp32ToBf16Supported(const TensorInfo& input,
325  const TensorInfo& output,
326  Optional<std::string&> reasonIfUnsupported)
327 {
328  TensorInfos infos{input, output};
329 
330  return m_LayerSupport->IsLayerSupported(LayerType::ConvertFp32ToBf16,
331  infos,
332  BaseDescriptor(),
333  EmptyOptional(),
334  EmptyOptional(),
335  reasonIfUnsupported);
336 }
337 
338 bool LayerSupportHandle::IsConvertFp16ToFp32Supported(const TensorInfo& input,
339  const TensorInfo& output,
340  Optional<std::string&> reasonIfUnsupported)
341 {
342  TensorInfos infos{input, output};
343 
344  return m_LayerSupport->IsLayerSupported(LayerType::ConvertFp16ToFp32,
345  infos,
346  BaseDescriptor(),
347  EmptyOptional(),
348  EmptyOptional(),
349  reasonIfUnsupported);
350 }
351 
352 bool LayerSupportHandle::IsConvertFp32ToFp16Supported(const TensorInfo& input,
353  const TensorInfo& output,
354  Optional<std::string&> reasonIfUnsupported)
355 {
356  TensorInfos infos{input, output};
357 
358  return m_LayerSupport->IsLayerSupported(LayerType::ConvertFp32ToFp16,
359  infos,
360  BaseDescriptor(),
361  EmptyOptional(),
362  EmptyOptional(),
363  reasonIfUnsupported);
364 }
365 
366 bool LayerSupportHandle::IsConvolution2dSupported(const TensorInfo& input,
367  const TensorInfo& output,
368  const Convolution2dDescriptor& descriptor,
369  const TensorInfo& weights,
370  const Optional<TensorInfo>& biases,
371  Optional<std::string&> reasonIfUnsupported)
372 {
373  TensorInfo biasesVal = biases.has_value() ? biases.value() : TensorInfo();
374  TensorInfos infos{input, output, weights, biasesVal};
375 
376  return m_LayerSupport->IsLayerSupported(LayerType::Convolution2d,
377  infos,
378  descriptor,
379  EmptyOptional(),
380  EmptyOptional(),
381  reasonIfUnsupported);
382 }
383 
384 bool LayerSupportHandle::IsConvolution3dSupported(const TensorInfo& input,
385  const TensorInfo& output,
386  const Convolution3dDescriptor& descriptor,
387  const TensorInfo& weights,
388  const Optional<TensorInfo>& biases,
389  Optional<std::string&> reasonIfUnsupported)
390 {
391  TensorInfo biasesVal = biases.has_value() ? biases.value() : TensorInfo();
392  TensorInfos infos{input, output, weights, biasesVal};
393 
394  return m_LayerSupport->IsLayerSupported(LayerType::Convolution3d,
395  infos,
396  descriptor,
397  EmptyOptional(),
398  EmptyOptional(),
399  reasonIfUnsupported);
400 }
401 
402 bool LayerSupportHandle::IsDebugSupported(const TensorInfo& input,
403  const TensorInfo& output,
404  Optional<std::string&> reasonIfUnsupported)
405 {
406  TensorInfos infos{input, output};
407 
408  return m_LayerSupport->IsLayerSupported(LayerType::Debug,
409  infos,
410  BaseDescriptor(),
411  EmptyOptional(),
412  EmptyOptional(),
413  reasonIfUnsupported);
414 }
415 
416 bool LayerSupportHandle::IsDepthToSpaceSupported(const TensorInfo& input,
417  const TensorInfo& output,
418  const DepthToSpaceDescriptor& descriptor,
419  Optional<std::string&> reasonIfUnsupported)
420 {
421  TensorInfos infos{input, output};
422 
423  return m_LayerSupport->IsLayerSupported(LayerType::DepthToSpace,
424  infos,
425  descriptor,
426  EmptyOptional(),
427  EmptyOptional(),
428  reasonIfUnsupported);
429 }
430 
431 bool LayerSupportHandle::IsDepthwiseConvolutionSupported(
432  const TensorInfo& input,
433  const TensorInfo& output,
434  const DepthwiseConvolution2dDescriptor& descriptor,
435  const TensorInfo& weights,
436  const Optional<TensorInfo>& biases,
437  Optional<std::string&> reasonIfUnsupported)
438 {
439  TensorInfo biasesVal = biases.has_value() ? biases.value() : TensorInfo();
440  TensorInfos infos{input, output, weights, biasesVal};
441 
442  return m_LayerSupport->IsLayerSupported(LayerType::DepthwiseConvolution2d,
443  infos,
444  descriptor,
445  EmptyOptional(),
446  EmptyOptional(),
447  reasonIfUnsupported);
448 }
449 
450 bool LayerSupportHandle::IsDequantizeSupported(const TensorInfo& input,
451  const TensorInfo& output,
452  Optional<std::string&> reasonIfUnsupported)
453 {
454  TensorInfos infos{input, output};
455 
456  return m_LayerSupport->IsLayerSupported(LayerType::Dequantize,
457  infos,
458  BaseDescriptor(),
459  EmptyOptional(),
460  EmptyOptional(),
461  reasonIfUnsupported);
462 }
463 
464 bool LayerSupportHandle::IsDetectionPostProcessSupported(const TensorInfo& boxEncodings,
465  const TensorInfo& scores,
466  const TensorInfo& anchors,
467  const TensorInfo& detectionBoxes,
468  const TensorInfo& detectionClasses,
469  const TensorInfo& detectionScores,
470  const TensorInfo& numDetections,
471  const DetectionPostProcessDescriptor& descriptor,
472  Optional<std::string&> reasonIfUnsupported)
473 {
474  TensorInfos infos{boxEncodings, scores, anchors, detectionBoxes, detectionClasses, detectionScores, numDetections};
475 
476  return m_LayerSupport->IsLayerSupported(LayerType::DetectionPostProcess,
477  infos,
478  descriptor,
479  EmptyOptional(),
480  EmptyOptional(),
481  reasonIfUnsupported);
482 }
483 
484 bool LayerSupportHandle::IsDilatedDepthwiseConvolutionSupported(
485  const TensorInfo& input,
486  const TensorInfo& output,
487  const DepthwiseConvolution2dDescriptor& descriptor,
488  const TensorInfo& weights,
489  const Optional<TensorInfo>& biases,
490  Optional<std::string&> reasonIfUnsupported)
491 {
492  TensorInfo biasesVal = biases.has_value() ? biases.value() : TensorInfo();
493  TensorInfos infos{input, output, weights, biasesVal};
494 
495  return m_LayerSupport->IsLayerSupported(LayerType::DepthwiseConvolution2d,
496  infos,
497  descriptor,
498  EmptyOptional(),
499  EmptyOptional(),
500  reasonIfUnsupported);
501 }
502 
503 bool LayerSupportHandle::IsDivisionSupported(const TensorInfo& input0,
504  const TensorInfo& input1,
505  const TensorInfo& output,
506  Optional<std::string&> reasonIfUnsupported)
507 {
508  TensorInfos infos{input0, input1, output};
509 
510  return m_LayerSupport->IsLayerSupported(LayerType::Division,
511  infos,
512  BaseDescriptor(),
513  EmptyOptional(),
514  EmptyOptional(),
515  reasonIfUnsupported);
516 }
517 
518 bool LayerSupportHandle::IsElementwiseUnarySupported(const TensorInfo& input,
519  const TensorInfo& output,
520  const ElementwiseUnaryDescriptor& descriptor,
521  Optional<std::string&> reasonIfUnsupported)
522 {
523  TensorInfos infos{input, output};
524 
525  return m_LayerSupport->IsLayerSupported(LayerType::ElementwiseUnary,
526  infos,
527  descriptor,
528  EmptyOptional(),
529  EmptyOptional(),
530  reasonIfUnsupported);
531 }
532 
533 bool LayerSupportHandle::IsFakeQuantizationSupported(const TensorInfo& input,
534  const FakeQuantizationDescriptor& descriptor,
535  Optional<std::string&> reasonIfUnsupported)
536 {
537  TensorInfos infos{input};
538 
539  return m_LayerSupport->IsLayerSupported(LayerType::FakeQuantization,
540  infos,
541  descriptor,
542  EmptyOptional(),
543  EmptyOptional(),
544  reasonIfUnsupported);
545 }
546 
547 bool LayerSupportHandle::IsFillSupported(const TensorInfo& input,
548  const TensorInfo& output,
549  const FillDescriptor& descriptor,
550  Optional<std::string&> reasonIfUnsupported)
551 {
552  TensorInfos infos{input, output};
553 
554  return m_LayerSupport->IsLayerSupported(LayerType::Fill,
555  infos,
556  descriptor,
557  EmptyOptional(),
558  EmptyOptional(),
559  reasonIfUnsupported);
560 }
561 
562 bool LayerSupportHandle::IsFloorSupported(const TensorInfo& input,
563  const TensorInfo& output,
564  Optional<std::string&> reasonIfUnsupported)
565 {
566  TensorInfos infos{input, output};
567 
568  return m_LayerSupport->IsLayerSupported(LayerType::Floor,
569  infos,
570  BaseDescriptor(),
571  EmptyOptional(),
572  EmptyOptional(),
573  reasonIfUnsupported);
574 }
575 
576 bool LayerSupportHandle::IsFullyConnectedSupported(const TensorInfo& input,
577  const TensorInfo& output,
578  const TensorInfo& weights,
579  const TensorInfo& biases,
580  const FullyConnectedDescriptor& descriptor,
581  Optional<std::string&> reasonIfUnsupported)
582 {
583  if(!m_BackendId.IsUndefined())
584  {
585  auto capability = GetCapability("ConstantTensorsAsInputs", m_BackendId);
586  if(!capability.has_value() || capability.value().GetValue().AsBool() == false)
587  {
588  if(!weights.IsConstant())
589  {
590  if (reasonIfUnsupported.has_value())
591  {
592  reasonIfUnsupported.value() =
593  "This backend might not support non constant weights. "
594  "If weights are constant make sure to set IsConstant when creating TensorInfo";
595  }
596 
597  return false;
598  }
599  if(descriptor.m_BiasEnabled)
600  {
601  if(!biases.IsConstant())
602  {
603  if (reasonIfUnsupported.has_value())
604  {
605  reasonIfUnsupported.value() =
606  "This backend might not support non constant weights. "
607  "If weights are constant make sure to set IsConstant when creating TensorInfo";
608  }
609  return false;
610  }
611  }
612 
613  // At the first stage we will only print a warning. This is to give
614  // backend developers a chance to adopt and read weights from input slots.
615  ARMNN_LOG(warning) << "The backend makes use of a deprecated interface to read constant tensors. "
616  "If you are a backend developer please find more information in our "
617  "doxygen documentation on github https://github.com/ARM-software/armnn "
618  "under the keyword 'ConstTensorsAsInputs'.";
619  }
620 
621  if(!descriptor.m_ConstantWeights)
622  {
623  capability = GetCapability("NonConstWeights", m_BackendId);
624  if (capability.has_value() && capability.value().GetValue().AsBool() == true)
625  {
626  return true;
627  }
628  return false;
629  }
630  }
631 
632  TensorInfos infos{input, output, weights, biases};
633 
634  return m_LayerSupport->IsLayerSupported(LayerType::FullyConnected,
635  infos,
636  descriptor,
637  EmptyOptional(),
638  EmptyOptional(),
639  reasonIfUnsupported);
640 }
641 
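// Usage sketch: a minimal example of the constant-weights requirement enforced above. This
// is not part of the original BackendHelper.cpp; the backend id "CpuAcc", the tensor shapes
// and the helper name ExampleCheckFullyConnected are illustrative assumptions.
bool ExampleCheckFullyConnected()
{
    LayerSupportHandle handle = GetILayerSupportByBackendId("CpuAcc");
    if (!handle.IsBackendRegistered())
    {
        return false;
    }

    TensorInfo input({1, 32}, DataType::Float32);
    TensorInfo output({1, 8}, DataType::Float32);
    TensorInfo weights({32, 8}, DataType::Float32);
    TensorInfo biases({8}, DataType::Float32);

    // On a backend without the "ConstantTensorsAsInputs" capability, the IsConstant()
    // checks above reject non-constant weight/bias infos, so flag them before querying.
    weights.SetConstant();
    biases.SetConstant();

    FullyConnectedDescriptor descriptor;
    descriptor.m_BiasEnabled = true;

    std::string reason;
    return handle.IsFullyConnectedSupported(input, output, weights, biases, descriptor, reason);
}
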
642 bool LayerSupportHandle::IsGatherSupported(const TensorInfo& input0,
643  const TensorInfo& input1,
644  const TensorInfo& output,
645  const GatherDescriptor& descriptor,
646  Optional<std::string&> reasonIfUnsupported)
647 {
648  TensorInfos infos{input0, input1, output};
649 
650  return m_LayerSupport->IsLayerSupported(LayerType::Gather,
651  infos,
652  descriptor,
653  EmptyOptional(),
654  EmptyOptional(),
655  reasonIfUnsupported);
656 }
657 
658 bool LayerSupportHandle::IsInputSupported(const TensorInfo& input,
659  Optional<std::string&> reasonIfUnsupported)
660 {
661  TensorInfos infos{input};
662 
663  return m_LayerSupport->IsLayerSupported(LayerType::Input,
664  infos,
665  BaseDescriptor(),
666  EmptyOptional(),
667  EmptyOptional(),
668  reasonIfUnsupported);
669 }
670 
671 bool LayerSupportHandle::IsInstanceNormalizationSupported(
672  const TensorInfo& input,
673  const TensorInfo& output,
674  const InstanceNormalizationDescriptor& descriptor,
675  Optional<std::string&> reasonIfUnsupported)
676 {
677  TensorInfos infos{input, output};
678 
679  return m_LayerSupport->IsLayerSupported(LayerType::InstanceNormalization,
680  infos,
681  descriptor,
682  EmptyOptional(),
683  EmptyOptional(),
684  reasonIfUnsupported);
685 }
686 
687 bool LayerSupportHandle::IsL2NormalizationSupported(const TensorInfo& input,
688  const TensorInfo& output,
689  const L2NormalizationDescriptor& descriptor,
690  Optional<std::string&> reasonIfUnsupported)
691 {
692  TensorInfos infos{input, output};
693 
694  return m_LayerSupport->IsLayerSupported(LayerType::L2Normalization,
695  infos,
696  descriptor,
697  EmptyOptional(),
698  EmptyOptional(),
699  reasonIfUnsupported);
700 }
701 
702 bool LayerSupportHandle::IsLogicalBinarySupported(const TensorInfo& input0,
703  const TensorInfo& input1,
704  const TensorInfo& output,
705  const LogicalBinaryDescriptor& descriptor,
706  Optional<std::string&> reasonIfUnsupported)
707 {
708  TensorInfos infos{input0, input1, output};
709 
710  return m_LayerSupport->IsLayerSupported(LayerType::LogicalBinary,
711  infos,
712  descriptor,
713  EmptyOptional(),
714  EmptyOptional(),
715  reasonIfUnsupported);
716 }
717 
718 bool LayerSupportHandle::IsLogicalUnarySupported(const TensorInfo& input,
719  const TensorInfo& output,
720  const ElementwiseUnaryDescriptor& descriptor,
721  Optional<std::string&> reasonIfUnsupported)
722 {
723  TensorInfos infos{input, output};
724 
725  return m_LayerSupport->IsLayerSupported(LayerType::ElementwiseUnary,
726  infos,
727  descriptor,
728  EmptyOptional(),
729  EmptyOptional(),
730  reasonIfUnsupported);
731 }
732 
733 bool LayerSupportHandle::IsLogSoftmaxSupported(const TensorInfo& input,
734  const TensorInfo& output,
735  const LogSoftmaxDescriptor& descriptor,
736  Optional<std::string&> reasonIfUnsupported)
737 {
738  TensorInfos infos{input, output};
739 
740  return m_LayerSupport->IsLayerSupported(LayerType::LogSoftmax,
741  infos,
742  descriptor,
743  EmptyOptional(),
744  EmptyOptional(),
745  reasonIfUnsupported);
746 }
747 
748 bool LayerSupportHandle::IsLstmSupported(const TensorInfo& input,
749  const TensorInfo& outputStateIn,
750  const TensorInfo& cellStateIn,
751  const TensorInfo& scratchBuffer,
752  const TensorInfo& outputStateOut,
753  const TensorInfo& cellStateOut,
754  const TensorInfo& output,
755  const LstmDescriptor& descriptor,
756  const LstmInputParamsInfo& paramsInfo,
757  Optional<std::string&> reasonIfUnsupported)
758 {
759  TensorInfos infos{input, outputStateIn, cellStateIn, scratchBuffer, outputStateOut, cellStateOut, output};
760 
761  return m_LayerSupport->IsLayerSupported(LayerType::Lstm,
762  infos,
763  descriptor,
764  paramsInfo,
765  EmptyOptional(),
766  reasonIfUnsupported);
767 }
768 
769 bool LayerSupportHandle::IsMaximumSupported(const TensorInfo& input0,
770  const TensorInfo& input1,
771  const TensorInfo& output,
772  Optional<std::string&> reasonIfUnsupported)
773 {
774  TensorInfos infos{input0, input1, output};
775 
776  return m_LayerSupport->IsLayerSupported(LayerType::Maximum,
777  infos,
778  BaseDescriptor(),
779  EmptyOptional(),
780  EmptyOptional(),
781  reasonIfUnsupported);
782 }
783 
784 bool LayerSupportHandle::IsMeanSupported(const TensorInfo& input,
785  const TensorInfo& output,
786  const MeanDescriptor& descriptor,
787  Optional<std::string&> reasonIfUnsupported)
788 {
789  TensorInfos infos{input, output};
790 
791  return m_LayerSupport->IsLayerSupported(LayerType::Mean,
792  infos,
793  descriptor,
794  EmptyOptional(),
795  EmptyOptional(),
796  reasonIfUnsupported);
797 }
798 
799 bool LayerSupportHandle::IsMemCopySupported(const TensorInfo& input,
800  const TensorInfo& output,
801  Optional<std::string&> reasonIfUnsupported)
802 {
803  TensorInfos infos{input, output};
804 
805  return m_LayerSupport->IsLayerSupported(LayerType::MemCopy,
806  infos,
807  BaseDescriptor(),
808  EmptyOptional(),
809  EmptyOptional(),
810  reasonIfUnsupported);
811 }
812 
813 bool LayerSupportHandle::IsMemImportSupported(const TensorInfo& input,
814  const TensorInfo& output,
815  Optional<std::string&> reasonIfUnsupported)
816 {
817  TensorInfos infos{input, output};
818 
819  return m_LayerSupport->IsLayerSupported(LayerType::MemImport,
820  infos,
821  BaseDescriptor(),
822  EmptyOptional(),
823  EmptyOptional(),
824  reasonIfUnsupported);
825 }
826 
827 bool LayerSupportHandle::IsMergeSupported(const TensorInfo& input0,
828  const TensorInfo& input1,
829  const TensorInfo& output,
830  Optional<std::string&> reasonIfUnsupported)
831 {
832  TensorInfos infos{input0, input1, output};
833 
834  return m_LayerSupport->IsLayerSupported(LayerType::Merge,
835  infos,
836  BaseDescriptor(),
837  EmptyOptional(),
838  EmptyOptional(),
839  reasonIfUnsupported);
840 }
841 
842 bool LayerSupportHandle::IsMinimumSupported(const TensorInfo& input0,
843  const TensorInfo& input1,
844  const TensorInfo& output,
845  Optional<std::string&> reasonIfUnsupported)
846 {
847  TensorInfos infos{input0, input1, output};
848 
849  return m_LayerSupport->IsLayerSupported(LayerType::Minimum,
850  infos,
851  BaseDescriptor(),
852  EmptyOptional(),
853  EmptyOptional(),
854  reasonIfUnsupported);
855 }
856 
857 bool LayerSupportHandle::IsMultiplicationSupported(const TensorInfo& input0,
858  const TensorInfo& input1,
859  const TensorInfo& output,
860  Optional<std::string&> reasonIfUnsupported)
861 {
862  TensorInfos infos{input0, input1, output};
863 
864  return m_LayerSupport->IsLayerSupported(LayerType::Multiplication,
865  infos,
866  BaseDescriptor(),
867  EmptyOptional(),
868  EmptyOptional(),
869  reasonIfUnsupported);
870 }
871 
872 bool LayerSupportHandle::IsNormalizationSupported(const TensorInfo& input,
873  const TensorInfo& output,
874  const NormalizationDescriptor& descriptor,
875  Optional<std::string&> reasonIfUnsupported)
876 {
877  TensorInfos infos{input, output};
878 
879  return m_LayerSupport->IsLayerSupported(LayerType::Normalization,
880  infos,
881  descriptor,
882  EmptyOptional(),
883  EmptyOptional(),
884  reasonIfUnsupported);
885 }
886 
887 bool LayerSupportHandle::IsOutputSupported(const TensorInfo& output,
888  Optional<std::string&> reasonIfUnsupported)
889 {
890  TensorInfos infos{output};
891 
892  return m_LayerSupport->IsLayerSupported(LayerType::Output,
893  infos,
894  BaseDescriptor(),
895  EmptyOptional(),
896  EmptyOptional(),
897  reasonIfUnsupported);
898 }
899 
900 bool LayerSupportHandle::IsPadSupported(const TensorInfo& input,
901  const TensorInfo& output,
902  const PadDescriptor& descriptor,
903  Optional<std::string&> reasonIfUnsupported)
904 {
905  TensorInfos infos{input, output};
906 
907  return m_LayerSupport->IsLayerSupported(LayerType::Pad,
908  infos,
909  descriptor,
910  EmptyOptional(),
911  EmptyOptional(),
912  reasonIfUnsupported);
913 }
914 
915 bool LayerSupportHandle::IsPermuteSupported(const TensorInfo& input,
916  const TensorInfo& output,
917  const PermuteDescriptor& descriptor,
918  Optional<std::string&> reasonIfUnsupported)
919 {
920  TensorInfos infos{input, output};
921 
922  return m_LayerSupport->IsLayerSupported(LayerType::Permute,
923  infos,
924  descriptor,
925  EmptyOptional(),
926  EmptyOptional(),
927  reasonIfUnsupported);
928 }
929 
930 bool LayerSupportHandle::IsPooling2dSupported(const TensorInfo& input,
931  const TensorInfo& output,
932  const Pooling2dDescriptor& descriptor,
933  Optional<std::string&> reasonIfUnsupported)
934 {
935  TensorInfos infos{input, output};
936 
937  return m_LayerSupport->IsLayerSupported(LayerType::Pooling2d,
938  infos,
939  descriptor,
940  EmptyOptional(),
941  EmptyOptional(),
942  reasonIfUnsupported);
943 }
944 
945 bool LayerSupportHandle::IsPooling3dSupported(const TensorInfo& input,
946  const TensorInfo& output,
947  const Pooling3dDescriptor& descriptor,
948  Optional<std::string&> reasonIfUnsupported)
949 {
950  TensorInfos infos{input, output};
951 
952  return m_LayerSupport->IsLayerSupported(LayerType::Pooling3d,
953  infos,
954  descriptor,
955  EmptyOptional(),
956  EmptyOptional(),
957  reasonIfUnsupported);
958 }
959 
960 bool LayerSupportHandle::IsPreCompiledSupported(const TensorInfo& input,
961  const PreCompiledDescriptor& descriptor,
962  Optional<std::string&> reasonIfUnsupported)
963 {
964  TensorInfos infos{input};
965 
966  return m_LayerSupport->IsLayerSupported(LayerType::PreCompiled,
967  infos,
968  descriptor,
969  EmptyOptional(),
970  EmptyOptional(),
971  reasonIfUnsupported);
972 }
973 
974 bool LayerSupportHandle::IsPreluSupported(const TensorInfo& input,
975  const TensorInfo& alpha,
976  const TensorInfo& output,
977  Optional<std::string&> reasonIfUnsupported)
978 {
979  TensorInfos infos{input, alpha, output};
980 
981  return m_LayerSupport->IsLayerSupported(LayerType::Prelu,
982  infos,
983  BaseDescriptor(),
984  EmptyOptional(),
985  EmptyOptional(),
986  reasonIfUnsupported);
987 }
988 
989 bool LayerSupportHandle::IsQuantizeSupported(const TensorInfo& input,
990  const TensorInfo& output,
991  Optional<std::string&> reasonIfUnsupported)
992 {
993  TensorInfos infos{input, output};
994 
995  return m_LayerSupport->IsLayerSupported(LayerType::Quantize,
996  infos,
997  BaseDescriptor(),
998  EmptyOptional(),
999  EmptyOptional(),
1000  reasonIfUnsupported);
1001 }
1002 
1003 bool LayerSupportHandle::IsQLstmSupported(const TensorInfo& input,
1004  const TensorInfo& previousOutputIn,
1005  const TensorInfo& previousCellStateIn,
1006  const TensorInfo& outputStateOut,
1007  const TensorInfo& cellStateOut,
1008  const TensorInfo& output,
1009  const QLstmDescriptor& descriptor,
1010  const LstmInputParamsInfo& paramsInfo,
1011  Optional<std::string&> reasonIfUnsupported)
1012 {
1013  TensorInfos infos{input, previousOutputIn, previousCellStateIn, outputStateOut, cellStateOut, output};
1014 
1015  return m_LayerSupport->IsLayerSupported(LayerType::QLstm,
1016  infos,
1017  descriptor,
1018  paramsInfo,
1019  EmptyOptional(),
1020  reasonIfUnsupported);
1021 }
1022 
1023 bool LayerSupportHandle::IsQuantizedLstmSupported(const TensorInfo& input,
1024  const TensorInfo& previousCellStateIn,
1025  const TensorInfo& previousOutputIn,
1026  const TensorInfo& cellStateOut,
1027  const TensorInfo& output,
1028  const QuantizedLstmInputParamsInfo& paramsInfo,
1029  Optional<std::string&> reasonIfUnsupported)
1030 {
1031  TensorInfos infos{input, previousCellStateIn, previousOutputIn, cellStateOut, output};
1032 
1033  return m_LayerSupport->IsLayerSupported(LayerType::QuantizedLstm,
1034  infos,
1035  BaseDescriptor(),
1036  EmptyOptional(),
1037  paramsInfo,
1038  reasonIfUnsupported);
1039 }
1040 
1041 bool LayerSupportHandle::IsRankSupported(const TensorInfo& input,
1042  const TensorInfo& output,
1043  Optional<std::string&> reasonIfUnsupported)
1044 {
1045  TensorInfos infos{input, output};
1046 
1047  return m_LayerSupport->IsLayerSupported(LayerType::Rank,
1048  infos,
1049  BaseDescriptor(),
1050  EmptyOptional(),
1051  EmptyOptional(),
1052  reasonIfUnsupported);
1053 }
1054 
1055 bool LayerSupportHandle::IsReduceSupported(const TensorInfo& input,
1056  const TensorInfo& output,
1057  const ReduceDescriptor& descriptor,
1058  Optional<std::string&> reasonIfUnsupported)
1059 {
1060  TensorInfos infos{input, output};
1061 
1062  return m_LayerSupport->IsLayerSupported(LayerType::Reduce,
1063  infos,
1064  descriptor,
1065  EmptyOptional(),
1066  EmptyOptional(),
1067  reasonIfUnsupported);
1068 }
1069 
1070 bool LayerSupportHandle::IsReshapeSupported(const TensorInfo& input,
1071  const TensorInfo& output,
1072  const ReshapeDescriptor& descriptor,
1073  Optional<std::string&> reasonIfUnsupported)
1074 {
1075  TensorInfos infos{input, output};
1076 
1077  return m_LayerSupport->IsLayerSupported(LayerType::Reshape,
1078  infos,
1079  descriptor,
1080  EmptyOptional(),
1081  EmptyOptional(),
1082  reasonIfUnsupported);
1083 }
1084 
1085 bool LayerSupportHandle::IsResizeSupported(const TensorInfo& input,
1086  const TensorInfo& output,
1087  const ResizeDescriptor& descriptor,
1088  Optional<std::string&> reasonIfUnsupported)
1089 {
1090  TensorInfos infos{input, output};
1091 
1092  return m_LayerSupport->IsLayerSupported(LayerType::Resize,
1093  infos,
1094  descriptor,
1095  EmptyOptional(),
1096  EmptyOptional(),
1097  reasonIfUnsupported);
1098 }
1099 
1100 bool LayerSupportHandle::IsShapeSupported(const TensorInfo& input,
1101  const TensorInfo& output,
1102  Optional<std::string&> reasonIfUnsupported)
1103 {
1104  TensorInfos infos{input, output};
1105 
1106  return m_LayerSupport->IsLayerSupported(LayerType::Shape,
1107  infos,
1108  BaseDescriptor(),
1109  EmptyOptional(),
1110  EmptyOptional(),
1111  reasonIfUnsupported);
1112 }
1113 
1114 bool LayerSupportHandle::IsSliceSupported(const TensorInfo& input,
1115  const TensorInfo& output,
1116  const SliceDescriptor& descriptor,
1117  Optional<std::string&> reasonIfUnsupported)
1118 {
1119  TensorInfos infos{input, output};
1120 
1121  return m_LayerSupport->IsLayerSupported(LayerType::Slice,
1122  infos,
1123  descriptor,
1124  EmptyOptional(),
1125  EmptyOptional(),
1126  reasonIfUnsupported);
1127 }
1128 
1129 bool LayerSupportHandle::IsSoftmaxSupported(const TensorInfo& input,
1130  const TensorInfo& output,
1131  const SoftmaxDescriptor& descriptor,
1132  Optional<std::string&> reasonIfUnsupported)
1133 {
1134  TensorInfos infos{input, output};
1135 
1136  return m_LayerSupport->IsLayerSupported(LayerType::Softmax,
1137  infos,
1138  descriptor,
1139  EmptyOptional(),
1140  EmptyOptional(),
1141  reasonIfUnsupported);
1142 }
1143 
1144 bool LayerSupportHandle::IsSpaceToBatchNdSupported(const TensorInfo& input,
1145  const TensorInfo& output,
1146  const SpaceToBatchNdDescriptor& descriptor,
1147  Optional<std::string&> reasonIfUnsupported)
1148 {
1149  TensorInfos infos{input, output};
1150 
1151  return m_LayerSupport->IsLayerSupported(LayerType::SpaceToBatchNd,
1152  infos,
1153  descriptor,
1154  EmptyOptional(),
1155  EmptyOptional(),
1156  reasonIfUnsupported);
1157 }
1158 
1159 bool LayerSupportHandle::IsSpaceToDepthSupported(const TensorInfo& input,
1160  const TensorInfo& output,
1161  const SpaceToDepthDescriptor& descriptor,
1162  Optional<std::string&> reasonIfUnsupported)
1163 {
1164  TensorInfos infos{input, output};
1165 
1166  return m_LayerSupport->IsLayerSupported(LayerType::SpaceToDepth,
1167  infos,
1168  descriptor,
1169  EmptyOptional(),
1170  EmptyOptional(),
1171  reasonIfUnsupported);
1172 }
1173 
1174 bool LayerSupportHandle::IsSplitterSupported(const TensorInfo& input,
1175  const std::vector<std::reference_wrapper<TensorInfo>>& outputs,
1176  const ViewsDescriptor& descriptor,
1177  Optional<std::string&> reasonIfUnsupported)
1178 {
1179  TensorInfos infos{input};
1180  for (TensorInfo outInfo : outputs)
1181  {
1182  infos.push_back(outInfo);
1183  }
1184 
1185  return m_LayerSupport->IsLayerSupported(LayerType::Splitter,
1186  infos,
1187  descriptor,
1188  EmptyOptional(),
1189  EmptyOptional(),
1190  reasonIfUnsupported);
1191 }
1192 
1193 bool LayerSupportHandle::IsStackSupported(const std::vector<const TensorInfo*>& inputs,
1194  const TensorInfo& output,
1195  const StackDescriptor& descriptor,
1196  Optional<std::string&> reasonIfUnsupported)
1197 {
1198  TensorInfos infos;
1199  for (const TensorInfo* inputInfo : inputs)
1200  {
1201  infos.push_back(*inputInfo);
1202  }
1203  infos.push_back(output);
1204 
1205  return m_LayerSupport->IsLayerSupported(LayerType::Stack,
1206  infos,
1207  descriptor,
1208  EmptyOptional(),
1209  EmptyOptional(),
1210  reasonIfUnsupported);
1211 }
1212 
1213 bool LayerSupportHandle::IsStandInSupported(const std::vector<const TensorInfo*>& inputs,
1214  const std::vector<const TensorInfo*>& outputs,
1215  const StandInDescriptor& descriptor,
1216  Optional<std::string&> reasonIfUnsupported)
1217 {
1218  TensorInfos infos;
1219  for (const TensorInfo* inputInfo : inputs)
1220  {
1221  infos.push_back(*inputInfo);
1222  }
1223  for (const TensorInfo* outputInfo : outputs)
1224  {
1225  infos.push_back(*outputInfo);
1226  }
1227 
1228  return m_LayerSupport->IsLayerSupported(LayerType::StandIn,
1229  infos,
1230  descriptor,
1231  EmptyOptional(),
1232  EmptyOptional(),
1233  reasonIfUnsupported);
1234 }
1235 
1236 
1237 bool LayerSupportHandle::IsStridedSliceSupported(const TensorInfo& input,
1238  const TensorInfo& output,
1239  const StridedSliceDescriptor& descriptor,
1240  Optional<std::string&> reasonIfUnsupported)
1241 {
1242  TensorInfos infos{input, output};
1243 
1244  return m_LayerSupport->IsLayerSupported(LayerType::StridedSlice,
1245  infos,
1246  descriptor,
1247  EmptyOptional(),
1248  EmptyOptional(),
1249  reasonIfUnsupported);
1250 }
1251 
1252 bool LayerSupportHandle::IsSubtractionSupported(const TensorInfo& input0,
1253  const TensorInfo& input1,
1254  const TensorInfo& output,
1255  Optional<std::string&> reasonIfUnsupported)
1256 {
1257  TensorInfos infos{input0, input1, output};
1258 
1259  return m_LayerSupport->IsLayerSupported(LayerType::Subtraction,
1260  infos,
1261  BaseDescriptor(),
1262  EmptyOptional(),
1263  EmptyOptional(),
1264  reasonIfUnsupported);
1265 }
1266 
1267 bool LayerSupportHandle::IsSwitchSupported(const TensorInfo& input0,
1268  const TensorInfo& input1,
1269  const TensorInfo& output0,
1270  const TensorInfo& output1,
1271  Optional<std::string&> reasonIfUnsupported)
1272 {
1273  TensorInfos infos{input0, input1, output0, output1};
1274 
1275  return m_LayerSupport->IsLayerSupported(LayerType::Switch,
1276  infos,
1277  BaseDescriptor(),
1278  EmptyOptional(),
1279  EmptyOptional(),
1280  reasonIfUnsupported);
1281 }
1282 
1283 bool LayerSupportHandle::IsTransposeConvolution2dSupported(
1284  const TensorInfo& input,
1285  const TensorInfo& output,
1286  const TransposeConvolution2dDescriptor& descriptor,
1287  const TensorInfo& weights,
1288  const Optional<TensorInfo>& biases,
1289  Optional<std::string&> reasonIfUnsupported)
1290 {
1291  TensorInfo biasesVal = biases.has_value() ? biases.value() : TensorInfo();
1292  TensorInfos infos{input, output, weights, biasesVal};
1293 
1294  return m_LayerSupport->IsLayerSupported(LayerType::TransposeConvolution2d,
1295  infos,
1296  descriptor,
1297  EmptyOptional(),
1298  EmptyOptional(),
1299  reasonIfUnsupported);
1300 }
1301 
1302 bool LayerSupportHandle::IsTransposeSupported(const TensorInfo& input,
1303  const TensorInfo& output,
1304  const TransposeDescriptor& descriptor,
1305  Optional<std::string&> reasonIfUnsupported)
1306 {
1307  TensorInfos infos{input, output};
1308 
1309  return m_LayerSupport->IsLayerSupported(LayerType::Transpose,
1310  infos,
1311  descriptor,
1312  EmptyOptional(),
1313  EmptyOptional(),
1314  reasonIfUnsupported);
1315 }
1316 
1317 bool LayerSupportHandle::IsUnidirectionalSequenceLstmSupported(const TensorInfo& input,
1318  const TensorInfo& outputStateIn,
1319  const TensorInfo& cellStateIn,
1320  const TensorInfo& output,
1321  const Optional<TensorInfo>& hiddenStateOutput,
1322  const Optional<TensorInfo>& cellStateOutput,
1323  const LstmDescriptor& descriptor,
1324  const LstmInputParamsInfo& paramsInfo,
1325  Optional<std::string&> reasonIfUnsupported)
1326 {
1327  TensorInfo hiddenStateOutputVal = hiddenStateOutput.has_value() ? hiddenStateOutput.value() : TensorInfo();
1328  TensorInfo cellStateOutputVal = cellStateOutput.has_value() ? cellStateOutput.value() : TensorInfo();
1329  TensorInfos infos{input, outputStateIn, cellStateIn, hiddenStateOutputVal, cellStateOutputVal, output};
1330 
1331  return m_LayerSupport->IsLayerSupported(LayerType::UnidirectionalSequenceLstm,
1332  infos,
1333  descriptor,
1334  paramsInfo,
1335  EmptyOptional(),
1336  reasonIfUnsupported);
1337 }
1338 
1339 }