ArmNN
 21.08
BackendHelper.cpp
Go to the documentation of this file.
1 //
2 // Copyright © 2017 Arm Ltd. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
8 #include <armnn/Logging.hpp>
9 
11 
12 namespace armnn
13 {
14 
15 // Return LayerSupportHandle instead of the previous pointer to ILayerSupport.
17 {
19 
20  if (!backendRegistry.IsBackendRegistered(backend))
21  {
22  return LayerSupportHandle(nullptr);
23  }
24 
25  auto factoryFunc = backendRegistry.GetFactory(backend);
26  auto backendObject = factoryFunc();
27  return LayerSupportHandle(backendObject->GetLayerSupport(), backend);
28 }
29 
30 Optional<const BackendOptions::BackendOption> GetCapability(const std::string& backendCapabilityName,
31  const BackendCapabilities& capabilities)
32 {
33  for (size_t i=0; i < capabilities.GetOptionCount(); i++)
34  {
35  const auto& capability = capabilities.GetOption(i);
36  if (backendCapabilityName == capability.GetName())
37  {
38  return capability;
39  }
40  }
41  return EmptyOptional();
42 }
43 
44 Optional<const BackendOptions::BackendOption> GetCapability(const std::string& backendCapabilityName,
45  const armnn::BackendId& backend)
46 {
47  auto const& backendRegistry = armnn::BackendRegistryInstance();
48  if (backendRegistry.IsBackendRegistered(backend))
49  {
50  auto factoryFunc = backendRegistry.GetFactory(backend);
51  auto backendObject = factoryFunc();
52  auto capabilities = backendObject->GetCapabilities();
53  return GetCapability(backendCapabilityName, capabilities);
54  }
55  return EmptyOptional();
56 }
57 
58 bool HasCapability(const std::string& name, const BackendCapabilities& capabilities)
59 {
60  return GetCapability(name, capabilities).has_value();
61 }
62 
63 bool HasCapability(const std::string& name, const armnn::BackendId& backend)
64 {
65  return GetCapability(name, backend).has_value();
66 }
67 
68 bool HasCapability(const BackendOptions::BackendOption& capability, const BackendCapabilities& capabilities)
69 {
70  for (size_t i=0; i < capabilities.GetOptionCount(); i++)
71  {
72  const auto& backendCapability = capabilities.GetOption(i);
73  if (capability.GetName() == backendCapability.GetName())
74  {
75  if (capability.GetValue().IsBool() && backendCapability.GetValue().IsBool())
76  {
77  return capability.GetValue().AsBool() == backendCapability.GetValue().AsBool();
78  }
79  else if(capability.GetValue().IsFloat() && backendCapability.GetValue().IsFloat())
80  {
81  return capability.GetValue().AsFloat() == backendCapability.GetValue().AsFloat();
82  }
83  else if(capability.GetValue().IsInt() && backendCapability.GetValue().IsInt())
84  {
85  return capability.GetValue().AsInt() == backendCapability.GetValue().AsInt();
86  }
87  else if(capability.GetValue().IsString() && backendCapability.GetValue().IsString())
88  {
89  return capability.GetValue().AsString() == backendCapability.GetValue().AsString();
90  }
91  else if(capability.GetValue().IsUnsignedInt() && backendCapability.GetValue().IsUnsignedInt())
92  {
93  return capability.GetValue().AsUnsignedInt() == backendCapability.GetValue().AsUnsignedInt();
94  }
95  }
96  }
97  return false;
98 }
99 
100 bool HasCapability(const BackendOptions::BackendOption& backendOption, const armnn::BackendId& backend)
101 {
102  auto const& backendRegistry = armnn::BackendRegistryInstance();
103  if (backendRegistry.IsBackendRegistered(backend))
104  {
105  auto factoryFunc = backendRegistry.GetFactory(backend);
106  auto backendObject = factoryFunc();
107  auto capabilities = backendObject->GetCapabilities();
108  return HasCapability(backendOption, capabilities);
109  }
110  return false;
111 }
112 
113 /// Convenience function to check a capability on a backend
115 {
116  bool hasCapability = false;
117  auto const& backendRegistry = armnn::BackendRegistryInstance();
118  if (backendRegistry.IsBackendRegistered(backend))
119  {
120  auto factoryFunc = backendRegistry.GetFactory(backend);
121  auto backendObject = factoryFunc();
123  hasCapability = backendObject->HasCapability(capability);
125  }
126  return hasCapability;
127 }
128 
130 {
131  if (m_LayerSupport)
132  {
133  return true;
134  }
135 
136  return false;
137 }
138 
139 
141  const TensorInfo& output,
142  Optional<std::string&> reasonIfUnsupported)
143 {
144  // Call the IsXXXLayerSupport function of the specific backend.
145  return m_LayerSupport->IsAbsSupported(input, output, reasonIfUnsupported.value());
146 }
147 
149  const TensorInfo& output,
150  const ActivationDescriptor& descriptor,
151  Optional<std::string&> reasonIfUnsupported)
152 {
153  return m_LayerSupport->IsActivationSupported(input, output, descriptor, reasonIfUnsupported.value());
154 }
155 
157  const TensorInfo& input1,
158  const TensorInfo& output,
159  Optional<std::string&> reasonIfUnsupported)
160 {
161  return m_LayerSupport->IsAdditionSupported(input0, input1, output, reasonIfUnsupported.value());
162 }
163 
165  const TensorInfo& output,
166  const ArgMinMaxDescriptor& descriptor,
167  Optional<std::string&> reasonIfUnsupported)
168 {
169  return m_LayerSupport->IsArgMinMaxSupported(input, output, descriptor, reasonIfUnsupported.value());
170 }
171 
173  const TensorInfo& output,
174  const TensorInfo& mean,
175  const TensorInfo& var,
176  const TensorInfo& beta,
177  const TensorInfo& gamma,
178  const BatchNormalizationDescriptor& descriptor,
179  Optional<std::string&> reasonIfUnsupported)
180 {
181  return m_LayerSupport->IsBatchNormalizationSupported(input,
182  output,
183  mean,
184  var,
185  beta,
186  gamma,
187  descriptor,
188  reasonIfUnsupported.value());
189 }
190 
192  const TensorInfo& output,
193  const BatchToSpaceNdDescriptor& descriptor,
194  Optional<std::string&> reasonIfUnsupported)
195 {
196  return m_LayerSupport->IsBatchToSpaceNdSupported(input,
197  output,
198  descriptor,
199  reasonIfUnsupported.value());
200 }
201 
203  const TensorInfo& output,
204  Optional<std::string&> reasonIfUnsupported)
205 {
206  return m_LayerSupport->IsCastSupported(input, output, reasonIfUnsupported.value());
207 }
208 
210  const TensorInfo& input1,
211  const TensorInfo& output,
212  const ComparisonDescriptor& descriptor,
213  Optional<std::string&> reasonIfUnsupported)
214 {
215  return m_LayerSupport->IsComparisonSupported(input0, input1, output, descriptor, reasonIfUnsupported.value());
216 }
217 
218 bool LayerSupportHandle::IsConcatSupported(const std::vector<const TensorInfo*> inputs,
219  const TensorInfo& output,
220  const OriginsDescriptor& descriptor,
221  Optional<std::string&> reasonIfUnsupported)
222 {
223  return m_LayerSupport->IsConcatSupported(inputs, output, descriptor, reasonIfUnsupported.value());
224 }
225 
227  Optional<std::string&> reasonIfUnsupported)
228 {
229  return m_LayerSupport->IsConstantSupported(output, reasonIfUnsupported.value());
230 }
231 
233  const TensorInfo& output,
234  Optional<std::string&> reasonIfUnsupported)
235 {
236  return m_LayerSupport->IsConvertBf16ToFp32Supported(input, output, reasonIfUnsupported.value());
237 }
238 
240  const TensorInfo& output,
241  Optional<std::string&> reasonIfUnsupported)
242 {
243  return m_LayerSupport->IsConvertFp32ToBf16Supported(input, output, reasonIfUnsupported.value());
244 }
245 
247  const TensorInfo& output,
248  Optional<std::string&> reasonIfUnsupported)
249 {
250  return m_LayerSupport->IsConvertFp16ToFp32Supported(input, output, reasonIfUnsupported.value());
251 }
252 
254  const TensorInfo& output,
255  Optional<std::string&> reasonIfUnsupported)
256 {
257  return m_LayerSupport->IsConvertFp32ToFp16Supported(input, output, reasonIfUnsupported.value());
258 }
259 
261  const TensorInfo& output,
262  const Convolution2dDescriptor& descriptor,
263  const TensorInfo& weights,
264  const Optional<TensorInfo>& biases,
265  Optional<std::string&> reasonIfUnsupported)
266 {
267  return m_LayerSupport->IsConvolution2dSupported(input,
268  output,
269  descriptor,
270  weights,
271  biases,
272  reasonIfUnsupported.value());
273 }
274 
276  const TensorInfo& output,
277  Optional<std::string&> reasonIfUnsupported)
278 {
279  return m_LayerSupport->IsDebugSupported(input, output, reasonIfUnsupported.value());
280 }
281 
283  const TensorInfo& output,
284  const DepthToSpaceDescriptor& descriptor,
285  Optional<std::string&> reasonIfUnsupported)
286 {
287  return m_LayerSupport->IsDepthToSpaceSupported(input, output, descriptor, reasonIfUnsupported.value());
288 }
289 
291  const TensorInfo& input,
292  const TensorInfo& output,
293  const DepthwiseConvolution2dDescriptor& descriptor,
294  const TensorInfo& weights,
295  const Optional<TensorInfo>& biases,
296  Optional<std::string&> reasonIfUnsupported)
297 {
298  return m_LayerSupport->IsDepthwiseConvolutionSupported(input,
299  output,
300  descriptor,
301  weights,
302  biases,
303  reasonIfUnsupported.value());
304 }
305 
307  const TensorInfo& output,
308  Optional<std::string&> reasonIfUnsupported)
309 {
310  return m_LayerSupport->IsDequantizeSupported(input, output, reasonIfUnsupported.value());
311 }
312 
314  const TensorInfo& scores,
315  const TensorInfo& anchors,
316  const TensorInfo& detectionBoxes,
317  const TensorInfo& detectionClasses,
318  const TensorInfo& detectionScores,
319  const TensorInfo& numDetections,
320  const DetectionPostProcessDescriptor& descriptor,
321  Optional<std::string&> reasonIfUnsupported)
322 {
323  return m_LayerSupport->IsDetectionPostProcessSupported(boxEncodings,
324  scores,
325  anchors,
326  detectionBoxes,
327  detectionClasses,
328  detectionScores,
329  numDetections,
330  descriptor,
331  reasonIfUnsupported);
332 }
333 
335  const TensorInfo& input,
336  const TensorInfo& output,
337  const DepthwiseConvolution2dDescriptor& descriptor,
338  const TensorInfo& weights,
339  const Optional<TensorInfo>& biases,
340  Optional<std::string&> reasonIfUnsupported)
341 {
342  return m_LayerSupport->IsDilatedDepthwiseConvolutionSupported(input,
343  output,
344  descriptor,
345  weights,
346  biases,
347  reasonIfUnsupported);
348 }
349 
351  const TensorInfo& input1,
352  const TensorInfo& output,
353  Optional<std::string&> reasonIfUnsupported)
354 {
355  return m_LayerSupport->IsDivisionSupported(input0, input1, output, reasonIfUnsupported.value());
356 }
357 
359  const TensorInfo& output,
360  const ElementwiseUnaryDescriptor& descriptor,
361  Optional<std::string&> reasonIfUnsupported)
362 {
363  return m_LayerSupport->IsElementwiseUnarySupported(input, output, descriptor, reasonIfUnsupported.value());
364 }
365 
367  const TensorInfo& input1,
368  const TensorInfo& output,
369  Optional<std::string&> reasonIfUnsupported)
370 {
371  return m_LayerSupport->IsEqualSupported(input0, input1, output, reasonIfUnsupported.value());
372 }
373 
375  const FakeQuantizationDescriptor& descriptor,
376  Optional<std::string&> reasonIfUnsupported)
377 {
378  return m_LayerSupport->IsFakeQuantizationSupported(input, descriptor, reasonIfUnsupported.value());
379 }
380 
382  const TensorInfo& output,
383  const FillDescriptor& descriptor,
384  Optional<std::string&> reasonIfUnsupported)
385 {
386  return m_LayerSupport->IsFillSupported(input, output, descriptor, reasonIfUnsupported.value());
387 }
388 
390  const TensorInfo& output,
391  Optional<std::string&> reasonIfUnsupported)
392 {
393  return m_LayerSupport->IsFloorSupported(input, output, reasonIfUnsupported.value());
394 }
395 
397  const TensorInfo& output,
398  const TensorInfo& weights,
399  const TensorInfo& biases,
400  const FullyConnectedDescriptor& descriptor,
401  Optional<std::string&> reasonIfUnsupported)
402 {
403  if(!m_BackendId.IsUndefined())
404  {
405  auto capability = GetCapability("ConstantTensorsAsInputs", m_BackendId);
406  if(!capability.has_value() || capability.value().GetValue().AsBool() == false)
407  {
408  if(!weights.IsConstant())
409  {
410  reasonIfUnsupported.value() =
411  "This backend might not support non constant weights. "
412  "If weights are constant make sure to set IsConstant when creating TensorInfo";
413  return false;
414  }
415  if(descriptor.m_BiasEnabled)
416  {
417  if(!biases.IsConstant())
418  {
419  reasonIfUnsupported.value() =
420  "This backend might not support non constant bias. "
421  "If bias are constant make sure to set IsConstant when creating TensorInfo";
422  return false;
423  }
424  }
425 
426  // At the first stage we will only print a warning. this is to give
427  // backend developers a chance to adopt and read weights from input slots.
428  ARMNN_LOG(warning) << "The backend makes use of a deprecated interface to read constant tensors. "
429  "If you are a backend developer please find more information in our "
430  "doxygen documentation on github https://github.com/ARM-software/armnn "
431  "under the keyword 'ConstTensorsAsInputs'.";
432  }
433 
434  if(!descriptor.m_ConstantWeights)
435  {
436  auto capability = GetCapability("NonConstWeights", m_BackendId);
437  if (capability.has_value() && capability.value().GetValue().AsBool() == true)
438  {
439  return true;
440  }
441  return false;
442  }
443  }
444 
445  return m_LayerSupport->IsFullyConnectedSupported(input,
446  output,
447  weights,
448  biases,
449  descriptor,
450  reasonIfUnsupported.value());
451 }
452 
454  const TensorInfo& input1,
455  const TensorInfo& output,
456  Optional<std::string&> reasonIfUnsupported)
457 {
458  return m_LayerSupport->IsGatherSupported(input0, input1, output, reasonIfUnsupported.value());
459 }
460 
462  const TensorInfo& input1,
463  const TensorInfo& output,
464  const GatherDescriptor& descriptor,
465  Optional<std::string&> reasonIfUnsupported)
466 {
467  return m_LayerSupport->IsGatherSupported(input0, input1, output, descriptor, reasonIfUnsupported.value());
468 }
469 
471  const TensorInfo& input1,
472  const TensorInfo& ouput,
473  Optional<std::string&> reasonIfUnsupported)
474 {
475  return m_LayerSupport->IsGreaterSupported(input0, input1, ouput, reasonIfUnsupported.value());
476 }
477 
479  Optional<std::string&> reasonIfUnsupported)
480 {
481  return m_LayerSupport->IsInputSupported(input, reasonIfUnsupported.value());
482 }
483 
485  const TensorInfo& input,
486  const TensorInfo& output,
487  const InstanceNormalizationDescriptor& descriptor,
488  Optional<std::string&> reasonIfUnsupported)
489 {
490  return m_LayerSupport->IsInstanceNormalizationSupported(input, output, descriptor, reasonIfUnsupported.value());
491 }
492 
494  const TensorInfo& output,
495  const L2NormalizationDescriptor& descriptor,
496  Optional<std::string&> reasonIfUnsupported)
497 {
498  return m_LayerSupport->IsL2NormalizationSupported(input, output, descriptor, reasonIfUnsupported.value());
499 }
500 
502  const TensorInfo& input1,
503  const TensorInfo& output,
504  const LogicalBinaryDescriptor& descriptor,
505  Optional<std::string&> reasonIfUnsupported)
506 {
507  return m_LayerSupport->IsLogicalBinarySupported(input0,
508  input1,
509  output,
510  descriptor,
511  reasonIfUnsupported.value());
512 }
513 
515  const TensorInfo& output,
516  const ElementwiseUnaryDescriptor& descriptor,
517  Optional<std::string&> reasonIfUnsupported)
518 {
519  return m_LayerSupport->IsLogicalUnarySupported(input, output, descriptor, reasonIfUnsupported.value());
520 }
521 
523  const TensorInfo& output,
524  const LogSoftmaxDescriptor& descriptor,
525  Optional<std::string&> reasonIfUnsupported)
526 {
527  return m_LayerSupport->IsLogSoftmaxSupported(input, output, descriptor, reasonIfUnsupported.value());
528 }
529 
531  const TensorInfo& outputStateIn,
532  const TensorInfo& cellStateIn,
533  const TensorInfo& scratchBuffer,
534  const TensorInfo& outputStateOut,
535  const TensorInfo& cellStateOut,
536  const TensorInfo& output,
537  const LstmDescriptor& descriptor,
538  const LstmInputParamsInfo& paramsInfo,
539  Optional<std::string&> reasonIfUnsupported)
540 {
541  return m_LayerSupport->IsLstmSupported(input,
542  outputStateIn,
543  cellStateIn,
544  scratchBuffer,
545  outputStateOut,
546  cellStateOut,
547  output,
548  descriptor,
549  paramsInfo,
550  reasonIfUnsupported);
551 }
552 
554  const TensorInfo& input1,
555  const TensorInfo& output,
556  Optional<std::string&> reasonIfUnsupported)
557 {
558  return m_LayerSupport->IsMaximumSupported(input0, input1, output, reasonIfUnsupported.value());
559 }
560 
562  const TensorInfo& output,
563  const MeanDescriptor& descriptor,
564  Optional<std::string&> reasonIfUnsupported)
565 {
566  return m_LayerSupport->IsMeanSupported(input, output, descriptor, reasonIfUnsupported.value());
567 }
568 
570  const TensorInfo& output,
571  Optional<std::string&> reasonIfUnsupported)
572 {
573  return m_LayerSupport->IsMemCopySupported(input, output, reasonIfUnsupported.value());
574 }
575 
577  const TensorInfo& output,
578  Optional<std::string&> reasonIfUnsupported)
579 {
580  return m_LayerSupport->IsMemImportSupported(input, output, reasonIfUnsupported.value());
581 }
582 
584  const TensorInfo& input1,
585  const TensorInfo& output,
586  Optional<std::string&> reasonIfUnsupported)
587 {
588  return m_LayerSupport->IsMergeSupported(input0, input1, output, reasonIfUnsupported.value());
589 }
590 
591 bool LayerSupportHandle::IsMergerSupported(const std::vector<const TensorInfo*> inputs,
592  const TensorInfo& output,
593  const OriginsDescriptor& descriptor,
594  Optional<std::string&> reasonIfUnsupported)
595 {
596  return m_LayerSupport->IsMergerSupported(inputs, output, descriptor, reasonIfUnsupported.value());
597 }
598 
600  const TensorInfo& input1,
601  const TensorInfo& output,
602  Optional<std::string&> reasonIfUnsupported)
603 {
604  return m_LayerSupport->IsMinimumSupported(input0, input1, output, reasonIfUnsupported.value());
605 }
606 
608  const TensorInfo& input1,
609  const TensorInfo& output,
610  Optional<std::string&> reasonIfUnsupported)
611 {
612  return m_LayerSupport->IsMultiplicationSupported(input0, input1, output, reasonIfUnsupported.value());
613 }
614 
616  const TensorInfo& output,
617  const NormalizationDescriptor& descriptor,
618  Optional<std::string&> reasonIfUnsupported)
619 {
620  return m_LayerSupport->IsNormalizationSupported(input, output, descriptor, reasonIfUnsupported.value());
621 }
622 
624  Optional<std::string&> reasonIfUnsupported)
625 {
626  return m_LayerSupport->IsOutputSupported(output, reasonIfUnsupported.value());
627 }
628 
630  const TensorInfo& output,
631  const PadDescriptor& descriptor,
632  Optional<std::string&> reasonIfUnsupported)
633 {
634  return m_LayerSupport->IsPadSupported(input, output, descriptor, reasonIfUnsupported.value());
635 }
636 
638  const TensorInfo& output,
639  const PermuteDescriptor& descriptor,
640  Optional<std::string&> reasonIfUnsupported)
641 {
642  return m_LayerSupport->IsPermuteSupported(input, output, descriptor, reasonIfUnsupported.value());
643 }
644 
646  const TensorInfo& output,
647  const Pooling2dDescriptor& descriptor,
648  Optional<std::string&> reasonIfUnsupported)
649 {
650  return m_LayerSupport->IsPooling2dSupported(input, output, descriptor, reasonIfUnsupported.value());
651 }
652 
654  const PreCompiledDescriptor& descriptor,
655  Optional<std::string&> reasonIfUnsupported)
656 {
657  return m_LayerSupport->IsPreCompiledSupported(input, descriptor, reasonIfUnsupported.value());
658 }
659 
661  const TensorInfo& alpha,
662  const TensorInfo& output,
663  Optional<std::string&> reasonIfUnsupported)
664 {
665  return m_LayerSupport->IsPreluSupported(input, alpha, output, reasonIfUnsupported.value());
666 }
667 
669  const TensorInfo& output,
670  Optional<std::string&> reasonIfUnsupported)
671 {
672  return m_LayerSupport->IsQuantizeSupported(input, output, reasonIfUnsupported.value());
673 }
674 
676  const TensorInfo& previousOutputIn,
677  const TensorInfo& previousCellStateIn,
678  const TensorInfo& outputStateOut,
679  const TensorInfo& cellStateOut,
680  const TensorInfo& output,
681  const QLstmDescriptor& descriptor,
682  const LstmInputParamsInfo& paramsInfo,
683  Optional<std::string&> reasonIfUnsupported)
684 {
685  return m_LayerSupport->IsQLstmSupported(input,
686  previousOutputIn,
687  previousCellStateIn,
688  outputStateOut,
689  cellStateOut,
690  output,
691  descriptor,
692  paramsInfo,
693  reasonIfUnsupported);
694 }
695 
697  const TensorInfo& previousCellStateIn,
698  const TensorInfo& previousOutputIn,
699  const TensorInfo& cellStateOut,
700  const TensorInfo& output,
701  const QuantizedLstmInputParamsInfo& paramsInfo,
702  Optional<std::string&> reasonIfUnsupported)
703 {
704  return m_LayerSupport->IsQuantizedLstmSupported(input,
705  previousCellStateIn,
706  previousOutputIn,
707  cellStateOut,
708  output,
709  paramsInfo,
710  reasonIfUnsupported);
711 }
712 
714  const TensorInfo& output,
715  Optional<std::string&> reasonIfUnsupported)
716 {
717  return m_LayerSupport->IsRankSupported(input, output, reasonIfUnsupported.value());
718 }
719 
721  const TensorInfo& output,
722  const ReduceDescriptor& descriptor,
723  Optional<std::string&> reasonIfUnsupported)
724 {
725  return m_LayerSupport->IsReduceSupported(input, output, descriptor, reasonIfUnsupported.value());
726 }
727 
729  const TensorInfo& output,
730  const ReshapeDescriptor& descriptor,
731  Optional<std::string&> reasonIfUnsupported)
732 {
733  return m_LayerSupport->IsReshapeSupported(input, output, descriptor, reasonIfUnsupported.value());
734 }
735 
737  const TensorInfo& output,
738  Optional<std::string&> reasonIfUnsupported)
739 {
740  return m_LayerSupport->IsResizeBilinearSupported(input, output, reasonIfUnsupported.value());
741 }
742 
744  const TensorInfo& output,
745  const ResizeDescriptor& descriptor,
746  Optional<std::string&> reasonIfUnsupported)
747 {
748  return m_LayerSupport->IsResizeSupported(input, output, descriptor, reasonIfUnsupported.value());
749 }
750 
752  const TensorInfo& output,
753  Optional<std::string&> reasonIfUnsupported)
754 {
755  return m_LayerSupport->IsRsqrtSupported(input, output, reasonIfUnsupported.value());
756 }
757 
759  const TensorInfo& output,
760  Optional<std::string&> reasonIfUnsupported)
761 {
762  return m_LayerSupport->IsShapeSupported(input, output, reasonIfUnsupported.value());
763 }
764 
766  const TensorInfo& output,
767  const SliceDescriptor& descriptor,
768  Optional<std::string&> reasonIfUnsupported)
769 {
770  return m_LayerSupport->IsSliceSupported(input, output, descriptor, reasonIfUnsupported.value());
771 }
772 
774  const TensorInfo& output,
775  const SoftmaxDescriptor& descriptor,
776  Optional<std::string&> reasonIfUnsupported)
777 {
778  return m_LayerSupport->IsSoftmaxSupported(input, output, descriptor, reasonIfUnsupported.value());
779 }
780 
782  const TensorInfo& output,
783  const SpaceToBatchNdDescriptor& descriptor,
784  Optional<std::string&> reasonIfUnsupported)
785 {
786  return m_LayerSupport->IsSpaceToBatchNdSupported(input, output, descriptor, reasonIfUnsupported.value());
787 }
788 
790  const TensorInfo& output,
791  const SpaceToDepthDescriptor& descriptor,
792  Optional<std::string&> reasonIfUnsupported)
793 {
794  return m_LayerSupport->IsSpaceToDepthSupported(input, output, descriptor, reasonIfUnsupported.value());
795 }
796 
798  const ViewsDescriptor& descriptor,
799  Optional<std::string&> reasonIfUnsupported)
800 {
801  return m_LayerSupport->IsSplitterSupported(input, descriptor, reasonIfUnsupported.value());
802 }
803 
805  const std::vector<std::reference_wrapper<TensorInfo>>& outputs,
806  const ViewsDescriptor& descriptor,
807  Optional<std::string&> reasonIfUnsupported)
808 {
809  return m_LayerSupport->IsSplitterSupported(input, outputs, descriptor, reasonIfUnsupported.value());
810 }
811 
812 bool LayerSupportHandle::IsStackSupported(const std::vector<const TensorInfo*>& inputs,
813  const TensorInfo& output,
814  const StackDescriptor& descriptor,
815  Optional<std::string&> reasonIfUnsupported)
816 {
817  return m_LayerSupport->IsStackSupported(inputs, output, descriptor, reasonIfUnsupported.value());
818 }
819 
820 bool LayerSupportHandle::IsStandInSupported(const std::vector<const TensorInfo*>& inputs,
821  const std::vector<const TensorInfo*>& outputs,
822  const StandInDescriptor& descriptor,
823  Optional<std::string&> reasonIfUnsupported)
824 {
825  return m_LayerSupport->IsStandInSupported(inputs, outputs, descriptor, reasonIfUnsupported.value());
826 }
827 
828 
830  const TensorInfo& output,
831  const StridedSliceDescriptor& descriptor,
832  Optional<std::string&> reasonIfUnsupported)
833 {
834  return m_LayerSupport->IsStridedSliceSupported(input, output, descriptor, reasonIfUnsupported.value());
835 }
836 
838  const TensorInfo& input1,
839  const TensorInfo& output,
840  Optional<std::string&> reasonIfUnsupported)
841 {
842  return m_LayerSupport->IsSubtractionSupported(input0, input1, output, reasonIfUnsupported.value());
843 }
844 
846  const TensorInfo& input1,
847  const TensorInfo& output0,
848  const TensorInfo& output1,
849  Optional<std::string&> reasonIfUnsupported)
850 {
851  return m_LayerSupport->IsSwitchSupported(input0, input1, output0, output1, reasonIfUnsupported.value());
852 }
853 
855  const TensorInfo& input,
856  const TensorInfo& output,
857  const TransposeConvolution2dDescriptor& descriptor,
858  const TensorInfo& weights,
859  const Optional<TensorInfo>& biases,
860  Optional<std::string&> reasonIfUnsupported)
861 {
862  return m_LayerSupport->IsTransposeConvolution2dSupported(input,
863  output,
864  descriptor,
865  weights,
866  biases,
867  reasonIfUnsupported.value());
868 }
869 
871  const TensorInfo& output,
872  const TransposeDescriptor& descriptor,
873  Optional<std::string&> reasonIfUnsupported)
874 {
875  return m_LayerSupport->IsTransposeSupported(input, output, descriptor, reasonIfUnsupported.value());
876 }
877 
879  const TensorInfo& outputStateIn,
880  const TensorInfo& cellStateIn,
881  const TensorInfo& output,
882  const Optional<TensorInfo>& hiddenStateOutput,
883  const Optional<TensorInfo>& cellStateOutput,
884  const LstmDescriptor& descriptor,
885  const LstmInputParamsInfo& paramsInfo,
886  Optional<std::string&> reasonIfUnsupported)
887 {
888  return m_LayerSupport->IsUnidirectionalSequenceLstmSupported(input,
889  outputStateIn,
890  cellStateIn,
891  output,
892  hiddenStateOutput,
893  cellStateOutput,
894  descriptor,
895  paramsInfo,
896  reasonIfUnsupported);
897 }
898 
899 }
bool IsDepthwiseConvolutionSupported(const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string &> reasonIfUnsupported=EmptyOptional())
bool IsL2NormalizationSupported(const TensorInfo &input, const TensorInfo &output, const L2NormalizationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional())
const BackendOption & GetOption(size_t idx) const
bool IsLstmSupported(const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &scratchBuffer, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const LstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string &> reasonIfUnsupported=EmptyOptional())
bool IsStridedSliceSupported(const TensorInfo &input, const TensorInfo &output, const StridedSliceDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional())
bool IsConstant() const
Definition: Tensor.cpp:511
bool HasCapability(const std::string &name, const BackendCapabilities &capabilities)
Convenience function to check if a capability exists in a BackendCapabilites struct.
bool IsSpaceToDepthSupported(const TensorInfo &input, const TensorInfo &output, const SpaceToDepthDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional())
FactoryFunction GetFactory(const BackendId &id) const
A ViewsDescriptor for the SplitterLayer.
bool IsDetectionPostProcessSupported(const TensorInfo &boxEncodings, const TensorInfo &scores, const TensorInfo &anchors, const TensorInfo &detectionBoxes, const TensorInfo &detectionClasses, const TensorInfo &detectionScores, const TensorInfo &numDetections, const DetectionPostProcessDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional())
bool IsElementwiseUnarySupported(const TensorInfo &input, const TensorInfo &output, const ElementwiseUnaryDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional())
bool IsBackendRegistered(const BackendId &id) const
A TransposeConvolution2dDescriptor for the TransposeConvolution2dLayer.
bool IsComparisonSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ComparisonDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional())
A ReshapeDescriptor for the ReshapeLayer.
bool IsPreluSupported(const TensorInfo &input, const TensorInfo &alpha, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional())
bool IsPadSupported(const TensorInfo &input, const TensorInfo &output, const PadDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional())
#define ARMNN_NO_DEPRECATE_WARN_BEGIN
Definition: Deprecated.hpp:33
A ComparisonDescriptor for the ComparisonLayer.
Definition: Descriptors.hpp:78
bool IsAdditionSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional())
Optional< const BackendOptions::BackendOption > GetCapability(const std::string &backendCapabilityName, const BackendCapabilities &capabilities)
Returns a BackendCapability if the backend lists the capability The BackendCapability must then be in...
bool IsLogicalBinarySupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const LogicalBinaryDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional())
bool IsDebugSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional())
bool IsPooling2dSupported(const TensorInfo &input, const TensorInfo &output, const Pooling2dDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional())
A Convolution2dDescriptor for the Convolution2dLayer.
bool IsReduceSupported(const TensorInfo &input, const TensorInfo &output, const ReduceDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional())
bool IsInstanceNormalizationSupported(const TensorInfo &input, const TensorInfo &output, const InstanceNormalizationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional())
bool IsConvolution2dSupported(const TensorInfo &input, const TensorInfo &output, const Convolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string &> reasonIfUnsupported=EmptyOptional())
bool IsLogicalUnarySupported(const TensorInfo &input, const TensorInfo &output, const ElementwiseUnaryDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional())
bool IsMemImportSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional())
bool IsMultiplicationSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional())
bool IsArgMinMaxSupported(const TensorInfo &input, const TensorInfo &output, const ArgMinMaxDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional())
#define ARMNN_LOG(severity)
Definition: Logging.hpp:202
bool IsInputSupported(const TensorInfo &input, Optional< std::string &> reasonIfUnsupported=EmptyOptional())
bool IsSubtractionSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional())
BackendRegistry & BackendRegistryInstance()
bool IsConvertFp16ToFp32Supported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional())
bool IsSoftmaxSupported(const TensorInfo &input, const TensorInfo &output, const SoftmaxDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional())
A LogicalBinaryDescriptor for the LogicalBinaryLayer.
Copyright (c) 2021 ARM Limited and Contributors.
bool IsLogSoftmaxSupported(const TensorInfo &input, const TensorInfo &output, const LogSoftmaxDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional())
bool IsFakeQuantizationSupported(const TensorInfo &input, const FakeQuantizationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional())
A SpaceToDepthDescriptor for the SpaceToDepthLayer.
bool IsMergeSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional())
bool IsRankSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional())
bool IsMergerSupported(const std::vector< const TensorInfo *> inputs, const TensorInfo &output, const OriginsDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional())
A BatchToSpaceNdDescriptor for the BatchToSpaceNdLayer.
bool IsAbsSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional())
bool IsEqualSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional())
bool IsConvertFp32ToFp16Supported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional())
A ResizeDescriptor for the ResizeLayer.
bool IsResizeSupported(const TensorInfo &input, const TensorInfo &output, const ResizeDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional())
bool IsDepthToSpaceSupported(const TensorInfo &input, const TensorInfo &output, const DepthToSpaceDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional())
A StackDescriptor for the StackLayer.
bool IsQuantizedLstmSupported(const TensorInfo &input, const TensorInfo &previousCellStateIn, const TensorInfo &previousOutputIn, const TensorInfo &cellStateOut, const TensorInfo &output, const QuantizedLstmInputParamsInfo &paramsInfo, Optional< std::string &> reasonIfUnsupported=EmptyOptional())
A PadDescriptor for the PadLayer.
bool IsFullyConnectedSupported(const TensorInfo &input, const TensorInfo &output, const TensorInfo &weights, const TensorInfo &biases, const FullyConnectedDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional())
bool IsStackSupported(const std::vector< const TensorInfo *> &inputs, const TensorInfo &output, const StackDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional())
bool IsSpaceToBatchNdSupported(const TensorInfo &input, const TensorInfo &output, const SpaceToBatchNdDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional())
size_t GetOptionCount() const noexcept
bool IsMinimumSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional())
bool IsPermuteSupported(const TensorInfo &input, const TensorInfo &output, const PermuteDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional())
bool IsQuantizeSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional())
An LstmDescriptor for the LstmLayer.
#define ARMNN_NO_DEPRECATE_WARN_END
Definition: Deprecated.hpp:34
bool IsUnidirectionalSequenceLstmSupported(const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &output, const Optional< TensorInfo > &hiddenStateOutput, const Optional< TensorInfo > &cellStateOutput, const LstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string &> reasonIfUnsupported=EmptyOptional())
A L2NormalizationDescriptor for the L2NormalizationLayer.
BackendCapability
BackendCapability class.
Definition: Types.hpp:221
bool IsFillSupported(const TensorInfo &input, const TensorInfo &output, const FillDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional())
An ArgMinMaxDescriptor for ArgMinMaxLayer.
Definition: Descriptors.hpp:56
An OriginsDescriptor for the ConcatLayer.
A ReduceDescriptor for the REDUCE operators.
A FullyConnectedDescriptor for the FullyConnectedLayer.
bool IsSliceSupported(const TensorInfo &input, const TensorInfo &output, const SliceDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional())
bool m_BiasEnabled
Enable/disable bias.
A FakeQuantizationDescriptor for the FakeQuantizationLayer.
bool IsTransposeConvolution2dSupported(const TensorInfo &input, const TensorInfo &output, const TransposeConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string &> reasonIfUnsupported=EmptyOptional())
A GatherDescriptor for the GatherLayer.
std::string AsString() const
bool IsBatchNormalizationSupported(const TensorInfo &input, const TensorInfo &output, const TensorInfo &mean, const TensorInfo &var, const TensorInfo &beta, const TensorInfo &gamma, const BatchNormalizationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional())
bool IsConcatSupported(const std::vector< const TensorInfo *> inputs, const TensorInfo &output, const OriginsDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional())
A StandInDescriptor for the StandIn layer.
A QLstmDescriptor for the QLstmLayer.
bool IsConstantSupported(const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional())
bool IsBatchToSpaceNdSupported(const TensorInfo &input, const TensorInfo &output, const BatchToSpaceNdDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional())
bool IsGatherSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional())
bool IsCastSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional())
An ActivationDescriptor for the ActivationLayer.
Definition: Descriptors.hpp:25
bool IsReshapeSupported(const TensorInfo &input, const TensorInfo &output, const ReshapeDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional())
A SliceDescriptor for the SliceLayer.
bool IsOutputSupported(const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional())
bool IsMemCopySupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional())
bool IsResizeBilinearSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional())
bool IsPreCompiledSupported(const TensorInfo &input, const PreCompiledDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional())
unsigned int AsUnsignedInt() const
bool IsDivisionSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional())
A SpaceToBatchNdDescriptor for the SpaceToBatchNdLayer.
Struct for the users to pass backend specific options.
bool IsStandInSupported(const std::vector< const TensorInfo *> &inputs, const std::vector< const TensorInfo *> &outputs, const StandInDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional())
bool IsNormalizationSupported(const TensorInfo &input, const TensorInfo &output, const NormalizationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional())
EmptyOptional is used to initialize the Optional class in case we want to have default value for an O...
Definition: Optional.hpp:32
A ElementwiseUnaryDescriptor for the ElementwiseUnaryLayer.
Definition: Descriptors.hpp:98
bool IsTransposeSupported(const TensorInfo &input, const TensorInfo &output, const TransposeDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional())
bool IsShapeSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional())
bool AsBool() const
Value getters.
bool IsCapabilitySupported(const armnn::BackendId &backend, armnn::BackendCapability capability)
Convenience function to check a capability on a backend.
bool IsConvertFp32ToBf16Supported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional())
bool IsQLstmSupported(const TensorInfo &input, const TensorInfo &previousOutputIn, const TensorInfo &previousCellStateIn, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const QLstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string &> reasonIfUnsupported=EmptyOptional())
bool IsBool() const
Type getters.
bool IsRsqrtSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional())
bool IsSwitchSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output0, const TensorInfo &output1, Optional< std::string &> reasonIfUnsupported=EmptyOptional())
A MeanDescriptor for the MeanLayer.
A TransposeDescriptor for the TransposeLayer.
A StridedSliceDescriptor for the StridedSliceLayer.
bool IsConvertBf16ToFp32Supported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional())
bool IsSplitterSupported(const TensorInfo &input, const ViewsDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional())
bool IsMaximumSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional())
A PreCompiledDescriptor for the PreCompiledLayer.
bool IsMeanSupported(const TensorInfo &input, const TensorInfo &output, const MeanDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional())
A Pooling2dDescriptor for the Pooling2dLayer.
LayerSupportHandle GetILayerSupportByBackendId(const armnn::BackendId &backend)
Convenience function to retrieve the ILayerSupportHandle for a backend.
A NormalizationDescriptor for the NormalizationLayer.
bool IsDequantizeSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional())
An InstanceNormalizationDescriptor for InstanceNormalizationLayer.
bool IsActivationSupported(const TensorInfo &input, const TensorInfo &output, const ActivationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional())
bool IsGreaterSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional())
bool IsUndefined() const
Definition: BackendId.hpp:139
A SoftmaxDescriptor for the SoftmaxLayer.
A DepthwiseConvolution2dDescriptor for the DepthwiseConvolution2dLayer.
A FillDescriptor for the FillLayer.
A BatchNormalizationDescriptor for the BatchNormalizationLayer.
A PermuteDescriptor for the PermuteLayer.
bool IsDilatedDepthwiseConvolutionSupported(const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string &> reasonIfUnsupported=EmptyOptional())
bool IsFloorSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional())
bool m_ConstantWeights
Enable/disable constant weights and biases.