ArmNN 21.05
BackendHelper.cpp
//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include <armnn/BackendHelper.hpp>
#include <armnn/BackendRegistry.hpp>

#include <armnn/backends/IBackendInternal.hpp>

namespace armnn
{

// Return LayerSupportHandle instead of the previous pointer to ILayerSupport.
LayerSupportHandle GetILayerSupportByBackendId(const armnn::BackendId& backend)
{
    auto& backendRegistry = armnn::BackendRegistryInstance();

    if (!backendRegistry.IsBackendRegistered(backend))
    {
        return LayerSupportHandle(nullptr);
    }

    auto factoryFunc = backendRegistry.GetFactory(backend);
    auto backendObject = factoryFunc();
    return LayerSupportHandle(backendObject->GetLayerSupport(), backend);
}
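
// Illustrative usage sketch (not part of the ArmNN sources): fetch a LayerSupportHandle for a
// backend, confirm the backend is registered, then query support for a specific layer. The
// backend name "CpuRef", the tensor shapes and the data type are assumptions for the example.
//
//     LayerSupportHandle handle = GetILayerSupportByBackendId(BackendId("CpuRef"));
//     if (handle.IsBackendRegistered())
//     {
//         TensorInfo input0({1, 4}, DataType::Float32);
//         TensorInfo input1({1, 4}, DataType::Float32);
//         TensorInfo output({1, 4}, DataType::Float32);
//         std::string reason;
//         bool supported = handle.IsAdditionSupported(input0, input1, output,
//                                                     Optional<std::string&>(reason));
//     }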

/// Convenience function to check a capability on a backend
bool IsCapabilitySupported(const armnn::BackendId& backend, armnn::BackendCapability capability)
{
    bool hasCapability = false;
    auto const& backendRegistry = armnn::BackendRegistryInstance();
    if (backendRegistry.IsBackendRegistered(backend))
    {
        auto factoryFunc = backendRegistry.GetFactory(backend);
        auto backendObject = factoryFunc();
        hasCapability = backendObject->HasCapability(capability);
    }
    return hasCapability;
}
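
// Illustrative usage sketch (not part of the ArmNN sources): ask a backend whether it reports
// an optional capability before relying on it. The backend name "GpuAcc" is an assumption.
//
//     bool nonConstWeights =
//         IsCapabilitySupported(BackendId("GpuAcc"), BackendCapability::NonConstWeights);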

bool LayerSupportHandle::IsBackendRegistered() const
{
    if (m_LayerSupport)
    {
        return true;
    }

    return false;
}


bool LayerSupportHandle::IsAbsSupported(const TensorInfo& input,
                                        const TensorInfo& output,
                                        Optional<std::string&> reasonIfUnsupported)
{
    // Call the IsXXXLayerSupport function of the specific backend.
    return m_LayerSupport->IsAbsSupported(input, output, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsActivationSupported(const TensorInfo& input,
                                               const TensorInfo& output,
                                               const ActivationDescriptor& descriptor,
                                               Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsActivationSupported(input, output, descriptor, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsAdditionSupported(const TensorInfo& input0,
                                             const TensorInfo& input1,
                                             const TensorInfo& output,
                                             Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsAdditionSupported(input0, input1, output, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsArgMinMaxSupported(const TensorInfo& input,
                                              const TensorInfo& output,
                                              const ArgMinMaxDescriptor& descriptor,
                                              Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsArgMinMaxSupported(input, output, descriptor, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsBatchNormalizationSupported(const TensorInfo& input,
                                                       const TensorInfo& output,
                                                       const TensorInfo& mean,
                                                       const TensorInfo& var,
                                                       const TensorInfo& beta,
                                                       const TensorInfo& gamma,
                                                       const BatchNormalizationDescriptor& descriptor,
                                                       Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsBatchNormalizationSupported(input,
                                                         output,
                                                         mean,
                                                         var,
                                                         beta,
                                                         gamma,
                                                         descriptor,
                                                         reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsBatchToSpaceNdSupported(const TensorInfo& input,
                                                   const TensorInfo& output,
                                                   const BatchToSpaceNdDescriptor& descriptor,
                                                   Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsBatchToSpaceNdSupported(input,
                                                     output,
                                                     descriptor,
                                                     reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsCastSupported(const TensorInfo& input,
                                         const TensorInfo& output,
                                         Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsCastSupported(input, output, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsComparisonSupported(const TensorInfo& input0,
                                               const TensorInfo& input1,
                                               const TensorInfo& output,
                                               const ComparisonDescriptor& descriptor,
                                               Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsComparisonSupported(input0, input1, output, descriptor, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsConcatSupported(const std::vector<const TensorInfo*> inputs,
                                           const TensorInfo& output,
                                           const OriginsDescriptor& descriptor,
                                           Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsConcatSupported(inputs, output, descriptor, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsConstantSupported(const TensorInfo& output,
                                             Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsConstantSupported(output, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsConvertBf16ToFp32Supported(const TensorInfo& input,
                                                      const TensorInfo& output,
                                                      Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsConvertBf16ToFp32Supported(input, output, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsConvertFp32ToBf16Supported(const TensorInfo& input,
                                                      const TensorInfo& output,
                                                      Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsConvertFp32ToBf16Supported(input, output, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsConvertFp16ToFp32Supported(const TensorInfo& input,
                                                      const TensorInfo& output,
                                                      Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsConvertFp16ToFp32Supported(input, output, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsConvertFp32ToFp16Supported(const TensorInfo& input,
                                                      const TensorInfo& output,
                                                      Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsConvertFp32ToFp16Supported(input, output, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsConvolution2dSupported(const TensorInfo& input,
                                                  const TensorInfo& output,
                                                  const Convolution2dDescriptor& descriptor,
                                                  const TensorInfo& weights,
                                                  const Optional<TensorInfo>& biases,
                                                  Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsConvolution2dSupported(input,
                                                    output,
                                                    descriptor,
                                                    weights,
                                                    biases,
                                                    reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsDebugSupported(const TensorInfo& input,
                                          const TensorInfo& output,
                                          Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsDebugSupported(input, output, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsDepthToSpaceSupported(const TensorInfo& input,
                                                 const TensorInfo& output,
                                                 const DepthToSpaceDescriptor& descriptor,
                                                 Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsDepthToSpaceSupported(input, output, descriptor, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsDepthwiseConvolutionSupported(
        const TensorInfo& input,
        const TensorInfo& output,
        const DepthwiseConvolution2dDescriptor& descriptor,
        const TensorInfo& weights,
        const Optional<TensorInfo>& biases,
        Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsDepthwiseConvolutionSupported(input,
                                                           output,
                                                           descriptor,
                                                           weights,
                                                           biases,
                                                           reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsDequantizeSupported(const TensorInfo& input,
                                               const TensorInfo& output,
                                               Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsDequantizeSupported(input, output, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsDetectionPostProcessSupported(const TensorInfo& boxEncodings,
                                                         const TensorInfo& scores,
                                                         const TensorInfo& anchors,
                                                         const TensorInfo& detectionBoxes,
                                                         const TensorInfo& detectionClasses,
                                                         const TensorInfo& detectionScores,
                                                         const TensorInfo& numDetections,
                                                         const DetectionPostProcessDescriptor& descriptor,
                                                         Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsDetectionPostProcessSupported(boxEncodings,
                                                           scores,
                                                           anchors,
                                                           detectionBoxes,
                                                           detectionClasses,
                                                           detectionScores,
                                                           numDetections,
                                                           descriptor,
                                                           reasonIfUnsupported);
}

bool LayerSupportHandle::IsDilatedDepthwiseConvolutionSupported(
        const TensorInfo& input,
        const TensorInfo& output,
        const DepthwiseConvolution2dDescriptor& descriptor,
        const TensorInfo& weights,
        const Optional<TensorInfo>& biases,
        Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsDilatedDepthwiseConvolutionSupported(input,
                                                                  output,
                                                                  descriptor,
                                                                  weights,
                                                                  biases,
                                                                  reasonIfUnsupported);
}

bool LayerSupportHandle::IsDivisionSupported(const TensorInfo& input0,
                                             const TensorInfo& input1,
                                             const TensorInfo& output,
                                             Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsDivisionSupported(input0, input1, output, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsElementwiseUnarySupported(const TensorInfo& input,
                                                     const TensorInfo& output,
                                                     const ElementwiseUnaryDescriptor& descriptor,
                                                     Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsElementwiseUnarySupported(input, output, descriptor, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsEqualSupported(const TensorInfo& input0,
                                          const TensorInfo& input1,
                                          const TensorInfo& output,
                                          Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsEqualSupported(input0, input1, output, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsFakeQuantizationSupported(const TensorInfo& input,
                                                     const FakeQuantizationDescriptor& descriptor,
                                                     Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsFakeQuantizationSupported(input, descriptor, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsFillSupported(const TensorInfo& input,
                                         const TensorInfo& output,
                                         const FillDescriptor& descriptor,
                                         Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsFillSupported(input, output, descriptor, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsFloorSupported(const TensorInfo& input,
                                          const TensorInfo& output,
                                          Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsFloorSupported(input, output, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsFullyConnectedSupported(const TensorInfo& input,
                                                   const TensorInfo& output,
                                                   const TensorInfo& weights,
                                                   const TensorInfo& biases,
                                                   const FullyConnectedDescriptor& descriptor,
                                                   Optional<std::string&> reasonIfUnsupported)
{
    if(!descriptor.m_ConstantWeights && !m_BackendId.IsUndefined())
    {
        // Non-constant weights require the NonConstWeights capability on the backend.
        bool result = false;
        result = IsCapabilitySupported(m_BackendId, BackendCapability::NonConstWeights);
        if (!result)
        {
            return result;
        }
    }

    return m_LayerSupport->IsFullyConnectedSupported(input,
                                                     output,
                                                     weights,
                                                     biases,
                                                     descriptor,
                                                     reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsGatherSupported(const TensorInfo& input0,
                                           const TensorInfo& input1,
                                           const TensorInfo& output,
                                           Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsGatherSupported(input0, input1, output, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsGatherSupported(const TensorInfo& input0,
                                           const TensorInfo& input1,
                                           const TensorInfo& output,
                                           const GatherDescriptor& descriptor,
                                           Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsGatherSupported(input0, input1, output, descriptor, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsGreaterSupported(const TensorInfo& input0,
                                            const TensorInfo& input1,
                                            const TensorInfo& output,
                                            Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsGreaterSupported(input0, input1, output, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsInputSupported(const TensorInfo& input,
                                          Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsInputSupported(input, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsInstanceNormalizationSupported(
        const TensorInfo& input,
        const TensorInfo& output,
        const InstanceNormalizationDescriptor& descriptor,
        Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsInstanceNormalizationSupported(input, output, descriptor, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsL2NormalizationSupported(const TensorInfo& input,
                                                    const TensorInfo& output,
                                                    const L2NormalizationDescriptor& descriptor,
                                                    Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsL2NormalizationSupported(input, output, descriptor, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsLogicalBinarySupported(const TensorInfo& input0,
                                                  const TensorInfo& input1,
                                                  const TensorInfo& output,
                                                  const LogicalBinaryDescriptor& descriptor,
                                                  Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsLogicalBinarySupported(input0,
                                                    input1,
                                                    output,
                                                    descriptor,
                                                    reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsLogicalUnarySupported(const TensorInfo& input,
                                                 const TensorInfo& output,
                                                 const ElementwiseUnaryDescriptor& descriptor,
                                                 Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsLogicalUnarySupported(input, output, descriptor, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsLogSoftmaxSupported(const TensorInfo& input,
                                               const TensorInfo& output,
                                               const LogSoftmaxDescriptor& descriptor,
                                               Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsLogSoftmaxSupported(input, output, descriptor, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsLstmSupported(const TensorInfo& input,
                                         const TensorInfo& outputStateIn,
                                         const TensorInfo& cellStateIn,
                                         const TensorInfo& scratchBuffer,
                                         const TensorInfo& outputStateOut,
                                         const TensorInfo& cellStateOut,
                                         const TensorInfo& output,
                                         const LstmDescriptor& descriptor,
                                         const LstmInputParamsInfo& paramsInfo,
                                         Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsLstmSupported(input,
                                           outputStateIn,
                                           cellStateIn,
                                           scratchBuffer,
                                           outputStateOut,
                                           cellStateOut,
                                           output,
                                           descriptor,
                                           paramsInfo,
                                           reasonIfUnsupported);
}

bool LayerSupportHandle::IsMaximumSupported(const TensorInfo& input0,
                                            const TensorInfo& input1,
                                            const TensorInfo& output,
                                            Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsMaximumSupported(input0, input1, output, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsMeanSupported(const TensorInfo& input,
                                         const TensorInfo& output,
                                         const MeanDescriptor& descriptor,
                                         Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsMeanSupported(input, output, descriptor, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsMemCopySupported(const TensorInfo& input,
                                            const TensorInfo& output,
                                            Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsMemCopySupported(input, output, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsMemImportSupported(const TensorInfo& input,
                                              const TensorInfo& output,
                                              Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsMemImportSupported(input, output, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsMergeSupported(const TensorInfo& input0,
                                          const TensorInfo& input1,
                                          const TensorInfo& output,
                                          Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsMergeSupported(input0, input1, output, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsMergerSupported(const std::vector<const TensorInfo*> inputs,
                                           const TensorInfo& output,
                                           const OriginsDescriptor& descriptor,
                                           Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsMergerSupported(inputs, output, descriptor, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsMinimumSupported(const TensorInfo& input0,
                                            const TensorInfo& input1,
                                            const TensorInfo& output,
                                            Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsMinimumSupported(input0, input1, output, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsMultiplicationSupported(const TensorInfo& input0,
                                                   const TensorInfo& input1,
                                                   const TensorInfo& output,
                                                   Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsMultiplicationSupported(input0, input1, output, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsNormalizationSupported(const TensorInfo& input,
                                                  const TensorInfo& output,
                                                  const NormalizationDescriptor& descriptor,
                                                  Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsNormalizationSupported(input, output, descriptor, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsOutputSupported(const TensorInfo& output,
                                           Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsOutputSupported(output, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsPadSupported(const TensorInfo& input,
                                        const TensorInfo& output,
                                        const PadDescriptor& descriptor,
                                        Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsPadSupported(input, output, descriptor, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsPermuteSupported(const TensorInfo& input,
                                            const TensorInfo& output,
                                            const PermuteDescriptor& descriptor,
                                            Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsPermuteSupported(input, output, descriptor, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsPooling2dSupported(const TensorInfo& input,
                                              const TensorInfo& output,
                                              const Pooling2dDescriptor& descriptor,
                                              Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsPooling2dSupported(input, output, descriptor, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsPreCompiledSupported(const TensorInfo& input,
                                                const PreCompiledDescriptor& descriptor,
                                                Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsPreCompiledSupported(input, descriptor, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsPreluSupported(const TensorInfo& input,
                                          const TensorInfo& alpha,
                                          const TensorInfo& output,
                                          Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsPreluSupported(input, alpha, output, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsQuantizeSupported(const TensorInfo& input,
                                             const TensorInfo& output,
                                             Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsQuantizeSupported(input, output, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsQLstmSupported(const TensorInfo& input,
                                          const TensorInfo& previousOutputIn,
                                          const TensorInfo& previousCellStateIn,
                                          const TensorInfo& outputStateOut,
                                          const TensorInfo& cellStateOut,
                                          const TensorInfo& output,
                                          const QLstmDescriptor& descriptor,
                                          const LstmInputParamsInfo& paramsInfo,
                                          Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsQLstmSupported(input,
                                            previousOutputIn,
                                            previousCellStateIn,
                                            outputStateOut,
                                            cellStateOut,
                                            output,
                                            descriptor,
                                            paramsInfo,
                                            reasonIfUnsupported);
}

bool LayerSupportHandle::IsQuantizedLstmSupported(const TensorInfo& input,
                                                  const TensorInfo& previousCellStateIn,
                                                  const TensorInfo& previousOutputIn,
                                                  const TensorInfo& cellStateOut,
                                                  const TensorInfo& output,
                                                  const QuantizedLstmInputParamsInfo& paramsInfo,
                                                  Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsQuantizedLstmSupported(input,
                                                    previousCellStateIn,
                                                    previousOutputIn,
                                                    cellStateOut,
                                                    output,
                                                    paramsInfo,
                                                    reasonIfUnsupported);
}

bool LayerSupportHandle::IsRankSupported(const TensorInfo& input,
                                         const TensorInfo& output,
                                         Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsRankSupported(input, output, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsReduceSupported(const TensorInfo& input,
                                           const TensorInfo& output,
                                           const ReduceDescriptor& descriptor,
                                           Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsReduceSupported(input, output, descriptor, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsReshapeSupported(const TensorInfo& input,
                                            const TensorInfo& output,
                                            const ReshapeDescriptor& descriptor,
                                            Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsReshapeSupported(input, output, descriptor, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsResizeBilinearSupported(const TensorInfo& input,
                                                   const TensorInfo& output,
                                                   Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsResizeBilinearSupported(input, output, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsResizeSupported(const TensorInfo& input,
                                           const TensorInfo& output,
                                           const ResizeDescriptor& descriptor,
                                           Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsResizeSupported(input, output, descriptor, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsRsqrtSupported(const TensorInfo& input,
                                          const TensorInfo& output,
                                          Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsRsqrtSupported(input, output, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsSliceSupported(const TensorInfo& input,
                                          const TensorInfo& output,
                                          const SliceDescriptor& descriptor,
                                          Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsSliceSupported(input, output, descriptor, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsSoftmaxSupported(const TensorInfo& input,
                                            const TensorInfo& output,
                                            const SoftmaxDescriptor& descriptor,
                                            Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsSoftmaxSupported(input, output, descriptor, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsSpaceToBatchNdSupported(const TensorInfo& input,
                                                   const TensorInfo& output,
                                                   const SpaceToBatchNdDescriptor& descriptor,
                                                   Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsSpaceToBatchNdSupported(input, output, descriptor, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsSpaceToDepthSupported(const TensorInfo& input,
                                                 const TensorInfo& output,
                                                 const SpaceToDepthDescriptor& descriptor,
                                                 Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsSpaceToDepthSupported(input, output, descriptor, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsSplitterSupported(const TensorInfo& input,
                                             const ViewsDescriptor& descriptor,
                                             Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsSplitterSupported(input, descriptor, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsSplitterSupported(const TensorInfo& input,
                                             const std::vector<std::reference_wrapper<TensorInfo>>& outputs,
                                             const ViewsDescriptor& descriptor,
                                             Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsSplitterSupported(input, outputs, descriptor, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsStackSupported(const std::vector<const TensorInfo*>& inputs,
                                          const TensorInfo& output,
                                          const StackDescriptor& descriptor,
                                          Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsStackSupported(inputs, output, descriptor, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsStandInSupported(const std::vector<const TensorInfo*>& inputs,
                                            const std::vector<const TensorInfo*>& outputs,
                                            const StandInDescriptor& descriptor,
                                            Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsStandInSupported(inputs, outputs, descriptor, reasonIfUnsupported.value());
}


bool LayerSupportHandle::IsStridedSliceSupported(const TensorInfo& input,
                                                 const TensorInfo& output,
                                                 const StridedSliceDescriptor& descriptor,
                                                 Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsStridedSliceSupported(input, output, descriptor, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsSubtractionSupported(const TensorInfo& input0,
                                                const TensorInfo& input1,
                                                const TensorInfo& output,
                                                Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsSubtractionSupported(input0, input1, output, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsSwitchSupported(const TensorInfo& input0,
                                           const TensorInfo& input1,
                                           const TensorInfo& output0,
                                           const TensorInfo& output1,
                                           Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsSwitchSupported(input0, input1, output0, output1, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsTransposeConvolution2dSupported(
        const TensorInfo& input,
        const TensorInfo& output,
        const TransposeConvolution2dDescriptor& descriptor,
        const TensorInfo& weights,
        const Optional<TensorInfo>& biases,
        Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsTransposeConvolution2dSupported(input,
                                                             output,
                                                             descriptor,
                                                             weights,
                                                             biases,
                                                             reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsTransposeSupported(const TensorInfo& input,
                                              const TensorInfo& output,
                                              const TransposeDescriptor& descriptor,
                                              Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsTransposeSupported(input, output, descriptor, reasonIfUnsupported.value());
}

}