ArmNN 21.02
BackendHelper.cpp
//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include <armnn/BackendHelper.hpp>
#include <armnn/BackendRegistry.hpp>

#include <armnn/backends/IBackendInternal.hpp>

namespace armnn
{

// Return LayerSupportHandle instead of the previous pointer to ILayerSupport.
LayerSupportHandle GetILayerSupportByBackendId(const armnn::BackendId& backend)
{
    auto& backendRegistry = armnn::BackendRegistryInstance();

    if (!backendRegistry.IsBackendRegistered(backend))
    {
        return LayerSupportHandle(nullptr);
    }

    auto factoryFunc = backendRegistry.GetFactory(backend);
    auto backendObject = factoryFunc();
    return LayerSupportHandle(backendObject->GetLayerSupport());
}

bool LayerSupportHandle::IsBackendRegistered() const
{
    if (m_LayerSupport)
    {
        return true;
    }

    return false;
}
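
// Example (illustrative only, not part of the original file): a typical caller first
// retrieves a LayerSupportHandle for a backend id and uses IsBackendRegistered() to
// confirm that the backend is available before issuing any queries. "CpuRef" is
// assumed here to be a registered backend id.
//
//     armnn::LayerSupportHandle handle = armnn::GetILayerSupportByBackendId("CpuRef");
//     if (!handle.IsBackendRegistered())
//     {
//         // Fall back to another backend, or report that CpuRef is unavailable.
//     }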

bool LayerSupportHandle::IsAbsSupported(const TensorInfo& input,
                                        const TensorInfo& output,
                                        Optional<std::string&> reasonIfUnsupported)
{
    // Call the IsXXXLayerSupport function of the specific backend.
    return m_LayerSupport->IsAbsSupported(input, output, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsActivationSupported(const TensorInfo& input,
                                               const TensorInfo& output,
                                               const ActivationDescriptor& descriptor,
                                               Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsActivationSupported(input, output, descriptor, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsAdditionSupported(const TensorInfo& input0,
                                             const TensorInfo& input1,
                                             const TensorInfo& output,
                                             Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsAdditionSupported(input0, input1, output, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsArgMinMaxSupported(const TensorInfo& input,
                                              const TensorInfo& output,
                                              const ArgMinMaxDescriptor& descriptor,
                                              Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsArgMinMaxSupported(input, output, descriptor, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsBatchNormalizationSupported(const TensorInfo& input,
                                                       const TensorInfo& output,
                                                       const TensorInfo& mean,
                                                       const TensorInfo& var,
                                                       const TensorInfo& beta,
                                                       const TensorInfo& gamma,
                                                       const BatchNormalizationDescriptor& descriptor,
                                                       Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsBatchNormalizationSupported(input,
                                                         output,
                                                         mean,
                                                         var,
                                                         beta,
                                                         gamma,
                                                         descriptor,
                                                         reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsBatchToSpaceNdSupported(const TensorInfo& input,
                                                   const TensorInfo& output,
                                                   const BatchToSpaceNdDescriptor& descriptor,
                                                   Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsBatchToSpaceNdSupported(input,
                                                     output,
                                                     descriptor,
                                                     reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsComparisonSupported(const TensorInfo& input0,
                                               const TensorInfo& input1,
                                               const TensorInfo& output,
                                               const ComparisonDescriptor& descriptor,
                                               Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsComparisonSupported(input0, input1, output, descriptor, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsConcatSupported(const std::vector<const TensorInfo*> inputs,
                                           const TensorInfo& output,
                                           const OriginsDescriptor& descriptor,
                                           Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsConcatSupported(inputs, output, descriptor, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsConstantSupported(const TensorInfo& output,
                                             Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsConstantSupported(output, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsConvertBf16ToFp32Supported(const TensorInfo& input,
                                                      const TensorInfo& output,
                                                      Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsConvertBf16ToFp32Supported(input, output, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsConvertFp32ToBf16Supported(const TensorInfo& input,
                                                      const TensorInfo& output,
                                                      Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsConvertFp32ToBf16Supported(input, output, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsConvertFp16ToFp32Supported(const TensorInfo& input,
                                                      const TensorInfo& output,
                                                      Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsConvertFp16ToFp32Supported(input, output, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsConvertFp32ToFp16Supported(const TensorInfo& input,
                                                      const TensorInfo& output,
                                                      Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsConvertFp32ToFp16Supported(input, output, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsConvolution2dSupported(const TensorInfo& input,
                                                  const TensorInfo& output,
                                                  const Convolution2dDescriptor& descriptor,
                                                  const TensorInfo& weights,
                                                  const Optional<TensorInfo>& biases,
                                                  Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsConvolution2dSupported(input,
                                                    output,
                                                    descriptor,
                                                    weights,
                                                    biases,
                                                    reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsDebugSupported(const TensorInfo& input,
                                          const TensorInfo& output,
                                          Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsDebugSupported(input, output, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsDepthToSpaceSupported(const TensorInfo& input,
                                                 const TensorInfo& output,
                                                 const DepthToSpaceDescriptor& descriptor,
                                                 Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsDepthToSpaceSupported(input, output, descriptor, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsDepthwiseConvolutionSupported(
        const TensorInfo& input,
        const TensorInfo& output,
        const DepthwiseConvolution2dDescriptor& descriptor,
        const TensorInfo& weights,
        const Optional<TensorInfo>& biases,
        Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsDepthwiseConvolutionSupported(input,
                                                           output,
                                                           descriptor,
                                                           weights,
                                                           biases,
                                                           reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsDequantizeSupported(const TensorInfo& input,
                                               const TensorInfo& output,
                                               Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsDequantizeSupported(input, output, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsDetectionPostProcessSupported(const TensorInfo& boxEncodings,
                                                         const TensorInfo& scores,
                                                         const TensorInfo& anchors,
                                                         const TensorInfo& detectionBoxes,
                                                         const TensorInfo& detectionClasses,
                                                         const TensorInfo& detectionScores,
                                                         const TensorInfo& numDetections,
                                                         const DetectionPostProcessDescriptor& descriptor,
                                                         Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsDetectionPostProcessSupported(boxEncodings,
                                                           scores,
                                                           anchors,
                                                           detectionBoxes,
                                                           detectionClasses,
                                                           detectionScores,
                                                           numDetections,
                                                           descriptor,
                                                           reasonIfUnsupported);
}

bool LayerSupportHandle::IsDilatedDepthwiseConvolutionSupported(
        const TensorInfo& input,
        const TensorInfo& output,
        const DepthwiseConvolution2dDescriptor& descriptor,
        const TensorInfo& weights,
        const Optional<TensorInfo>& biases,
        Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsDilatedDepthwiseConvolutionSupported(input,
                                                                  output,
                                                                  descriptor,
                                                                  weights,
                                                                  biases,
                                                                  reasonIfUnsupported);
}

bool LayerSupportHandle::IsDivisionSupported(const TensorInfo& input0,
                                             const TensorInfo& input1,
                                             const TensorInfo& output,
                                             Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsDivisionSupported(input0, input1, output, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsElementwiseUnarySupported(const TensorInfo& input,
                                                     const TensorInfo& output,
                                                     const ElementwiseUnaryDescriptor& descriptor,
                                                     Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsElementwiseUnarySupported(input, output, descriptor, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsEqualSupported(const TensorInfo& input0,
                                          const TensorInfo& input1,
                                          const TensorInfo& output,
                                          Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsEqualSupported(input0, input1, output, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsFakeQuantizationSupported(const TensorInfo& input,
                                                     const FakeQuantizationDescriptor& descriptor,
                                                     Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsFakeQuantizationSupported(input, descriptor, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsFillSupported(const TensorInfo& input,
                                         const TensorInfo& output,
                                         const FillDescriptor& descriptor,
                                         Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsFillSupported(input, output, descriptor, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsFloorSupported(const TensorInfo& input,
                                          const TensorInfo& output,
                                          Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsFloorSupported(input, output, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsFullyConnectedSupported(const TensorInfo& input,
                                                   const TensorInfo& output,
                                                   const TensorInfo& weights,
                                                   const TensorInfo& biases,
                                                   const FullyConnectedDescriptor& descriptor,
                                                   Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsFullyConnectedSupported(input,
                                                     output,
                                                     weights,
                                                     biases,
                                                     descriptor,
                                                     reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsGatherSupported(const TensorInfo& input0,
                                           const TensorInfo& input1,
                                           const TensorInfo& output,
                                           Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsGatherSupported(input0, input1, output, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsGatherSupported(const TensorInfo& input0,
                                           const TensorInfo& input1,
                                           const TensorInfo& output,
                                           const GatherDescriptor& descriptor,
                                           Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsGatherSupported(input0, input1, output, descriptor, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsGreaterSupported(const TensorInfo& input0,
                                            const TensorInfo& input1,
                                            const TensorInfo& output,
                                            Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsGreaterSupported(input0, input1, output, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsInputSupported(const TensorInfo& input,
                                          Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsInputSupported(input, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsInstanceNormalizationSupported(
        const TensorInfo& input,
        const TensorInfo& output,
        const InstanceNormalizationDescriptor& descriptor,
        Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsInstanceNormalizationSupported(input, output, descriptor, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsL2NormalizationSupported(const TensorInfo& input,
                                                    const TensorInfo& output,
                                                    const L2NormalizationDescriptor& descriptor,
                                                    Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsL2NormalizationSupported(input, output, descriptor, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsLogicalBinarySupported(const TensorInfo& input0,
                                                  const TensorInfo& input1,
                                                  const TensorInfo& output,
                                                  const LogicalBinaryDescriptor& descriptor,
                                                  Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsLogicalBinarySupported(input0,
                                                    input1,
                                                    output,
                                                    descriptor,
                                                    reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsLogicalUnarySupported(const TensorInfo& input,
                                                 const TensorInfo& output,
                                                 const ElementwiseUnaryDescriptor& descriptor,
                                                 Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsLogicalUnarySupported(input, output, descriptor, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsLogSoftmaxSupported(const TensorInfo& input,
                                               const TensorInfo& output,
                                               const LogSoftmaxDescriptor& descriptor,
                                               Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsLogSoftmaxSupported(input, output, descriptor, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsLstmSupported(const TensorInfo& input,
                                         const TensorInfo& outputStateIn,
                                         const TensorInfo& cellStateIn,
                                         const TensorInfo& scratchBuffer,
                                         const TensorInfo& outputStateOut,
                                         const TensorInfo& cellStateOut,
                                         const TensorInfo& output,
                                         const LstmDescriptor& descriptor,
                                         const LstmInputParamsInfo& paramsInfo,
                                         Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsLstmSupported(input,
                                           outputStateIn,
                                           cellStateIn,
                                           scratchBuffer,
                                           outputStateOut,
                                           cellStateOut,
                                           output,
                                           descriptor,
                                           paramsInfo,
                                           reasonIfUnsupported);
}

bool LayerSupportHandle::IsMaximumSupported(const TensorInfo& input0,
                                            const TensorInfo& input1,
                                            const TensorInfo& output,
                                            Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsMaximumSupported(input0, input1, output, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsMeanSupported(const TensorInfo& input,
                                         const TensorInfo& output,
                                         const MeanDescriptor& descriptor,
                                         Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsMeanSupported(input, output, descriptor, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsMemCopySupported(const TensorInfo& input,
                                            const TensorInfo& output,
                                            Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsMemCopySupported(input, output, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsMemImportSupported(const TensorInfo& input,
                                              const TensorInfo& output,
                                              Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsMemImportSupported(input, output, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsMergeSupported(const TensorInfo& input0,
                                          const TensorInfo& input1,
                                          const TensorInfo& output,
                                          Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsMergeSupported(input0, input1, output, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsMergerSupported(const std::vector<const TensorInfo*> inputs,
                                           const TensorInfo& output,
                                           const OriginsDescriptor& descriptor,
                                           Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsMergerSupported(inputs, output, descriptor, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsMinimumSupported(const TensorInfo& input0,
                                            const TensorInfo& input1,
                                            const TensorInfo& output,
                                            Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsMinimumSupported(input0, input1, output, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsMultiplicationSupported(const TensorInfo& input0,
                                                   const TensorInfo& input1,
                                                   const TensorInfo& output,
                                                   Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsMultiplicationSupported(input0, input1, output, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsNormalizationSupported(const TensorInfo& input,
                                                  const TensorInfo& output,
                                                  const NormalizationDescriptor& descriptor,
                                                  Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsNormalizationSupported(input, output, descriptor, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsOutputSupported(const TensorInfo& output,
                                           Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsOutputSupported(output, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsPadSupported(const TensorInfo& input,
                                        const TensorInfo& output,
                                        const PadDescriptor& descriptor,
                                        Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsPadSupported(input, output, descriptor, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsPermuteSupported(const TensorInfo& input,
                                            const TensorInfo& output,
                                            const PermuteDescriptor& descriptor,
                                            Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsPermuteSupported(input, output, descriptor, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsPooling2dSupported(const TensorInfo& input,
                                              const TensorInfo& output,
                                              const Pooling2dDescriptor& descriptor,
                                              Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsPooling2dSupported(input, output, descriptor, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsPreCompiledSupported(const TensorInfo& input,
                                                const PreCompiledDescriptor& descriptor,
                                                Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsPreCompiledSupported(input, descriptor, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsPreluSupported(const TensorInfo& input,
                                          const TensorInfo& alpha,
                                          const TensorInfo& output,
                                          Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsPreluSupported(input, alpha, output, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsQuantizeSupported(const TensorInfo& input,
                                             const TensorInfo& output,
                                             Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsQuantizeSupported(input, output, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsQLstmSupported(const TensorInfo& input,
                                          const TensorInfo& previousOutputIn,
                                          const TensorInfo& previousCellStateIn,
                                          const TensorInfo& outputStateOut,
                                          const TensorInfo& cellStateOut,
                                          const TensorInfo& output,
                                          const QLstmDescriptor& descriptor,
                                          const LstmInputParamsInfo& paramsInfo,
                                          Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsQLstmSupported(input,
                                            previousOutputIn,
                                            previousCellStateIn,
                                            outputStateOut,
                                            cellStateOut,
                                            output,
                                            descriptor,
                                            paramsInfo,
                                            reasonIfUnsupported);
}

bool LayerSupportHandle::IsQuantizedLstmSupported(const TensorInfo& input,
                                                  const TensorInfo& previousCellStateIn,
                                                  const TensorInfo& previousOutputIn,
                                                  const TensorInfo& cellStateOut,
                                                  const TensorInfo& output,
                                                  const QuantizedLstmInputParamsInfo& paramsInfo,
                                                  Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsQuantizedLstmSupported(input,
                                                    previousCellStateIn,
                                                    previousOutputIn,
                                                    cellStateOut,
                                                    output,
                                                    paramsInfo,
                                                    reasonIfUnsupported);
}

bool LayerSupportHandle::IsRankSupported(const TensorInfo& input,
                                         const TensorInfo& output,
                                         Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsRankSupported(input, output, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsReduceSupported(const TensorInfo& input,
                                           const TensorInfo& output,
                                           const ReduceDescriptor& descriptor,
                                           Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsReduceSupported(input, output, descriptor, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsReshapeSupported(const TensorInfo& input,
                                            const TensorInfo& output,
                                            const ReshapeDescriptor& descriptor,
                                            Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsReshapeSupported(input, output, descriptor, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsResizeBilinearSupported(const TensorInfo& input,
                                                   const TensorInfo& output,
                                                   Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsResizeBilinearSupported(input, output, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsResizeSupported(const TensorInfo& input,
                                           const TensorInfo& output,
                                           const ResizeDescriptor& descriptor,
                                           Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsResizeSupported(input, output, descriptor, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsRsqrtSupported(const TensorInfo& input,
                                          const TensorInfo& output,
                                          Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsRsqrtSupported(input, output, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsSliceSupported(const TensorInfo& input,
                                          const TensorInfo& output,
                                          const SliceDescriptor& descriptor,
                                          Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsSliceSupported(input, output, descriptor, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsSoftmaxSupported(const TensorInfo& input,
                                            const TensorInfo& output,
                                            const SoftmaxDescriptor& descriptor,
                                            Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsSoftmaxSupported(input, output, descriptor, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsSpaceToBatchNdSupported(const TensorInfo& input,
                                                   const TensorInfo& output,
                                                   const SpaceToBatchNdDescriptor& descriptor,
                                                   Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsSpaceToBatchNdSupported(input, output, descriptor, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsSpaceToDepthSupported(const TensorInfo& input,
                                                 const TensorInfo& output,
                                                 const SpaceToDepthDescriptor& descriptor,
                                                 Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsSpaceToDepthSupported(input, output, descriptor, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsSplitterSupported(const TensorInfo& input,
                                             const ViewsDescriptor& descriptor,
                                             Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsSplitterSupported(input, descriptor, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsSplitterSupported(const TensorInfo& input,
                                             const std::vector<std::reference_wrapper<TensorInfo>>& outputs,
                                             const ViewsDescriptor& descriptor,
                                             Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsSplitterSupported(input, outputs, descriptor, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsStackSupported(const std::vector<const TensorInfo*>& inputs,
                                          const TensorInfo& output,
                                          const StackDescriptor& descriptor,
                                          Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsStackSupported(inputs, output, descriptor, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsStandInSupported(const std::vector<const TensorInfo*>& inputs,
                                            const std::vector<const TensorInfo*>& outputs,
                                            const StandInDescriptor& descriptor,
                                            Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsStandInSupported(inputs, outputs, descriptor, reasonIfUnsupported.value());
}


bool LayerSupportHandle::IsStridedSliceSupported(const TensorInfo& input,
                                                 const TensorInfo& output,
                                                 const StridedSliceDescriptor& descriptor,
                                                 Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsStridedSliceSupported(input, output, descriptor, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsSubtractionSupported(const TensorInfo& input0,
                                                const TensorInfo& input1,
                                                const TensorInfo& output,
                                                Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsSubtractionSupported(input0, input1, output, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsSwitchSupported(const TensorInfo& input0,
                                           const TensorInfo& input1,
                                           const TensorInfo& output0,
                                           const TensorInfo& output1,
                                           Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsSwitchSupported(input0, input1, output0, output1, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsTransposeConvolution2dSupported(
        const TensorInfo& input,
        const TensorInfo& output,
        const TransposeConvolution2dDescriptor& descriptor,
        const TensorInfo& weights,
        const Optional<TensorInfo>& biases,
        Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsTransposeConvolution2dSupported(input,
                                                             output,
                                                             descriptor,
                                                             weights,
                                                             biases,
                                                             reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsTransposeSupported(const TensorInfo& input,
                                              const TensorInfo& output,
                                              const TransposeDescriptor& descriptor,
                                              Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsTransposeSupported(input, output, descriptor, reasonIfUnsupported.value());
}

} // namespace armnn
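
Every IsXxxSupported member above forwards the query to the backend's ILayerSupport implementation, passing reasonIfUnsupported through so the backend can report why a configuration is rejected. The sketch below shows one way a caller might use these queries; it is illustrative only: the backend id "CpuRef", the tensor shape and the data type are assumptions rather than anything taken from this file, and the query only succeeds if that backend is compiled in and registered.

#include <armnn/BackendHelper.hpp>
#include <armnn/Optional.hpp>
#include <armnn/Tensor.hpp>
#include <armnn/Types.hpp>

#include <iostream>
#include <string>

int main()
{
    // Look up layer support for the (assumed) reference backend.
    armnn::LayerSupportHandle handle = armnn::GetILayerSupportByBackendId("CpuRef");
    if (!handle.IsBackendRegistered())
    {
        std::cout << "CpuRef backend is not registered" << std::endl;
        return 1;
    }

    // Ask whether a simple float32 element-wise addition would be supported.
    armnn::TensorInfo info({ 1, 2, 2, 3 }, armnn::DataType::Float32);
    std::string reason;
    if (handle.IsAdditionSupported(info, info, info, armnn::Optional<std::string&>(reason)))
    {
        std::cout << "Addition is supported" << std::endl;
    }
    else
    {
        std::cout << "Addition is not supported: " << reason << std::endl;
    }
    return 0;
}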