ArmNN 22.02
LayerSupportBase.cpp
//
// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include <armnn/Deprecated.hpp>
#include <armnn/Exceptions.hpp>
#include <armnn/Types.hpp>

#include <backendsCommon/LayerSupportBase.hpp>

#include <armnn/utility/PolymorphicDowncast.hpp>
#include <sstream>

namespace
{

bool DefaultLayerSupport(const char* func,
        const char* file,
        unsigned int line,
        armnn::Optional<std::string&> reasonIfUnsupported)
{
    // NOTE: We only need to return the reason if the optional parameter is not empty
    if (reasonIfUnsupported)
    {
        std::stringstream message;
        message << func << " is not implemented [" << file << ":" << line << "]";

        reasonIfUnsupported.value() = message.str();
    }

    return false;
}

} // anonymous namespace

namespace armnn
{

bool LayerSupportBase::IsLayerSupported(const LayerType& type,
        const std::vector<TensorInfo>& infos,
        const BaseDescriptor& descriptor,
        const Optional<LstmInputParamsInfo>&, // lstmParamsInfo
        const Optional<QuantizedLstmInputParamsInfo>&, // quantizedLstmParamsInfo
        Optional<std::string&> reasonIfUnsupported) const
{
    switch(type)
    {
        case LayerType::MemCopy:
            return IsMemCopySupported(infos[0], infos[1], reasonIfUnsupported);
        case LayerType::MemImport:
            return IsMemImportSupported(infos[0], infos[1], reasonIfUnsupported);
        case LayerType::StandIn:
        {
            auto desc = *(PolymorphicDowncast<const StandInDescriptor*>(&descriptor));

            if (infos.size() != (desc.m_NumInputs + desc.m_NumOutputs))
            {
                throw InvalidArgumentException("Number of StandIn layer TensorInfos does not equal "
                        "the combined number of input and output slots assigned "
                        "to the StandIn descriptor");
            }

            std::vector<const TensorInfo*> inputInfos;
            for (uint32_t i = 0; i < desc.m_NumInputs; i++)
            {
                inputInfos.push_back(&infos[i]);
            }
            std::vector<const TensorInfo*> outputInfos;
            for (uint32_t i = desc.m_NumInputs; i < infos.size(); i++)
            {
                outputInfos.push_back(&infos[i]);
            }

            return IsStandInSupported(inputInfos, outputInfos, desc, reasonIfUnsupported);
        }
        default:
            return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
    }
}

bool LayerSupportBase::IsActivationSupported(const TensorInfo&, // input
        const TensorInfo&, // output
        const ActivationDescriptor&, // descriptor
        Optional<std::string&> reasonIfUnsupported) const
{
    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}

bool LayerSupportBase::IsAdditionSupported(const TensorInfo&, // input0
        const TensorInfo&, // input1
        const TensorInfo&, // output
        Optional<std::string&> reasonIfUnsupported) const
{
    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}

bool LayerSupportBase::IsArgMinMaxSupported(const armnn::TensorInfo&, // input
        const armnn::TensorInfo&, // output
        const armnn::ArgMinMaxDescriptor&, // descriptor
        armnn::Optional<std::string&> reasonIfUnsupported) const
{
    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}

bool LayerSupportBase::IsBatchNormalizationSupported(const TensorInfo&, // input
        const TensorInfo&, // output
        const TensorInfo&, // mean
        const TensorInfo&, // var
        const TensorInfo&, // beta
        const TensorInfo&, // gamma
        const BatchNormalizationDescriptor&, // descriptor
        Optional<std::string&> reasonIfUnsupported) const
{
    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}

bool LayerSupportBase::IsBatchToSpaceNdSupported(const TensorInfo&, // input
        const TensorInfo&, // output
        const BatchToSpaceNdDescriptor&, // descriptor
        Optional<std::string&> reasonIfUnsupported) const
{
    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}

bool LayerSupportBase::IsCastSupported(const TensorInfo&, // input
        const TensorInfo&, // output
        Optional<std::string&> reasonIfUnsupported) const
{
    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}

bool LayerSupportBase::IsChannelShuffleSupported(const TensorInfo&, // input
        const TensorInfo&, // output
        const ChannelShuffleDescriptor&, // descriptor
        Optional<std::string&> reasonIfUnsupported) const
{
    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}

bool LayerSupportBase::IsComparisonSupported(const TensorInfo&, // input0
        const TensorInfo&, // input1
        const TensorInfo&, // output
        const ComparisonDescriptor&, // descriptor
        Optional<std::string&> reasonIfUnsupported) const
{
    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}

bool LayerSupportBase::IsConcatSupported(const std::vector<const TensorInfo*>, // inputs
        const TensorInfo&, // output
        const OriginsDescriptor&, // descriptor
        Optional<std::string&> reasonIfUnsupported) const
{
    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}

bool LayerSupportBase::IsConstantSupported(const TensorInfo&, // output
        Optional<std::string&> reasonIfUnsupported) const
{
    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}

bool LayerSupportBase::IsConvertBf16ToFp32Supported(const TensorInfo&, // input
        const TensorInfo&, // output
        Optional<std::string&> reasonIfUnsupported) const
{
    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}

bool LayerSupportBase::IsConvertFp16ToFp32Supported(const TensorInfo&, // input
        const TensorInfo&, // output
        Optional<std::string&> reasonIfUnsupported) const
{
    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}

bool LayerSupportBase::IsConvertFp32ToBf16Supported(const TensorInfo&, // input
        const TensorInfo&, // output
        Optional<std::string&> reasonIfUnsupported) const
{
    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}

bool LayerSupportBase::IsConvertFp32ToFp16Supported(const TensorInfo&, // input
        const TensorInfo&, // output
        Optional<std::string&> reasonIfUnsupported) const
{
    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}

bool LayerSupportBase::IsConvolution2dSupported(const TensorInfo&, // input
        const TensorInfo&, // output
        const Convolution2dDescriptor&, // descriptor
        const TensorInfo&, // weights
        const Optional<TensorInfo>&, // biases
        Optional<std::string&> reasonIfUnsupported) const
{
    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}

bool LayerSupportBase::IsConvolution3dSupported(const TensorInfo&, // input
        const TensorInfo&, // output
        const Convolution3dDescriptor&, // descriptor
        const TensorInfo&, // weights
        const Optional<TensorInfo>&, // biases
        Optional<std::string&> reasonIfUnsupported) const
{
    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}

bool LayerSupportBase::IsDebugSupported(const TensorInfo&, // input
        const TensorInfo&, // output
        Optional<std::string&> reasonIfUnsupported) const
{
    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}

bool LayerSupportBase::IsDepthToSpaceSupported(const TensorInfo&, // input
        const TensorInfo&, // output
        const DepthToSpaceDescriptor&, // descriptor
        Optional<std::string&> reasonIfUnsupported) const
{
    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}

bool LayerSupportBase::IsDepthwiseConvolutionSupported(const TensorInfo&, // input
        const TensorInfo&, // output
        const DepthwiseConvolution2dDescriptor&, // descriptor
        const TensorInfo&, // weights
        const Optional<TensorInfo>&, // biases
        Optional<std::string&> reasonIfUnsupported) const
{
    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}

bool LayerSupportBase::IsDequantizeSupported(const TensorInfo&, // input
        const TensorInfo&, // output
        Optional<std::string&> reasonIfUnsupported) const
{
    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}

bool LayerSupportBase::IsDetectionPostProcessSupported(const TensorInfo&, // boxEncodings
        const TensorInfo&, // scores
        const TensorInfo&, // anchors
        const TensorInfo&, // detectionBoxes
        const TensorInfo&, // detectionClasses
        const TensorInfo&, // detectionScores
        const TensorInfo&, // numDetections
        const DetectionPostProcessDescriptor&, // descriptor
        Optional<std::string&> reasonIfUnsupported) const
{
    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}

bool LayerSupportBase::IsDilatedDepthwiseConvolutionSupported(const TensorInfo&, // input
        const TensorInfo&, // output
        const DepthwiseConvolution2dDescriptor&, // descriptor
        const TensorInfo&, // weights
        const Optional<TensorInfo>&, // biases
        Optional<std::string&> reasonIfUnsupported) const
{
    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}

bool LayerSupportBase::IsDivisionSupported(const TensorInfo&, // input0
        const TensorInfo&, // input1
        const TensorInfo&, // output
        Optional<std::string&> reasonIfUnsupported) const
{
    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}

bool LayerSupportBase::IsElementwiseUnarySupported(const TensorInfo&, // input
        const TensorInfo&, // output
        const ElementwiseUnaryDescriptor&, // descriptor
        Optional<std::string&> reasonIfUnsupported) const
{
    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}

bool LayerSupportBase::IsFakeQuantizationSupported(const TensorInfo&, // input
        const FakeQuantizationDescriptor&, // descriptor
        Optional<std::string&> reasonIfUnsupported) const
{
    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}

bool LayerSupportBase::IsFillSupported(const TensorInfo&, // input
        const TensorInfo&, // output
        const FillDescriptor&, // descriptor
        Optional<std::string&> reasonIfUnsupported) const
{
    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}

bool LayerSupportBase::IsFloorSupported(const TensorInfo&, // input
        const TensorInfo&, // output
        Optional<std::string&> reasonIfUnsupported) const
{
    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}

bool LayerSupportBase::IsFullyConnectedSupported(const TensorInfo&, // input
        const TensorInfo&, // output
        const TensorInfo&, // weights
        const TensorInfo&, // biases
        const FullyConnectedDescriptor&, // descriptor
        Optional<std::string&> reasonIfUnsupported) const
{
    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}

bool LayerSupportBase::IsGatherSupported(const armnn::TensorInfo&, // input0
        const armnn::TensorInfo&, // input1
        const armnn::TensorInfo&, // output
        const GatherDescriptor&, // descriptor
        armnn::Optional<std::string&> reasonIfUnsupported) const
{
    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}

bool LayerSupportBase::IsInputSupported(const TensorInfo&, // input
        Optional<std::string&> reasonIfUnsupported) const
{
    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}

bool LayerSupportBase::IsInstanceNormalizationSupported(const TensorInfo&, // input
        const TensorInfo&, // output
        const InstanceNormalizationDescriptor&, // descriptor
        Optional<std::string&> reasonIfUnsupported) const
{
    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}

bool LayerSupportBase::IsL2NormalizationSupported(const TensorInfo&, // input
        const TensorInfo&, // output
        const L2NormalizationDescriptor&, // descriptor
        Optional<std::string&> reasonIfUnsupported) const
{
    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}

bool LayerSupportBase::IsLogicalBinarySupported(const TensorInfo&, // input0
        const TensorInfo&, // input1
        const TensorInfo&, // output
        const LogicalBinaryDescriptor&, // descriptor
        Optional<std::string&> reasonIfUnsupported) const
{
    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}

bool LayerSupportBase::IsLogicalUnarySupported(const TensorInfo&, // input
        const TensorInfo&, // output
        const ElementwiseUnaryDescriptor&, // descriptor
        Optional<std::string&> reasonIfUnsupported) const
{
    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}

bool LayerSupportBase::IsLogSoftmaxSupported(const TensorInfo&, // input
        const TensorInfo&, // output
        const LogSoftmaxDescriptor&, // descriptor
        Optional<std::string&> reasonIfUnsupported) const
{
    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}

bool LayerSupportBase::IsLstmSupported(const TensorInfo&, // input
        const TensorInfo&, // outputStateIn
        const TensorInfo&, // cellStateIn
        const TensorInfo&, // scratchBuffer
        const TensorInfo&, // outputStateOut
        const TensorInfo&, // cellStateOut
        const TensorInfo&, // output
        const LstmDescriptor&, // descriptor
        const LstmInputParamsInfo&, // paramsInfo
        Optional<std::string&> reasonIfUnsupported) const
{
    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}

bool LayerSupportBase::IsMaximumSupported(const TensorInfo&, // input0
        const TensorInfo&, // input1
        const TensorInfo&, // output
        Optional<std::string&> reasonIfUnsupported) const
{
    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}

bool LayerSupportBase::IsMeanSupported(const TensorInfo&, // input
        const TensorInfo&, // output
        const MeanDescriptor&, // descriptor
        Optional<std::string&> reasonIfUnsupported) const
{
    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}

bool LayerSupportBase::IsMemCopySupported(const armnn::TensorInfo&, // input
        const armnn::TensorInfo&, // output
        armnn::Optional<std::string&>) const // reasonIfUnsupported
{
    return true;
}

bool LayerSupportBase::IsMemImportSupported(const armnn::TensorInfo&, // input
        const armnn::TensorInfo&, // output
        armnn::Optional<std::string&>) const // reasonIfUnsupported
{
    return true;
}

bool LayerSupportBase::IsMergeSupported(const TensorInfo&, // input0
        const TensorInfo&, // input1
        const TensorInfo&, // output
        Optional<std::string&> reasonIfUnsupported) const
{
    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}

bool LayerSupportBase::IsMinimumSupported(const TensorInfo&, // input0
        const TensorInfo&, // input1
        const TensorInfo&, // output
        Optional<std::string&> reasonIfUnsupported) const
{
    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}

bool LayerSupportBase::IsMultiplicationSupported(const TensorInfo&, // input0
        const TensorInfo&, // input1
        const TensorInfo&, // output
        Optional<std::string&> reasonIfUnsupported) const
{
    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}

bool LayerSupportBase::IsNormalizationSupported(const TensorInfo&, // input
        const TensorInfo&, // output
        const NormalizationDescriptor&, // descriptor
        Optional<std::string&> reasonIfUnsupported) const
{
    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}

bool LayerSupportBase::IsOutputSupported(const TensorInfo&, // output
        Optional<std::string&> reasonIfUnsupported) const
{
    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}

bool LayerSupportBase::IsPadSupported(const TensorInfo&, // input
        const TensorInfo&, // output
        const PadDescriptor&, // descriptor
        Optional<std::string&> reasonIfUnsupported) const
{
    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}

bool LayerSupportBase::IsPermuteSupported(const TensorInfo&, // input
        const TensorInfo&, // output
        const PermuteDescriptor&, // descriptor
        Optional<std::string&> reasonIfUnsupported) const
{
    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}

bool LayerSupportBase::IsPooling2dSupported(const TensorInfo&, // input
        const TensorInfo&, // output
        const Pooling2dDescriptor&, // descriptor
        Optional<std::string&> reasonIfUnsupported) const
{
    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}

bool LayerSupportBase::IsPooling3dSupported(const TensorInfo&, // input
        const TensorInfo&, // output
        const Pooling3dDescriptor&, // descriptor
        Optional<std::string&> reasonIfUnsupported) const
{
    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}

bool LayerSupportBase::IsPreCompiledSupported(const TensorInfo&, // input
        const PreCompiledDescriptor&, // descriptor
        Optional<std::string&> reasonIfUnsupported) const
{
    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}

bool LayerSupportBase::IsPreluSupported(const TensorInfo&, // input
        const TensorInfo&, // alpha
        const TensorInfo&, // output
        Optional<std::string&> reasonIfUnsupported) const
{
    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}

bool LayerSupportBase::IsQuantizeSupported(const armnn::TensorInfo&, // input
        const armnn::TensorInfo&, // output
        armnn::Optional<std::string&> reasonIfUnsupported) const
{
    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}

bool LayerSupportBase::IsQLstmSupported(const TensorInfo&, // input
        const TensorInfo&, // previousOutputIn
        const TensorInfo&, // previousCellStateIn
        const TensorInfo&, // outputStateOut
        const TensorInfo&, // cellStateOut
        const TensorInfo&, // output
        const QLstmDescriptor&, // descriptor
        const LstmInputParamsInfo&, // paramsInfo
        Optional<std::string&> reasonIfUnsupported) const
{
    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}

bool LayerSupportBase::IsQuantizedLstmSupported(const TensorInfo&, // input
        const TensorInfo&, // previousCellStateIn
        const TensorInfo&, // previousOutputIn
        const TensorInfo&, // cellStateOut
        const TensorInfo&, // output
        const QuantizedLstmInputParamsInfo&, // paramsInfo
        Optional<std::string&> reasonIfUnsupported) const
{
    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}

bool LayerSupportBase::IsRankSupported(const TensorInfo&, // input
        const TensorInfo&, // output
        Optional<std::string&> reasonIfUnsupported) const
{
    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}

bool LayerSupportBase::IsReduceSupported(const TensorInfo& /*input*/,
        const TensorInfo& /*output*/,
        const ReduceDescriptor& /*descriptor*/,
        Optional<std::string&> reasonIfUnsupported) const
{
    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}

bool LayerSupportBase::IsReshapeSupported(const TensorInfo&, // input
        const TensorInfo&, // output
        const ReshapeDescriptor&, // descriptor
        Optional<std::string&> reasonIfUnsupported) const
{
    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}

bool LayerSupportBase::IsResizeSupported(const TensorInfo&, // input
        const TensorInfo&, // output
        const ResizeDescriptor&, // descriptor
        Optional<std::string&> reasonIfUnsupported) const
{
    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}

bool LayerSupportBase::IsShapeSupported(const TensorInfo&, // input
        const TensorInfo&, // output
        Optional<std::string&> reasonIfUnsupported) const
{
    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}

bool LayerSupportBase::IsSliceSupported(const TensorInfo&, // input
        const TensorInfo&, // output
        const SliceDescriptor&, // descriptor
        Optional<std::string&> reasonIfUnsupported) const
{
    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}

bool LayerSupportBase::IsSoftmaxSupported(const TensorInfo&, // input
        const TensorInfo&, // output
        const SoftmaxDescriptor&, // descriptor
        Optional<std::string&> reasonIfUnsupported) const
{
    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}

bool LayerSupportBase::IsSpaceToBatchNdSupported(const TensorInfo&, // input
        const TensorInfo&, // output
        const SpaceToBatchNdDescriptor&, // descriptor
        Optional<std::string&> reasonIfUnsupported) const
{
    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}

bool LayerSupportBase::IsSpaceToDepthSupported(const TensorInfo&, // input
        const TensorInfo&, // output
        const SpaceToDepthDescriptor&, // descriptor
        Optional<std::string&> reasonIfUnsupported) const
{
    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}

bool LayerSupportBase::IsSplitterSupported(const TensorInfo&, // input
        const std::vector<std::reference_wrapper<TensorInfo>>&, // outputs
        const ViewsDescriptor&, // descriptor
        Optional<std::string&> reasonIfUnsupported) const
{
    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}

bool LayerSupportBase::IsStackSupported(const std::vector<const TensorInfo*>&, // inputs
        const TensorInfo&, // output
        const StackDescriptor&, // descriptor
        Optional<std::string&> reasonIfUnsupported) const
{
    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}

bool LayerSupportBase::IsStandInSupported(const std::vector<const TensorInfo*>&, // inputs
        const std::vector<const TensorInfo*>&, // outputs
        const StandInDescriptor&, // descriptor
        Optional<std::string&> reasonIfUnsupported) const
{
    if (reasonIfUnsupported)
    {
        std::stringstream message;
        message << "StandIn layer is not executable via backends";

        reasonIfUnsupported.value() = message.str();
    }

    return false;
}

bool LayerSupportBase::IsStridedSliceSupported(const TensorInfo&, // input
        const TensorInfo&, // output
        const StridedSliceDescriptor&, // descriptor
        Optional<std::string&> reasonIfUnsupported) const
{
    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}

bool LayerSupportBase::IsSubtractionSupported(const TensorInfo&, // input0
        const TensorInfo&, // input1
        const TensorInfo&, // output
        Optional<std::string&> reasonIfUnsupported) const
{
    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}

bool LayerSupportBase::IsSwitchSupported(const TensorInfo&, // input0
        const TensorInfo&, // input1
        const TensorInfo&, // output0
        const TensorInfo&, // output1
        Optional<std::string&> reasonIfUnsupported) const
{
    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}

bool LayerSupportBase::IsTransposeConvolution2dSupported(const TensorInfo&, // input
        const TensorInfo&, // output
        const TransposeConvolution2dDescriptor&, // descriptor
        const TensorInfo&, // weights
        const Optional<TensorInfo>&, // biases
        Optional<std::string&> reasonIfUnsupported) const
{
    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}

bool LayerSupportBase::IsTransposeSupported(const TensorInfo&, // input
        const TensorInfo&, // output
        const TransposeDescriptor&, // descriptor
        Optional<std::string&> reasonIfUnsupported) const
{
    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}

bool LayerSupportBase::IsUnidirectionalSequenceLstmSupported(const TensorInfo&, // input
        const TensorInfo&, // outputStateIn
        const TensorInfo&, // cellStateIn
        const TensorInfo&, // output
        const Optional<TensorInfo>&, // hiddenStateOut
        const Optional<TensorInfo>&, // cellStateOut
        const LstmDescriptor&, // descriptor
        const LstmInputParamsInfo&, // paramsInfo
        Optional<std::string&> reasonIfUnsupported) const
{
    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}

} // namespace armnn
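Every query above that is not overridden falls through to DefaultLayerSupport, which answers "not supported" and, when the caller passed a reason string, fills it with "<function> is not implemented [file:line]". A backend therefore opts in layer by layer, overriding only the queries it can actually serve and inheriting the default answer for everything else. The sketch below illustrates that pattern; the class name CustomLayerSupport, the Float32-only check, and the include path for the LayerSupportBase header are illustrative assumptions rather than part of the ArmNN sources.

// Minimal sketch (assumed class name and include path): a backend layer-support
// class that accepts Float32 activations and keeps the base-class behaviour,
// i.e. "not implemented", for every other query.
#include <backendsCommon/LayerSupportBase.hpp> // assumed include path

#include <armnn/Descriptors.hpp>
#include <armnn/Optional.hpp>
#include <armnn/Tensor.hpp>

#include <iostream>
#include <string>

class CustomLayerSupport : public armnn::LayerSupportBase
{
public:
    bool IsActivationSupported(const armnn::TensorInfo& input,
            const armnn::TensorInfo& output,
            const armnn::ActivationDescriptor&, // descriptor
            armnn::Optional<std::string&> reasonIfUnsupported) const override
    {
        // Accept Float32 in/out only; otherwise report a reason, mirroring the base class.
        if (input.GetDataType() == armnn::DataType::Float32 &&
            output.GetDataType() == armnn::DataType::Float32)
        {
            return true;
        }
        if (reasonIfUnsupported)
        {
            reasonIfUnsupported.value() = "CustomLayerSupport: only Float32 activations are handled";
        }
        return false;
    }
};

int main()
{
    CustomLayerSupport support;
    armnn::TensorInfo info(armnn::TensorShape({1, 4}), armnn::DataType::Float32);

    std::string reason;
    armnn::Optional<std::string&> reasonRef(reason);

    // Overridden query: reports support for Float32 tensors.
    std::cout << support.IsActivationSupported(info, info, armnn::ActivationDescriptor(), reasonRef) << "\n";

    // Query left to the base class: DefaultLayerSupport returns false and fills in the reason.
    std::cout << support.IsSoftmaxSupported(info, info, armnn::SoftmaxDescriptor(), reasonRef) << "\n";
    std::cout << reason << "\n";
}

The in-tree backends take the same approach; for example RefLayerSupport derives from LayerSupportBase and overrides the queries its workloads implement.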