ArmNN 22.11
BackendHelper.cpp
//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include <armnn/BackendHelper.hpp>
#include <armnn/BackendRegistry.hpp>
#include <armnn/Logging.hpp>

#include <armnn/backends/IBackendInternal.hpp>

namespace armnn
{

// Return LayerSupportHandle instead of the previous pointer to ILayerSupport.
LayerSupportHandle GetILayerSupportByBackendId(const armnn::BackendId& backend)
{
    auto const& backendRegistry = armnn::BackendRegistryInstance();

    if (!backendRegistry.IsBackendRegistered(backend))
    {
        return LayerSupportHandle(nullptr);
    }

    auto factoryFunc = backendRegistry.GetFactory(backend);
    auto backendObject = factoryFunc();
    return LayerSupportHandle(backendObject->GetLayerSupport(), backend);
}

Optional<const BackendOptions::BackendOption> GetCapability(const std::string& backendCapabilityName,
                                                            const BackendCapabilities& capabilities)
{
    for (size_t i=0; i < capabilities.GetOptionCount(); i++)
    {
        const auto& capability = capabilities.GetOption(i);
        if (backendCapabilityName == capability.GetName())
        {
            return capability;
        }
    }
    return EmptyOptional();
}

Optional<const BackendOptions::BackendOption> GetCapability(const std::string& backendCapabilityName,
                                                            const armnn::BackendId& backend)
{
    auto const& backendRegistry = armnn::BackendRegistryInstance();
    if (backendRegistry.IsBackendRegistered(backend))
    {
        auto factoryFunc = backendRegistry.GetFactory(backend);
        auto backendObject = factoryFunc();
        auto capabilities = backendObject->GetCapabilities();
        return GetCapability(backendCapabilityName, capabilities);
    }
    return EmptyOptional();
}

bool HasCapability(const std::string& name, const BackendCapabilities& capabilities)
{
    return GetCapability(name, capabilities).has_value();
}

bool HasCapability(const std::string& name, const armnn::BackendId& backend)
{
    return GetCapability(name, backend).has_value();
}

bool HasCapability(const BackendOptions::BackendOption& capability, const BackendCapabilities& capabilities)
{
    for (size_t i=0; i < capabilities.GetOptionCount(); i++)
    {
        const auto& backendCapability = capabilities.GetOption(i);
        if (capability.GetName() == backendCapability.GetName())
        {
            if (capability.GetValue().IsBool() && backendCapability.GetValue().IsBool())
            {
                return capability.GetValue().AsBool() == backendCapability.GetValue().AsBool();
            }
            else if (capability.GetValue().IsFloat() && backendCapability.GetValue().IsFloat())
            {
                return capability.GetValue().AsFloat() == backendCapability.GetValue().AsFloat();
            }
            else if (capability.GetValue().IsInt() && backendCapability.GetValue().IsInt())
            {
                return capability.GetValue().AsInt() == backendCapability.GetValue().AsInt();
            }
            else if (capability.GetValue().IsString() && backendCapability.GetValue().IsString())
            {
                return capability.GetValue().AsString() == backendCapability.GetValue().AsString();
            }
            else if (capability.GetValue().IsUnsignedInt() && backendCapability.GetValue().IsUnsignedInt())
            {
                return capability.GetValue().AsUnsignedInt() == backendCapability.GetValue().AsUnsignedInt();
            }
        }
    }
    return false;
}

bool HasCapability(const BackendOptions::BackendOption& backendOption, const armnn::BackendId& backend)
{
    auto const& backendRegistry = armnn::BackendRegistryInstance();
    if (backendRegistry.IsBackendRegistered(backend))
    {
        auto factoryFunc = backendRegistry.GetFactory(backend);
        auto backendObject = factoryFunc();
        auto capabilities = backendObject->GetCapabilities();
        return HasCapability(backendOption, capabilities);
    }
    return false;
}

unsigned int GetNumberOfCacheFiles(const armnn::BackendId& backend)
{
    auto const& backendRegistry = armnn::BackendRegistryInstance();
    if (backendRegistry.IsBackendRegistered(backend))
    {
        auto factoryFunc = backendRegistry.GetFactory(backend);
        auto backendObject = factoryFunc();
        return backendObject->GetNumberOfCacheFiles();
    }
    return 0;
}

bool LayerSupportHandle::IsBackendRegistered() const
{
    if (m_LayerSupport)
    {
        return true;
    }

    return false;
}

using TensorInfos = std::vector<TensorInfo>;

bool LayerSupportHandle::IsActivationSupported(const TensorInfo& input,
                                               const TensorInfo& output,
                                               const ActivationDescriptor& descriptor,
                                               Optional<std::string&> reasonIfUnsupported)
{
    TensorInfos infos{input, output};

    return m_LayerSupport->IsLayerSupported(LayerType::Activation,
                                            infos,
                                            descriptor,
                                            EmptyOptional(),
                                            EmptyOptional(),
                                            reasonIfUnsupported);
}

bool LayerSupportHandle::IsAdditionSupported(const TensorInfo& input0,
                                             const TensorInfo& input1,
                                             const TensorInfo& output,
                                             Optional<std::string&> reasonIfUnsupported)
{
    TensorInfos infos{input0, input1, output};

    return m_LayerSupport->IsLayerSupported(LayerType::Addition,
                                            infos,
                                            BaseDescriptor(),
                                            EmptyOptional(),
                                            EmptyOptional(),
                                            reasonIfUnsupported);
}

bool LayerSupportHandle::IsArgMinMaxSupported(const TensorInfo& input,
                                              const TensorInfo& output,
                                              const ArgMinMaxDescriptor& descriptor,
                                              Optional<std::string&> reasonIfUnsupported)
{
    TensorInfos infos{input, output};

    return m_LayerSupport->IsLayerSupported(LayerType::ArgMinMax,
                                            infos,
                                            descriptor,
                                            EmptyOptional(),
                                            EmptyOptional(),
                                            reasonIfUnsupported);
}

bool LayerSupportHandle::IsBatchMatMulSupported(const TensorInfo& input0,
                                                const TensorInfo& input1,
                                                const TensorInfo& output,
                                                const BatchMatMulDescriptor& descriptor,
                                                Optional<std::string&> reasonIfUnsupported)
{
    TensorInfos infos{input0, input1, output};

    return m_LayerSupport->IsLayerSupported(LayerType::BatchMatMul,
                                            infos,
                                            descriptor,
                                            EmptyOptional(),
                                            EmptyOptional(),
                                            reasonIfUnsupported);
}

bool LayerSupportHandle::IsBatchNormalizationSupported(const TensorInfo& input,
                                                       const TensorInfo& output,
                                                       const TensorInfo& mean,
                                                       const TensorInfo& var,
                                                       const TensorInfo& beta,
                                                       const TensorInfo& gamma,
                                                       const BatchNormalizationDescriptor& descriptor,
                                                       Optional<std::string&> reasonIfUnsupported)
{
    TensorInfos infos{input, output, mean, var, beta, gamma};

    return m_LayerSupport->IsLayerSupported(LayerType::BatchNormalization,
                                            infos,
                                            descriptor,
                                            EmptyOptional(),
                                            EmptyOptional(),
                                            reasonIfUnsupported);
}

bool LayerSupportHandle::IsBatchToSpaceNdSupported(const TensorInfo& input,
                                                   const TensorInfo& output,
                                                   const BatchToSpaceNdDescriptor& descriptor,
                                                   Optional<std::string&> reasonIfUnsupported)
{
    TensorInfos infos{input, output};

    return m_LayerSupport->IsLayerSupported(LayerType::BatchToSpaceNd,
                                            infos,
                                            descriptor,
                                            EmptyOptional(),
                                            EmptyOptional(),
                                            reasonIfUnsupported);
}

bool LayerSupportHandle::IsCastSupported(const TensorInfo& input,
                                         const TensorInfo& output,
                                         Optional<std::string&> reasonIfUnsupported)
{
    TensorInfos infos{input, output};

    return m_LayerSupport->IsLayerSupported(LayerType::Cast,
                                            infos,
                                            BaseDescriptor(),
                                            EmptyOptional(),
                                            EmptyOptional(),
                                            reasonIfUnsupported);
}

bool LayerSupportHandle::IsChannelShuffleSupported(const TensorInfo &input,
                                                   const TensorInfo &output,
                                                   const ChannelShuffleDescriptor &descriptor,
                                                   Optional<std::string &> reasonIfUnsupported)
{
    TensorInfos infos{input, output};

    return m_LayerSupport->IsLayerSupported(LayerType::ChannelShuffle,
                                            infos,
                                            descriptor,
                                            EmptyOptional(),
                                            EmptyOptional(),
                                            reasonIfUnsupported);
}

bool LayerSupportHandle::IsComparisonSupported(const TensorInfo& input0,
                                               const TensorInfo& input1,
                                               const TensorInfo& output,
                                               const ComparisonDescriptor& descriptor,
                                               Optional<std::string&> reasonIfUnsupported)
{
    TensorInfos infos{input0, input1, output};

    return m_LayerSupport->IsLayerSupported(LayerType::Comparison,
                                            infos,
                                            descriptor,
                                            EmptyOptional(),
                                            EmptyOptional(),
                                            reasonIfUnsupported);
}

bool LayerSupportHandle::IsConcatSupported(const std::vector<const TensorInfo*> inputs,
                                           const TensorInfo& output,
                                           const OriginsDescriptor& descriptor,
                                           Optional<std::string&> reasonIfUnsupported)
{
    TensorInfos infos;
    for (const TensorInfo* inputInfo : inputs)
    {
        infos.push_back(*inputInfo);
    }
    infos.push_back(output);

    return m_LayerSupport->IsLayerSupported(LayerType::Concat,
                                            infos,
                                            descriptor,
                                            EmptyOptional(),
                                            EmptyOptional(),
                                            reasonIfUnsupported);
}

bool LayerSupportHandle::IsConstantSupported(const TensorInfo& output,
                                             Optional<std::string&> reasonIfUnsupported)
{
    TensorInfos infos{output};

    return m_LayerSupport->IsLayerSupported(LayerType::Constant,
                                            infos,
                                            BaseDescriptor(),
                                            EmptyOptional(),
                                            EmptyOptional(),
                                            reasonIfUnsupported);
}

bool LayerSupportHandle::IsConvertBf16ToFp32Supported(const TensorInfo& input,
                                                      const TensorInfo& output,
                                                      Optional<std::string&> reasonIfUnsupported)
{
    TensorInfos infos{input, output};

    return m_LayerSupport->IsLayerSupported(LayerType::ConvertBf16ToFp32,
                                            infos,
                                            BaseDescriptor(),
                                            EmptyOptional(),
                                            EmptyOptional(),
                                            reasonIfUnsupported);
}

bool LayerSupportHandle::IsConvertFp32ToBf16Supported(const TensorInfo& input,
                                                      const TensorInfo& output,
                                                      Optional<std::string&> reasonIfUnsupported)
{
    TensorInfos infos{input, output};

    return m_LayerSupport->IsLayerSupported(LayerType::ConvertFp32ToBf16,
                                            infos,
                                            BaseDescriptor(),
                                            EmptyOptional(),
                                            EmptyOptional(),
                                            reasonIfUnsupported);
}

bool LayerSupportHandle::IsConvertFp16ToFp32Supported(const TensorInfo& input,
                                                      const TensorInfo& output,
                                                      Optional<std::string&> reasonIfUnsupported)
{
    TensorInfos infos{input, output};

    return m_LayerSupport->IsLayerSupported(LayerType::ConvertFp16ToFp32,
                                            infos,
                                            BaseDescriptor(),
                                            EmptyOptional(),
                                            EmptyOptional(),
                                            reasonIfUnsupported);
}

bool LayerSupportHandle::IsConvertFp32ToFp16Supported(const TensorInfo& input,
                                                      const TensorInfo& output,
                                                      Optional<std::string&> reasonIfUnsupported)
{
    TensorInfos infos{input, output};

    return m_LayerSupport->IsLayerSupported(LayerType::ConvertFp32ToFp16,
                                            infos,
                                            BaseDescriptor(),
                                            EmptyOptional(),
                                            EmptyOptional(),
                                            reasonIfUnsupported);
}

bool LayerSupportHandle::IsConvolution2dSupported(const TensorInfo& input,
                                                  const TensorInfo& output,
                                                  const Convolution2dDescriptor& descriptor,
                                                  const TensorInfo& weights,
                                                  const Optional<TensorInfo>& biases,
                                                  Optional<std::string&> reasonIfUnsupported)
{
    TensorInfo biasesVal = biases.has_value() ? biases.value() : TensorInfo();
    TensorInfos infos{input, output, weights, biasesVal};

    Optional<const BackendOptions::BackendOption> capability;
    if (!m_BackendId.IsUndefined())
    {
        capability = GetCapability("NonConstWeights", m_BackendId);
        if (!capability.has_value() || capability.value().GetValue().AsBool() == false)
        {
            if (!weights.IsConstant())
            {
                if (reasonIfUnsupported.has_value())
                {
                    reasonIfUnsupported.value() =
                        "Backend is not capable of supporting dynamic weights (NonConstWeights) and "
                        "Convolution2d weights are set as dynamic (non constant). ";
                }
                return false;
            }
            if (descriptor.m_BiasEnabled && !biasesVal.IsConstant())
            {
                if (reasonIfUnsupported.has_value())
                {
                    reasonIfUnsupported.value() =
                        "Backend is not capable of supporting dynamic biases (NonConstWeights) and "
                        "Convolution2d biases are set as dynamic (non constant). ";
                }
                return false;
            }

            // At the first stage we will only print a warning. This is to give
            // backend developers a chance to adopt and read weights from input slots.
            ARMNN_LOG(warning) << "The backend makes use of a deprecated interface to read constant tensors. "
                                  "If you are a backend developer please find more information in our "
                                  "doxygen documentation on github https://github.com/ARM-software/armnn "
                                  "under the keyword 'ConstTensorsAsInputs'.";
        }
    }

    return m_LayerSupport->IsLayerSupported(LayerType::Convolution2d,
                                            infos,
                                            descriptor,
                                            EmptyOptional(),
                                            EmptyOptional(),
                                            reasonIfUnsupported);
}

bool LayerSupportHandle::IsConvolution3dSupported(const TensorInfo& input,
                                                  const TensorInfo& output,
                                                  const Convolution3dDescriptor& descriptor,
                                                  const TensorInfo& weights,
                                                  const Optional<TensorInfo>& biases,
                                                  Optional<std::string&> reasonIfUnsupported)
{
    TensorInfo biasesVal = biases.has_value() ? biases.value() : TensorInfo();
    TensorInfos infos{input, output, weights, biasesVal};

    return m_LayerSupport->IsLayerSupported(LayerType::Convolution3d,
                                            infos,
                                            descriptor,
                                            EmptyOptional(),
                                            EmptyOptional(),
                                            reasonIfUnsupported);
}

bool LayerSupportHandle::IsDebugSupported(const TensorInfo& input,
                                          const TensorInfo& output,
                                          Optional<std::string&> reasonIfUnsupported)
{
    TensorInfos infos{input, output};

    return m_LayerSupport->IsLayerSupported(LayerType::Debug,
                                            infos,
                                            BaseDescriptor(),
                                            EmptyOptional(),
                                            EmptyOptional(),
                                            reasonIfUnsupported);
}

bool LayerSupportHandle::IsDepthToSpaceSupported(const TensorInfo& input,
                                                 const TensorInfo& output,
                                                 const DepthToSpaceDescriptor& descriptor,
                                                 Optional<std::string&> reasonIfUnsupported)
{
    TensorInfos infos{input, output};

    return m_LayerSupport->IsLayerSupported(LayerType::DepthToSpace,
                                            infos,
                                            descriptor,
                                            EmptyOptional(),
                                            EmptyOptional(),
                                            reasonIfUnsupported);
}

bool LayerSupportHandle::IsDepthwiseConvolutionSupported(
    const TensorInfo& input,
    const TensorInfo& output,
    const DepthwiseConvolution2dDescriptor& descriptor,
    const TensorInfo& weights,
    const Optional<TensorInfo>& biases,
    Optional<std::string&> reasonIfUnsupported)
{
    TensorInfo biasesVal = biases.has_value() ? biases.value() : TensorInfo();
    TensorInfos infos{input, output, weights, biasesVal};

    Optional<const BackendOptions::BackendOption> capability;
    if (!m_BackendId.IsUndefined())
    {
        capability = GetCapability("NonConstWeights", m_BackendId);
        if (!capability.has_value() || capability.value().GetValue().AsBool() == false)
        {
            if (!weights.IsConstant())
            {
                if (reasonIfUnsupported.has_value())
                {
                    reasonIfUnsupported.value() =
                        "Backend is not capable of supporting dynamic weights (NonConstWeights) and "
                        "DepthwiseConvolution2d weights are set as dynamic (non constant). ";
                }
                return false;
            }
            if (descriptor.m_BiasEnabled && !biasesVal.IsConstant())
            {
                if (reasonIfUnsupported.has_value())
                {
                    reasonIfUnsupported.value() =
                        "Backend is not capable of supporting dynamic biases (NonConstWeights) and "
                        "DepthwiseConvolution2d biases are set as dynamic (non constant). ";
                }
                return false;
            }
            // At the first stage we will only print a warning. This is to give
            // backend developers a chance to adopt and read weights from input slots.
            ARMNN_LOG(warning) << "The backend makes use of a deprecated interface to read constant tensors. "
                                  "If you are a backend developer please find more information in our "
                                  "doxygen documentation on github https://github.com/ARM-software/armnn "
                                  "under the keyword 'ConstTensorsAsInputs'.";
        }
    }

    return m_LayerSupport->IsLayerSupported(LayerType::DepthwiseConvolution2d,
                                            infos,
                                            descriptor,
                                            EmptyOptional(),
                                            EmptyOptional(),
                                            reasonIfUnsupported);
}

bool LayerSupportHandle::IsDequantizeSupported(const TensorInfo& input,
                                               const TensorInfo& output,
                                               Optional<std::string&> reasonIfUnsupported)
{
    TensorInfos infos{input, output};

    return m_LayerSupport->IsLayerSupported(LayerType::Dequantize,
                                            infos,
                                            BaseDescriptor(),
                                            EmptyOptional(),
                                            EmptyOptional(),
                                            reasonIfUnsupported);
}

bool LayerSupportHandle::IsDetectionPostProcessSupported(const TensorInfo& boxEncodings,
                                                         const TensorInfo& scores,
                                                         const TensorInfo& anchors,
                                                         const TensorInfo& detectionBoxes,
                                                         const TensorInfo& detectionClasses,
                                                         const TensorInfo& detectionScores,
                                                         const TensorInfo& numDetections,
                                                         const DetectionPostProcessDescriptor& descriptor,
                                                         Optional<std::string&> reasonIfUnsupported)
{
    TensorInfos infos{boxEncodings, scores, anchors, detectionBoxes, detectionClasses, detectionScores, numDetections};

    return m_LayerSupport->IsLayerSupported(LayerType::DetectionPostProcess,
                                            infos,
                                            descriptor,
                                            EmptyOptional(),
                                            EmptyOptional(),
                                            reasonIfUnsupported);
}

bool LayerSupportHandle::IsDilatedDepthwiseConvolutionSupported(
    const TensorInfo& input,
    const TensorInfo& output,
    const DepthwiseConvolution2dDescriptor& descriptor,
    const TensorInfo& weights,
    const Optional<TensorInfo>& biases,
    Optional<std::string&> reasonIfUnsupported)
{
    TensorInfo biasesVal = biases.has_value() ? biases.value() : TensorInfo();
    TensorInfos infos{input, output, weights, biasesVal};

    Optional<const BackendOptions::BackendOption> capability;
    if (!m_BackendId.IsUndefined())
    {
        capability = GetCapability("NonConstWeights", m_BackendId);
        if (!capability.has_value() || capability.value().GetValue().AsBool() == false)
        {
            if (!weights.IsConstant())
            {
                if (reasonIfUnsupported.has_value())
                {
                    reasonIfUnsupported.value() =
                        "Backend is not capable of supporting dynamic weights (NonConstWeights) and "
                        "DilatedDepthwiseConvolution2d weights are set as dynamic (non constant). ";
                }
                return false;
            }
            if (descriptor.m_BiasEnabled && !biasesVal.IsConstant())
            {
                if (reasonIfUnsupported.has_value())
                {
                    reasonIfUnsupported.value() =
                        "Backend is not capable of supporting dynamic biases (NonConstWeights) and "
                        "DilatedDepthwiseConvolution2d biases are set as dynamic (non constant). ";
                }
                return false;
            }
            // At the first stage we will only print a warning. This is to give
            // backend developers a chance to adopt and read weights from input slots.
            ARMNN_LOG(warning) << "The backend makes use of a deprecated interface to read constant tensors. "
                                  "If you are a backend developer please find more information in our "
                                  "doxygen documentation on github https://github.com/ARM-software/armnn "
                                  "under the keyword 'ConstTensorsAsInputs'.";
        }
    }

    return m_LayerSupport->IsLayerSupported(LayerType::DepthwiseConvolution2d,
                                            infos,
                                            descriptor,
                                            EmptyOptional(),
                                            EmptyOptional(),
                                            reasonIfUnsupported);
}

bool LayerSupportHandle::IsDivisionSupported(const TensorInfo& input0,
                                             const TensorInfo& input1,
                                             const TensorInfo& output,
                                             Optional<std::string&> reasonIfUnsupported)
{
    TensorInfos infos{input0, input1, output};

    return m_LayerSupport->IsLayerSupported(LayerType::Division,
                                            infos,
                                            BaseDescriptor(),
                                            EmptyOptional(),
                                            EmptyOptional(),
                                            reasonIfUnsupported);
}

bool LayerSupportHandle::IsElementwiseUnarySupported(const TensorInfo& input,
                                                     const TensorInfo& output,
                                                     const ElementwiseUnaryDescriptor& descriptor,
                                                     Optional<std::string&> reasonIfUnsupported)
{
    TensorInfos infos{input, output};

    return m_LayerSupport->IsLayerSupported(LayerType::ElementwiseUnary,
                                            infos,
                                            descriptor,
                                            EmptyOptional(),
                                            EmptyOptional(),
                                            reasonIfUnsupported);
}

bool LayerSupportHandle::IsFakeQuantizationSupported(const TensorInfo& input,
                                                     const FakeQuantizationDescriptor& descriptor,
                                                     Optional<std::string&> reasonIfUnsupported)
{
    TensorInfos infos{input};

    return m_LayerSupport->IsLayerSupported(LayerType::FakeQuantization,
                                            infos,
                                            descriptor,
                                            EmptyOptional(),
                                            EmptyOptional(),
                                            reasonIfUnsupported);
}

bool LayerSupportHandle::IsFillSupported(const TensorInfo& input,
                                         const TensorInfo& output,
                                         const FillDescriptor& descriptor,
                                         Optional<std::string&> reasonIfUnsupported)
{
    TensorInfos infos{input, output};

    return m_LayerSupport->IsLayerSupported(LayerType::Fill,
                                            infos,
                                            descriptor,
                                            EmptyOptional(),
                                            EmptyOptional(),
                                            reasonIfUnsupported);
}

bool LayerSupportHandle::IsFloorSupported(const TensorInfo& input,
                                          const TensorInfo& output,
                                          Optional<std::string&> reasonIfUnsupported)
{
    TensorInfos infos{input, output};

    return m_LayerSupport->IsLayerSupported(LayerType::Floor,
                                            infos,
                                            BaseDescriptor(),
                                            EmptyOptional(),
                                            EmptyOptional(),
                                            reasonIfUnsupported);
}

bool LayerSupportHandle::IsFullyConnectedSupported(const TensorInfo& input,
                                                   const TensorInfo& output,
                                                   const TensorInfo& weights,
                                                   const TensorInfo& biases,
                                                   const FullyConnectedDescriptor& descriptor,
                                                   Optional<std::string&> reasonIfUnsupported)
{
    TensorInfos infos{input, output, weights, biases};

    Optional<const BackendOptions::BackendOption> capability;
    if (!m_BackendId.IsUndefined())
    {
        capability = GetCapability("NonConstWeights", m_BackendId);
        if (!capability.has_value() || capability.value().GetValue().AsBool() == false)
        {
            if (!descriptor.m_ConstantWeights)
            {
                if (reasonIfUnsupported.has_value())
                {
                    reasonIfUnsupported.value() =
                        "Backend is not capable of supporting dynamic weights (NonConstWeights) and "
                        "FullyConnected descriptor indicates that weights are dynamic (non constant). ";
                }
                return false;
            }
            if (!weights.IsConstant())
            {
                if (reasonIfUnsupported.has_value())
                {
                    reasonIfUnsupported.value() =
                        "Backend is not capable of supporting dynamic weights (NonConstWeights) and "
                        "FullyConnected weights are set as dynamic (non constant). ";
                }

                return false;
            }
            if (descriptor.m_BiasEnabled && !biases.IsConstant())
            {
                if (reasonIfUnsupported.has_value())
                {
                    reasonIfUnsupported.value() =
                        "Backend is not capable of supporting dynamic biases (NonConstWeights) and "
                        "FullyConnected biases are set as dynamic (non constant). ";
                }
                return false;
            }

            // At the first stage we will only print a warning. This is to give
            // backend developers a chance to adopt and read weights from input slots.
            ARMNN_LOG(warning) << "The backend makes use of a deprecated interface to read constant tensors. "
                                  "If you are a backend developer please find more information in our "
                                  "doxygen documentation on github https://github.com/ARM-software/armnn "
                                  "under the keyword 'ConstTensorsAsInputs'.";
        }
    }

    return m_LayerSupport->IsLayerSupported(LayerType::FullyConnected,
                                            infos,
                                            descriptor,
                                            EmptyOptional(),
                                            EmptyOptional(),
                                            reasonIfUnsupported);
}

bool LayerSupportHandle::IsGatherSupported(const TensorInfo& input0,
                                           const TensorInfo& input1,
                                           const TensorInfo& output,
                                           const GatherDescriptor& descriptor,
                                           Optional<std::string&> reasonIfUnsupported)
{
    TensorInfos infos{input0, input1, output};

    return m_LayerSupport->IsLayerSupported(LayerType::Gather,
                                            infos,
                                            descriptor,
                                            EmptyOptional(),
                                            EmptyOptional(),
                                            reasonIfUnsupported);
}

bool LayerSupportHandle::IsGatherNdSupported(const TensorInfo& input0,
                                             const TensorInfo& input1,
                                             const TensorInfo& output,
                                             Optional<std::string&> reasonIfUnsupported)
{
    TensorInfos infos{input0, input1, output};

    return m_LayerSupport->IsLayerSupported(LayerType::GatherNd,
                                            infos,
                                            BaseDescriptor(),
                                            EmptyOptional(),
                                            EmptyOptional(),
                                            reasonIfUnsupported);
}

bool LayerSupportHandle::IsInputSupported(const TensorInfo& input,
                                          Optional<std::string&> reasonIfUnsupported)
{
    TensorInfos infos{input};

    return m_LayerSupport->IsLayerSupported(LayerType::Input,
                                            infos,
                                            BaseDescriptor(),
                                            EmptyOptional(),
                                            EmptyOptional(),
                                            reasonIfUnsupported);
}

bool LayerSupportHandle::IsInstanceNormalizationSupported(
    const TensorInfo& input,
    const TensorInfo& output,
    const InstanceNormalizationDescriptor& descriptor,
    Optional<std::string&> reasonIfUnsupported)
{
    TensorInfos infos{input, output};

    return m_LayerSupport->IsLayerSupported(LayerType::InstanceNormalization,
                                            infos,
                                            descriptor,
                                            EmptyOptional(),
                                            EmptyOptional(),
                                            reasonIfUnsupported);
}

bool LayerSupportHandle::IsL2NormalizationSupported(const TensorInfo& input,
                                                    const TensorInfo& output,
                                                    const L2NormalizationDescriptor& descriptor,
                                                    Optional<std::string&> reasonIfUnsupported)
{
    TensorInfos infos{input, output};

    return m_LayerSupport->IsLayerSupported(LayerType::L2Normalization,
                                            infos,
                                            descriptor,
                                            EmptyOptional(),
                                            EmptyOptional(),
                                            reasonIfUnsupported);
}

bool LayerSupportHandle::IsLogicalBinarySupported(const TensorInfo& input0,
                                                  const TensorInfo& input1,
                                                  const TensorInfo& output,
                                                  const LogicalBinaryDescriptor& descriptor,
                                                  Optional<std::string&> reasonIfUnsupported)
{
    TensorInfos infos{input0, input1, output};

    return m_LayerSupport->IsLayerSupported(LayerType::LogicalBinary,
                                            infos,
                                            descriptor,
                                            EmptyOptional(),
                                            EmptyOptional(),
                                            reasonIfUnsupported);
}

bool LayerSupportHandle::IsLogicalUnarySupported(const TensorInfo& input,
                                                 const TensorInfo& output,
                                                 const ElementwiseUnaryDescriptor& descriptor,
                                                 Optional<std::string&> reasonIfUnsupported)
{
    TensorInfos infos{input, output};

    return m_LayerSupport->IsLayerSupported(LayerType::ElementwiseUnary,
                                            infos,
                                            descriptor,
                                            EmptyOptional(),
                                            EmptyOptional(),
                                            reasonIfUnsupported);
}

bool LayerSupportHandle::IsLogSoftmaxSupported(const TensorInfo& input,
                                               const TensorInfo& output,
                                               const LogSoftmaxDescriptor& descriptor,
                                               Optional<std::string&> reasonIfUnsupported)
{
    TensorInfos infos{input, output};

    return m_LayerSupport->IsLayerSupported(LayerType::LogSoftmax,
                                            infos,
                                            descriptor,
                                            EmptyOptional(),
                                            EmptyOptional(),
                                            reasonIfUnsupported);
}

bool LayerSupportHandle::IsLstmSupported(const TensorInfo& input,
                                         const TensorInfo& outputStateIn,
                                         const TensorInfo& cellStateIn,
                                         const TensorInfo& scratchBuffer,
                                         const TensorInfo& outputStateOut,
                                         const TensorInfo& cellStateOut,
                                         const TensorInfo& output,
                                         const LstmDescriptor& descriptor,
                                         const LstmInputParamsInfo& paramsInfo,
                                         Optional<std::string&> reasonIfUnsupported)
{
    TensorInfos infos{input, outputStateIn, cellStateIn, scratchBuffer, outputStateOut, cellStateOut, output};

    return m_LayerSupport->IsLayerSupported(LayerType::Lstm,
                                            infos,
                                            descriptor,
                                            paramsInfo,
                                            EmptyOptional(),
                                            reasonIfUnsupported);
}

bool LayerSupportHandle::IsMaximumSupported(const TensorInfo& input0,
                                            const TensorInfo& input1,
                                            const TensorInfo& output,
                                            Optional<std::string&> reasonIfUnsupported)
{
    TensorInfos infos{input0, input1, output};

    return m_LayerSupport->IsLayerSupported(LayerType::Maximum,
                                            infos,
                                            BaseDescriptor(),
                                            EmptyOptional(),
                                            EmptyOptional(),
                                            reasonIfUnsupported);
}

bool LayerSupportHandle::IsMeanSupported(const TensorInfo& input,
                                         const TensorInfo& output,
                                         const MeanDescriptor& descriptor,
                                         Optional<std::string&> reasonIfUnsupported)
{
    TensorInfos infos{input, output};

    return m_LayerSupport->IsLayerSupported(LayerType::Mean,
                                            infos,
                                            descriptor,
                                            EmptyOptional(),
                                            EmptyOptional(),
                                            reasonIfUnsupported);
}

bool LayerSupportHandle::IsMemCopySupported(const TensorInfo& input,
                                            const TensorInfo& output,
                                            Optional<std::string&> reasonIfUnsupported)
{
    TensorInfos infos{input, output};

    return m_LayerSupport->IsLayerSupported(LayerType::MemCopy,
                                            infos,
                                            BaseDescriptor(),
                                            EmptyOptional(),
                                            EmptyOptional(),
                                            reasonIfUnsupported);
}

bool LayerSupportHandle::IsMemImportSupported(const TensorInfo& input,
                                              const TensorInfo& output,
                                              Optional<std::string&> reasonIfUnsupported)
{
    TensorInfos infos{input, output};

    return m_LayerSupport->IsLayerSupported(LayerType::MemImport,
                                            infos,
                                            BaseDescriptor(),
                                            EmptyOptional(),
                                            EmptyOptional(),
                                            reasonIfUnsupported);
}

bool LayerSupportHandle::IsMergeSupported(const TensorInfo& input0,
                                          const TensorInfo& input1,
                                          const TensorInfo& output,
                                          Optional<std::string&> reasonIfUnsupported)
{
    TensorInfos infos{input0, input1, output};

    return m_LayerSupport->IsLayerSupported(LayerType::Merge,
                                            infos,
                                            BaseDescriptor(),
                                            EmptyOptional(),
                                            EmptyOptional(),
                                            reasonIfUnsupported);
}

bool LayerSupportHandle::IsMinimumSupported(const TensorInfo& input0,
                                            const TensorInfo& input1,
                                            const TensorInfo& output,
                                            Optional<std::string&> reasonIfUnsupported)
{
    TensorInfos infos{input0, input1, output};

    return m_LayerSupport->IsLayerSupported(LayerType::Minimum,
                                            infos,
                                            BaseDescriptor(),
                                            EmptyOptional(),
                                            EmptyOptional(),
                                            reasonIfUnsupported);
}

bool LayerSupportHandle::IsMultiplicationSupported(const TensorInfo& input0,
                                                   const TensorInfo& input1,
                                                   const TensorInfo& output,
                                                   Optional<std::string&> reasonIfUnsupported)
{
    TensorInfos infos{input0, input1, output};

    return m_LayerSupport->IsLayerSupported(LayerType::Multiplication,
                                            infos,
                                            BaseDescriptor(),
                                            EmptyOptional(),
                                            EmptyOptional(),
                                            reasonIfUnsupported);
}

bool LayerSupportHandle::IsNormalizationSupported(const TensorInfo& input,
                                                  const TensorInfo& output,
                                                  const NormalizationDescriptor& descriptor,
                                                  Optional<std::string&> reasonIfUnsupported)
{
    TensorInfos infos{input, output};

    return m_LayerSupport->IsLayerSupported(LayerType::Normalization,
                                            infos,
                                            descriptor,
                                            EmptyOptional(),
                                            EmptyOptional(),
                                            reasonIfUnsupported);
}

bool LayerSupportHandle::IsOutputSupported(const TensorInfo& output,
                                           Optional<std::string&> reasonIfUnsupported)
{
    TensorInfos infos{output};

    return m_LayerSupport->IsLayerSupported(LayerType::Output,
                                            infos,
                                            BaseDescriptor(),
                                            EmptyOptional(),
                                            EmptyOptional(),
                                            reasonIfUnsupported);
}

bool LayerSupportHandle::IsPadSupported(const TensorInfo& input,
                                        const TensorInfo& output,
                                        const PadDescriptor& descriptor,
                                        Optional<std::string&> reasonIfUnsupported)
{
    TensorInfos infos{input, output};

    return m_LayerSupport->IsLayerSupported(LayerType::Pad,
                                            infos,
                                            descriptor,
                                            EmptyOptional(),
                                            EmptyOptional(),
                                            reasonIfUnsupported);
}

bool LayerSupportHandle::IsPermuteSupported(const TensorInfo& input,
                                            const TensorInfo& output,
                                            const PermuteDescriptor& descriptor,
                                            Optional<std::string&> reasonIfUnsupported)
{
    TensorInfos infos{input, output};

    return m_LayerSupport->IsLayerSupported(LayerType::Permute,
                                            infos,
                                            descriptor,
                                            EmptyOptional(),
                                            EmptyOptional(),
                                            reasonIfUnsupported);
}

bool LayerSupportHandle::IsPooling2dSupported(const TensorInfo& input,
                                              const TensorInfo& output,
                                              const Pooling2dDescriptor& descriptor,
                                              Optional<std::string&> reasonIfUnsupported)
{
    TensorInfos infos{input, output};

    return m_LayerSupport->IsLayerSupported(LayerType::Pooling2d,
                                            infos,
                                            descriptor,
                                            EmptyOptional(),
                                            EmptyOptional(),
                                            reasonIfUnsupported);
}

bool LayerSupportHandle::IsPooling3dSupported(const TensorInfo& input,
                                              const TensorInfo& output,
                                              const Pooling3dDescriptor& descriptor,
                                              Optional<std::string&> reasonIfUnsupported)
{
    TensorInfos infos{input, output};

    return m_LayerSupport->IsLayerSupported(LayerType::Pooling3d,
                                            infos,
                                            descriptor,
                                            EmptyOptional(),
                                            EmptyOptional(),
                                            reasonIfUnsupported);
}

bool LayerSupportHandle::IsPreCompiledSupported(const TensorInfo& input,
                                                const PreCompiledDescriptor& descriptor,
                                                Optional<std::string&> reasonIfUnsupported)
{
    TensorInfos infos{input};

    return m_LayerSupport->IsLayerSupported(LayerType::PreCompiled,
                                            infos,
                                            descriptor,
                                            EmptyOptional(),
                                            EmptyOptional(),
                                            reasonIfUnsupported);
}

bool LayerSupportHandle::IsPreluSupported(const TensorInfo& input,
                                          const TensorInfo& alpha,
                                          const TensorInfo& output,
                                          Optional<std::string&> reasonIfUnsupported)
{
    TensorInfos infos{input, alpha, output};

    return m_LayerSupport->IsLayerSupported(LayerType::Prelu,
                                            infos,
                                            BaseDescriptor(),
                                            EmptyOptional(),
                                            EmptyOptional(),
                                            reasonIfUnsupported);
}

bool LayerSupportHandle::IsQuantizeSupported(const TensorInfo& input,
                                             const TensorInfo& output,
                                             Optional<std::string&> reasonIfUnsupported)
{
    TensorInfos infos{input, output};

    return m_LayerSupport->IsLayerSupported(LayerType::Quantize,
                                            infos,
                                            BaseDescriptor(),
                                            EmptyOptional(),
                                            EmptyOptional(),
                                            reasonIfUnsupported);
}

bool LayerSupportHandle::IsQLstmSupported(const TensorInfo& input,
                                          const TensorInfo& previousOutputIn,
                                          const TensorInfo& previousCellStateIn,
                                          const TensorInfo& outputStateOut,
                                          const TensorInfo& cellStateOut,
                                          const TensorInfo& output,
                                          const QLstmDescriptor& descriptor,
                                          const LstmInputParamsInfo& paramsInfo,
                                          Optional<std::string&> reasonIfUnsupported)
{
    TensorInfos infos{input, previousOutputIn, previousCellStateIn, outputStateOut, cellStateOut, output};

    return m_LayerSupport->IsLayerSupported(LayerType::QLstm,
                                            infos,
                                            descriptor,
                                            paramsInfo,
                                            EmptyOptional(),
                                            reasonIfUnsupported);
}

bool LayerSupportHandle::IsQuantizedLstmSupported(const TensorInfo& input,
                                                  const TensorInfo& previousCellStateIn,
                                                  const TensorInfo& previousOutputIn,
                                                  const TensorInfo& cellStateOut,
                                                  const TensorInfo& output,
                                                  const QuantizedLstmInputParamsInfo& paramsInfo,
                                                  Optional<std::string&> reasonIfUnsupported)
{
    TensorInfos infos{input, previousCellStateIn, previousOutputIn, cellStateOut, output};

    return m_LayerSupport->IsLayerSupported(LayerType::QuantizedLstm,
                                            infos,
                                            BaseDescriptor(),
                                            EmptyOptional(),
                                            paramsInfo,
                                            reasonIfUnsupported);
}

bool LayerSupportHandle::IsRankSupported(const TensorInfo& input,
                                         const TensorInfo& output,
                                         Optional<std::string&> reasonIfUnsupported)
{
    TensorInfos infos{input, output};

    return m_LayerSupport->IsLayerSupported(LayerType::Rank,
                                            infos,
                                            BaseDescriptor(),
                                            EmptyOptional(),
                                            EmptyOptional(),
                                            reasonIfUnsupported);
}

bool LayerSupportHandle::IsReduceSupported(const TensorInfo& input,
                                           const TensorInfo& output,
                                           const ReduceDescriptor& descriptor,
                                           Optional<std::string&> reasonIfUnsupported)
{
    TensorInfos infos{input, output};

    return m_LayerSupport->IsLayerSupported(LayerType::Reduce,
                                            infos,
                                            descriptor,
                                            EmptyOptional(),
                                            EmptyOptional(),
                                            reasonIfUnsupported);
}

bool LayerSupportHandle::IsReshapeSupported(const TensorInfo& input,
                                            const TensorInfo& output,
                                            const ReshapeDescriptor& descriptor,
                                            Optional<std::string&> reasonIfUnsupported)
{
    TensorInfos infos{input, output};

    return m_LayerSupport->IsLayerSupported(LayerType::Reshape,
                                            infos,
                                            descriptor,
                                            EmptyOptional(),
                                            EmptyOptional(),
                                            reasonIfUnsupported);
}

bool LayerSupportHandle::IsResizeSupported(const TensorInfo& input,
                                           const TensorInfo& output,
                                           const ResizeDescriptor& descriptor,
                                           Optional<std::string&> reasonIfUnsupported)
{
    TensorInfos infos{input, output};

    return m_LayerSupport->IsLayerSupported(LayerType::Resize,
                                            infos,
                                            descriptor,
                                            EmptyOptional(),
                                            EmptyOptional(),
                                            reasonIfUnsupported);
}

bool LayerSupportHandle::IsShapeSupported(const TensorInfo& input,
                                          const TensorInfo& output,
                                          Optional<std::string&> reasonIfUnsupported)
{
    TensorInfos infos{input, output};

    return m_LayerSupport->IsLayerSupported(LayerType::Shape,
                                            infos,
                                            BaseDescriptor(),
                                            EmptyOptional(),
                                            EmptyOptional(),
                                            reasonIfUnsupported);
}

bool LayerSupportHandle::IsSliceSupported(const TensorInfo& input,
                                          const TensorInfo& output,
                                          const SliceDescriptor& descriptor,
                                          Optional<std::string&> reasonIfUnsupported)
{
    TensorInfos infos{input, output};

    return m_LayerSupport->IsLayerSupported(LayerType::Slice,
                                            infos,
                                            descriptor,
                                            EmptyOptional(),
                                            EmptyOptional(),
                                            reasonIfUnsupported);
}

bool LayerSupportHandle::IsSoftmaxSupported(const TensorInfo& input,
                                            const TensorInfo& output,
                                            const SoftmaxDescriptor& descriptor,
                                            Optional<std::string&> reasonIfUnsupported)
{
    TensorInfos infos{input, output};

    return m_LayerSupport->IsLayerSupported(LayerType::Softmax,
                                            infos,
                                            descriptor,
                                            EmptyOptional(),
                                            EmptyOptional(),
                                            reasonIfUnsupported);
}

bool LayerSupportHandle::IsSpaceToBatchNdSupported(const TensorInfo& input,
                                                   const TensorInfo& output,
                                                   const SpaceToBatchNdDescriptor& descriptor,
                                                   Optional<std::string&> reasonIfUnsupported)
{
    TensorInfos infos{input, output};

    return m_LayerSupport->IsLayerSupported(LayerType::SpaceToBatchNd,
                                            infos,
                                            descriptor,
                                            EmptyOptional(),
                                            EmptyOptional(),
                                            reasonIfUnsupported);
}

bool LayerSupportHandle::IsSpaceToDepthSupported(const TensorInfo& input,
                                                 const TensorInfo& output,
                                                 const SpaceToDepthDescriptor& descriptor,
                                                 Optional<std::string&> reasonIfUnsupported)
{
    TensorInfos infos{input, output};

    return m_LayerSupport->IsLayerSupported(LayerType::SpaceToDepth,
                                            infos,
                                            descriptor,
                                            EmptyOptional(),
                                            EmptyOptional(),
                                            reasonIfUnsupported);
}

bool LayerSupportHandle::IsSplitterSupported(const TensorInfo& input,
                                             const std::vector<std::reference_wrapper<TensorInfo>>& outputs,
                                             const ViewsDescriptor& descriptor,
                                             Optional<std::string&> reasonIfUnsupported)
{
    TensorInfos infos{input};
    for (TensorInfo outInfo : outputs)
    {
        infos.push_back(outInfo);
    }

    return m_LayerSupport->IsLayerSupported(LayerType::Splitter,
                                            infos,
                                            descriptor,
                                            EmptyOptional(),
                                            EmptyOptional(),
                                            reasonIfUnsupported);
}

bool LayerSupportHandle::IsStackSupported(const std::vector<const TensorInfo*>& inputs,
                                          const TensorInfo& output,
                                          const StackDescriptor& descriptor,
                                          Optional<std::string&> reasonIfUnsupported)
{
    TensorInfos infos;
    for (const TensorInfo* inputInfo : inputs)
    {
        infos.push_back(*inputInfo);
    }
    infos.push_back(output);

    return m_LayerSupport->IsLayerSupported(LayerType::Stack,
                                            infos,
                                            descriptor,
                                            EmptyOptional(),
                                            EmptyOptional(),
                                            reasonIfUnsupported);
}

bool LayerSupportHandle::IsStandInSupported(const std::vector<const TensorInfo*>& inputs,
                                            const std::vector<const TensorInfo*>& outputs,
                                            const StandInDescriptor& descriptor,
                                            Optional<std::string&> reasonIfUnsupported)
{
    TensorInfos infos;
    for (const TensorInfo* inputInfo : inputs)
    {
        infos.push_back(*inputInfo);
    }
    for (const TensorInfo* outputInfo : outputs)
    {
        infos.push_back(*outputInfo);
    }

    return m_LayerSupport->IsLayerSupported(LayerType::StandIn,
                                            infos,
                                            descriptor,
                                            EmptyOptional(),
                                            EmptyOptional(),
                                            reasonIfUnsupported);
}


bool LayerSupportHandle::IsStridedSliceSupported(const TensorInfo& input,
                                                 const TensorInfo& output,
                                                 const StridedSliceDescriptor& descriptor,
                                                 Optional<std::string&> reasonIfUnsupported)
{
    TensorInfos infos{input, output};

    return m_LayerSupport->IsLayerSupported(LayerType::StridedSlice,
                                            infos,
                                            descriptor,
                                            EmptyOptional(),
                                            EmptyOptional(),
                                            reasonIfUnsupported);
}

bool LayerSupportHandle::IsSubtractionSupported(const TensorInfo& input0,
                                                const TensorInfo& input1,
                                                const TensorInfo& output,
                                                Optional<std::string&> reasonIfUnsupported)
{
    TensorInfos infos{input0, input1, output};

    return m_LayerSupport->IsLayerSupported(LayerType::Subtraction,
                                            infos,
                                            BaseDescriptor(),
                                            EmptyOptional(),
                                            EmptyOptional(),
                                            reasonIfUnsupported);
}

bool LayerSupportHandle::IsSwitchSupported(const TensorInfo& input0,
                                           const TensorInfo& input1,
                                           const TensorInfo& output0,
                                           const TensorInfo& output1,
                                           Optional<std::string&> reasonIfUnsupported)
{
    TensorInfos infos{input0, input1, output0, output1};

    return m_LayerSupport->IsLayerSupported(LayerType::Switch,
                                            infos,
                                            BaseDescriptor(),
                                            EmptyOptional(),
                                            EmptyOptional(),
                                            reasonIfUnsupported);
}

bool LayerSupportHandle::IsTransposeConvolution2dSupported(
    const TensorInfo& input,
    const TensorInfo& output,
    const TransposeConvolution2dDescriptor& descriptor,
    const TensorInfo& weights,
    const Optional<TensorInfo>& biases,
    Optional<std::string&> reasonIfUnsupported)
{
    TensorInfo biasesVal = biases.has_value() ? biases.value() : TensorInfo();
    TensorInfos infos{input, output, weights, biasesVal};

    return m_LayerSupport->IsLayerSupported(LayerType::TransposeConvolution2d,
                                            infos,
                                            descriptor,
                                            EmptyOptional(),
                                            EmptyOptional(),
                                            reasonIfUnsupported);
}

bool LayerSupportHandle::IsTransposeSupported(const TensorInfo& input,
                                              const TensorInfo& output,
                                              const TransposeDescriptor& descriptor,
                                              Optional<std::string&> reasonIfUnsupported)
{
    TensorInfos infos{input, output};

    return m_LayerSupport->IsLayerSupported(LayerType::Transpose,
                                            infos,
                                            descriptor,
                                            EmptyOptional(),
                                            EmptyOptional(),
                                            reasonIfUnsupported);
}

// Forwarding function to maintain ABI stability
bool LayerSupportHandle::IsUnidirectionalSequenceLstmSupported(const TensorInfo& input,
                                                               const TensorInfo& outputStateIn,
                                                               const TensorInfo& cellStateIn,
                                                               const TensorInfo& output,
                                                               const Optional<TensorInfo>& hiddenStateOutput,
                                                               const Optional<TensorInfo>& cellStateOutput,
                                                               const LstmDescriptor& descriptor,
                                                               const LstmInputParamsInfo& paramsInfo,
                                                               Optional<std::string&> reasonIfUnsupported)
{
    TensorInfo hiddenStateOutputVal = hiddenStateOutput.has_value() ? hiddenStateOutput.value() : TensorInfo();
    TensorInfo cellStateOutputVal = cellStateOutput.has_value() ? cellStateOutput.value() : TensorInfo();
    TensorInfos infos{input, outputStateIn, cellStateIn, hiddenStateOutputVal, cellStateOutputVal, output};

    return IsUnidirectionalSequenceLstmSupported(input,
                                                 outputStateIn,
                                                 cellStateIn,
                                                 hiddenStateOutputVal,
                                                 cellStateOutputVal,
                                                 output,
                                                 descriptor,
                                                 paramsInfo,
                                                 reasonIfUnsupported);
}

bool LayerSupportHandle::IsUnidirectionalSequenceLstmSupported(const TensorInfo& input,
                                                               const TensorInfo& outputStateIn,
                                                               const TensorInfo& cellStateIn,
                                                               const TensorInfo& outputStateOut,
                                                               const TensorInfo& cellStateOut,
                                                               const TensorInfo& output,
                                                               const LstmDescriptor& descriptor,
                                                               const LstmInputParamsInfo& paramsInfo,
                                                               Optional<std::string&> reasonIfUnsupported)
{
    TensorInfos infos{input, outputStateIn, cellStateIn, outputStateOut, cellStateOut, output};

    return m_LayerSupport->IsLayerSupported(LayerType::UnidirectionalSequenceLstm,
                                            infos,
                                            descriptor,
                                            paramsInfo,
                                            EmptyOptional(),
                                            reasonIfUnsupported);
}

}
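
For reference, here is a minimal sketch of how a caller might use LayerSupportHandle to query layer support before building a network. It is illustrative only and not part of the file above; the backend name, tensor shape and data type are assumptions.

// Hypothetical client-side example of LayerSupportHandle (assumes "CpuRef" is registered).
#include <armnn/BackendHelper.hpp>
#include <armnn/Descriptors.hpp>
#include <armnn/Tensor.hpp>
#include <iostream>
#include <string>

int main()
{
    // Obtain a layer-support handle for the (assumed) reference backend.
    armnn::LayerSupportHandle handle = armnn::GetILayerSupportByBackendId("CpuRef");
    if (!handle.IsBackendRegistered())
    {
        std::cerr << "CpuRef backend is not registered" << std::endl;
        return 1;
    }

    // Ask whether a ReLU activation on a 1x16 Float32 tensor is supported.
    armnn::TensorInfo info({1, 16}, armnn::DataType::Float32);
    armnn::ActivationDescriptor descriptor;
    descriptor.m_Function = armnn::ActivationFunction::ReLu;

    std::string reason;
    bool supported = handle.IsActivationSupported(info, info, descriptor,
                                                  armnn::Optional<std::string&>(reason));
    std::cout << "Activation supported: " << supported;
    if (!supported)
    {
        std::cout << " (" << reason << ")";
    }
    std::cout << std::endl;
    return 0;
}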