ArmNN
 22.05
BackendHelper.cpp
Go to the documentation of this file.
1 //
2 // Copyright © 2017 Arm Ltd. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
8 #include <armnn/Logging.hpp>
9 
11 
12 namespace armnn
13 {
14 
15 // Return LayerSupportHandle instead of the previous pointer to ILayerSupport.
17 {
19 
20  if (!backendRegistry.IsBackendRegistered(backend))
21  {
22  return LayerSupportHandle(nullptr);
23  }
24 
25  auto factoryFunc = backendRegistry.GetFactory(backend);
26  auto backendObject = factoryFunc();
27  return LayerSupportHandle(backendObject->GetLayerSupport(), backend);
28 }
29 
30 Optional<const BackendOptions::BackendOption> GetCapability(const std::string& backendCapabilityName,
31  const BackendCapabilities& capabilities)
32 {
33  for (size_t i=0; i < capabilities.GetOptionCount(); i++)
34  {
35  const auto& capability = capabilities.GetOption(i);
36  if (backendCapabilityName == capability.GetName())
37  {
38  return capability;
39  }
40  }
41  return EmptyOptional();
42 }
43 
44 Optional<const BackendOptions::BackendOption> GetCapability(const std::string& backendCapabilityName,
45  const armnn::BackendId& backend)
46 {
47  auto const& backendRegistry = armnn::BackendRegistryInstance();
48  if (backendRegistry.IsBackendRegistered(backend))
49  {
50  auto factoryFunc = backendRegistry.GetFactory(backend);
51  auto backendObject = factoryFunc();
52  auto capabilities = backendObject->GetCapabilities();
53  return GetCapability(backendCapabilityName, capabilities);
54  }
55  return EmptyOptional();
56 }
57 
58 bool HasCapability(const std::string& name, const BackendCapabilities& capabilities)
59 {
60  return GetCapability(name, capabilities).has_value();
61 }
62 
63 bool HasCapability(const std::string& name, const armnn::BackendId& backend)
64 {
65  return GetCapability(name, backend).has_value();
66 }
67 
68 bool HasCapability(const BackendOptions::BackendOption& capability, const BackendCapabilities& capabilities)
69 {
70  for (size_t i=0; i < capabilities.GetOptionCount(); i++)
71  {
72  const auto& backendCapability = capabilities.GetOption(i);
73  if (capability.GetName() == backendCapability.GetName())
74  {
75  if (capability.GetValue().IsBool() && backendCapability.GetValue().IsBool())
76  {
77  return capability.GetValue().AsBool() == backendCapability.GetValue().AsBool();
78  }
79  else if(capability.GetValue().IsFloat() && backendCapability.GetValue().IsFloat())
80  {
81  return capability.GetValue().AsFloat() == backendCapability.GetValue().AsFloat();
82  }
83  else if(capability.GetValue().IsInt() && backendCapability.GetValue().IsInt())
84  {
85  return capability.GetValue().AsInt() == backendCapability.GetValue().AsInt();
86  }
87  else if(capability.GetValue().IsString() && backendCapability.GetValue().IsString())
88  {
89  return capability.GetValue().AsString() == backendCapability.GetValue().AsString();
90  }
91  else if(capability.GetValue().IsUnsignedInt() && backendCapability.GetValue().IsUnsignedInt())
92  {
93  return capability.GetValue().AsUnsignedInt() == backendCapability.GetValue().AsUnsignedInt();
94  }
95  }
96  }
97  return false;
98 }
99 
100 bool HasCapability(const BackendOptions::BackendOption& backendOption, const armnn::BackendId& backend)
101 {
102  auto const& backendRegistry = armnn::BackendRegistryInstance();
103  if (backendRegistry.IsBackendRegistered(backend))
104  {
105  auto factoryFunc = backendRegistry.GetFactory(backend);
106  auto backendObject = factoryFunc();
107  auto capabilities = backendObject->GetCapabilities();
108  return HasCapability(backendOption, capabilities);
109  }
110  return false;
111 }
112 
113 /// Convenience function to check a capability on a backend
115 {
116  bool hasCapability = false;
117  auto const& backendRegistry = armnn::BackendRegistryInstance();
118  if (backendRegistry.IsBackendRegistered(backend))
119  {
120  auto factoryFunc = backendRegistry.GetFactory(backend);
121  auto backendObject = factoryFunc();
123  hasCapability = backendObject->HasCapability(capability);
125  }
126  return hasCapability;
127 }
128 
129 unsigned int GetNumberOfCacheFiles(const armnn::BackendId& backend)
130 {
131  auto const& backendRegistry = armnn::BackendRegistryInstance();
132  if (backendRegistry.IsBackendRegistered(backend))
133  {
134  auto factoryFunc = backendRegistry.GetFactory(backend);
135  auto backendObject = factoryFunc();
136  return backendObject->GetNumberOfCacheFiles();
137  }
138  return 0;
139 }
140 
142 {
143  if (m_LayerSupport)
144  {
145  return true;
146  }
147 
148  return false;
149 }
150 
151 using TensorInfos = std::vector<TensorInfo>;
152 
154  const TensorInfo& output,
157 {
158  TensorInfos infos{input, output};
159 
160  return m_LayerSupport->IsLayerSupported(LayerType::Activation,
161  infos,
162  descriptor,
163  EmptyOptional(),
164  EmptyOptional(),
165  reasonIfUnsupported);
166 }
167 
169  const TensorInfo& input1,
170  const TensorInfo& output,
172 {
173  TensorInfos infos{input0, input1, output};
174 
175  return m_LayerSupport->IsLayerSupported(LayerType::Addition,
176  infos,
177  BaseDescriptor(),
178  EmptyOptional(),
179  EmptyOptional(),
180  reasonIfUnsupported);
181 }
182 
184  const TensorInfo& output,
187 {
188  TensorInfos infos{input, output};
189 
190  return m_LayerSupport->IsLayerSupported(LayerType::ArgMinMax,
191  infos,
192  descriptor,
193  EmptyOptional(),
194  EmptyOptional(),
195  reasonIfUnsupported);
196 }
197 
199  const TensorInfo& output,
200  const TensorInfo& mean,
201  const TensorInfo& var,
202  const TensorInfo& beta,
203  const TensorInfo& gamma,
206 {
207  TensorInfos infos{input, output, mean, var, beta, gamma};
208 
209  return m_LayerSupport->IsLayerSupported(LayerType::BatchNormalization,
210  infos,
211  descriptor,
212  EmptyOptional(),
213  EmptyOptional(),
214  reasonIfUnsupported);
215 }
216 
218  const TensorInfo& output,
221 {
222  TensorInfos infos{input, output};
223 
224  return m_LayerSupport->IsLayerSupported(LayerType::BatchToSpaceNd,
225  infos,
226  descriptor,
227  EmptyOptional(),
228  EmptyOptional(),
229  reasonIfUnsupported);
230 }
231 
233  const TensorInfo& output,
235 {
236  TensorInfos infos{input, output};
237 
238  return m_LayerSupport->IsLayerSupported(LayerType::Cast,
239  infos,
240  BaseDescriptor(),
241  EmptyOptional(),
242  EmptyOptional(),
243  reasonIfUnsupported);
244 }
245 
247  const TensorInfo &output,
250 {
251  TensorInfos infos{input, output};
252 
253  return m_LayerSupport->IsLayerSupported(LayerType::ChannelShuffle,
254  infos,
255  descriptor,
256  EmptyOptional(),
257  EmptyOptional(),
258  reasonIfUnsupported);
259 }
260 
262  const TensorInfo& input1,
263  const TensorInfo& output,
266 {
267  TensorInfos infos{input0, input1, output};
268 
269  return m_LayerSupport->IsLayerSupported(LayerType::Comparison,
270  infos,
271  descriptor,
272  EmptyOptional(),
273  EmptyOptional(),
274  reasonIfUnsupported);
275 }
276 
277 bool LayerSupportHandle::IsConcatSupported(const std::vector<const TensorInfo*> inputs,
278  const TensorInfo& output,
281 {
282  TensorInfos infos;
283  for (const TensorInfo* inputInfo : inputs)
284  {
285  infos.push_back(*inputInfo);
286  }
287  infos.push_back(output);
288 
289  return m_LayerSupport->IsLayerSupported(LayerType::Concat,
290  infos,
291  descriptor,
292  EmptyOptional(),
293  EmptyOptional(),
294  reasonIfUnsupported);
295 }
296 
299 {
300  TensorInfos infos{output};
301 
302  return m_LayerSupport->IsLayerSupported(LayerType::Constant,
303  infos,
304  BaseDescriptor(),
305  EmptyOptional(),
306  EmptyOptional(),
307  reasonIfUnsupported);
308 }
309 
311  const TensorInfo& output,
313 {
314  TensorInfos infos{input, output};
315 
316  return m_LayerSupport->IsLayerSupported(LayerType::ConvertBf16ToFp32,
317  infos,
318  BaseDescriptor(),
319  EmptyOptional(),
320  EmptyOptional(),
321  reasonIfUnsupported);
322 }
323 
325  const TensorInfo& output,
327 {
328  TensorInfos infos{input, output};
329 
330  return m_LayerSupport->IsLayerSupported(LayerType::ConvertFp32ToBf16,
331  infos,
332  BaseDescriptor(),
333  EmptyOptional(),
334  EmptyOptional(),
335  reasonIfUnsupported);
336 }
337 
339  const TensorInfo& output,
341 {
342  TensorInfos infos{input, output};
343 
344  return m_LayerSupport->IsLayerSupported(LayerType::ConvertFp16ToFp32,
345  infos,
346  BaseDescriptor(),
347  EmptyOptional(),
348  EmptyOptional(),
349  reasonIfUnsupported);
350 }
351 
353  const TensorInfo& output,
355 {
356  TensorInfos infos{input, output};
357 
358  return m_LayerSupport->IsLayerSupported(LayerType::ConvertFp32ToFp16,
359  infos,
360  BaseDescriptor(),
361  EmptyOptional(),
362  EmptyOptional(),
363  reasonIfUnsupported);
364 }
365 
367  const TensorInfo& output,
369  const TensorInfo& weights,
370  const Optional<TensorInfo>& biases,
372 {
373  TensorInfo biasesVal = biases.has_value() ? biases.value() : TensorInfo();
374  TensorInfos infos{input, output, weights, biasesVal};
375 
377  if(!m_BackendId.IsUndefined())
378  {
379  capability = GetCapability("ConstantTensorsAsInputs", m_BackendId);
380  if(!capability.has_value() || capability.value().GetValue().AsBool() == false)
381  {
382  if(!weights.IsConstant())
383  {
384  return false;
385  }
386  if (descriptor.m_BiasEnabled && !biases.has_value())
387  {
388  return false;
389  }
390 
391 
392  // At the first stage we will only print a warning. this is to give
393  // backend developers a chance to adopt and read weights from input slots.
394  ARMNN_LOG(warning) << "The backend makes use of a deprecated interface to read constant tensors. "
395  "If you are a backend developer please find more information in our "
396  "doxygen documentation on github https://github.com/ARM-software/armnn "
397  "under the keyword 'ConstTensorsAsInputs'.";
398  }
399  }
400 
401  return m_LayerSupport->IsLayerSupported(LayerType::Convolution2d,
402  infos,
403  descriptor,
404  EmptyOptional(),
405  EmptyOptional(),
406  reasonIfUnsupported);
407 }
408 
410  const TensorInfo& output,
412  const TensorInfo& weights,
413  const Optional<TensorInfo>& biases,
415 {
416  TensorInfo biasesVal = biases.has_value() ? biases.value() : TensorInfo();
417  TensorInfos infos{input, output, weights, biasesVal};
418 
419  return m_LayerSupport->IsLayerSupported(LayerType::Convolution3d,
420  infos,
421  descriptor,
422  EmptyOptional(),
423  EmptyOptional(),
424  reasonIfUnsupported);
425 }
426 
428  const TensorInfo& output,
430 {
431  TensorInfos infos{input, output};
432 
433  return m_LayerSupport->IsLayerSupported(LayerType::Debug,
434  infos,
435  BaseDescriptor(),
436  EmptyOptional(),
437  EmptyOptional(),
438  reasonIfUnsupported);
439 }
440 
442  const TensorInfo& output,
445 {
446  TensorInfos infos{input, output};
447 
448  return m_LayerSupport->IsLayerSupported(LayerType::DepthToSpace,
449  infos,
450  descriptor,
451  EmptyOptional(),
452  EmptyOptional(),
453  reasonIfUnsupported);
454 }
455 
457  const TensorInfo& input,
458  const TensorInfo& output,
460  const TensorInfo& weights,
461  const Optional<TensorInfo>& biases,
463 {
464  TensorInfo biasesVal = biases.has_value() ? biases.value() : TensorInfo();
465  TensorInfos infos{input, output, weights, biasesVal};
466 
468  if(!m_BackendId.IsUndefined())
469  {
470  capability = GetCapability("ConstantTensorsAsInputs", m_BackendId);
471  if(!capability.has_value() || capability.value().GetValue().AsBool() == false)
472  {
473  if(!weights.IsConstant())
474  {
475  return false;
476  }
477  if(descriptor.m_BiasEnabled)
478  {
479  if(!biases.value().IsConstant())
480  {
481  return false;
482  }
483  }
484  // At the first stage we will only print a warning. this is to give
485  // backend developers a chance to adopt and read weights from input slots.
486  ARMNN_LOG(warning) << "The backend makes use of a deprecated interface to read constant tensors. "
487  "If you are a backend developer please find more information in our "
488  "doxygen documentation on github https://github.com/ARM-software/armnn "
489  "under the keyword 'ConstTensorsAsInputs'.";
490  }
491  }
492 
493  return m_LayerSupport->IsLayerSupported(LayerType::DepthwiseConvolution2d,
494  infos,
495  descriptor,
496  EmptyOptional(),
497  EmptyOptional(),
498  reasonIfUnsupported);
499 }
500 
502  const TensorInfo& output,
504 {
505  TensorInfos infos{input, output};
506 
507  return m_LayerSupport->IsLayerSupported(LayerType::Dequantize,
508  infos,
509  BaseDescriptor(),
510  EmptyOptional(),
511  EmptyOptional(),
512  reasonIfUnsupported);
513 }
514 
516  const TensorInfo& scores,
517  const TensorInfo& anchors,
518  const TensorInfo& detectionBoxes,
519  const TensorInfo& detectionClasses,
520  const TensorInfo& detectionScores,
521  const TensorInfo& numDetections,
524 {
525  TensorInfos infos{boxEncodings, scores, anchors, detectionBoxes, detectionClasses, detectionScores, numDetections};
526 
527  return m_LayerSupport->IsLayerSupported(LayerType::DetectionPostProcess,
528  infos,
529  descriptor,
530  EmptyOptional(),
531  EmptyOptional(),
532  reasonIfUnsupported);
533 }
534 
536  const TensorInfo& input,
537  const TensorInfo& output,
539  const TensorInfo& weights,
540  const Optional<TensorInfo>& biases,
542 {
543  TensorInfo biasesVal = biases.has_value() ? biases.value() : TensorInfo();
544  TensorInfos infos{input, output, weights, biasesVal};
545 
547  if(!m_BackendId.IsUndefined())
548  {
549  capability = GetCapability("ConstantTensorsAsInputs", m_BackendId);
550  if(!capability.has_value() || capability.value().GetValue().AsBool() == false)
551  {
552  if(!weights.IsConstant())
553  {
554  return false;
555  }
556  if(descriptor.m_BiasEnabled)
557  {
558  if(!biases.value().IsConstant())
559  {
560  return false;
561  }
562  }
563  // At the first stage we will only print a warning. this is to give
564  // backend developers a chance to adopt and read weights from input slots.
565  ARMNN_LOG(warning) << "The backend makes use of a deprecated interface to read constant tensors. "
566  "If you are a backend developer please find more information in our "
567  "doxygen documentation on github https://github.com/ARM-software/armnn "
568  "under the keyword 'ConstTensorsAsInputs'.";
569  }
570  }
571 
572  return m_LayerSupport->IsLayerSupported(LayerType::DepthwiseConvolution2d,
573  infos,
574  descriptor,
575  EmptyOptional(),
576  EmptyOptional(),
577  reasonIfUnsupported);
578 }
579 
581  const TensorInfo& input1,
582  const TensorInfo& output,
584 {
585  TensorInfos infos{input0, input1, output};
586 
587  return m_LayerSupport->IsLayerSupported(LayerType::Division,
588  infos,
589  BaseDescriptor(),
590  EmptyOptional(),
591  EmptyOptional(),
592  reasonIfUnsupported);
593 }
594 
596  const TensorInfo& output,
599 {
600  TensorInfos infos{input, output};
601 
602  return m_LayerSupport->IsLayerSupported(LayerType::ElementwiseUnary,
603  infos,
604  descriptor,
605  EmptyOptional(),
606  EmptyOptional(),
607  reasonIfUnsupported);
608 }
609 
613 {
614  TensorInfos infos{input};
615 
616  return m_LayerSupport->IsLayerSupported(LayerType::FakeQuantization,
617  infos,
618  descriptor,
619  EmptyOptional(),
620  EmptyOptional(),
621  reasonIfUnsupported);
622 }
623 
625  const TensorInfo& output,
626  const FillDescriptor& descriptor,
628 {
629  TensorInfos infos{input, output};
630 
631  return m_LayerSupport->IsLayerSupported(LayerType::Fill,
632  infos,
633  descriptor,
634  EmptyOptional(),
635  EmptyOptional(),
636  reasonIfUnsupported);
637 }
638 
640  const TensorInfo& output,
642 {
643  TensorInfos infos{input, output};
644 
645  return m_LayerSupport->IsLayerSupported(LayerType::Floor,
646  infos,
647  BaseDescriptor(),
648  EmptyOptional(),
649  EmptyOptional(),
650  reasonIfUnsupported);
651 }
652 
654  const TensorInfo& output,
655  const TensorInfo& weights,
656  const TensorInfo& biases,
659 {
660  if(!m_BackendId.IsUndefined())
661  {
662  auto capability = GetCapability("ConstantTensorsAsInputs", m_BackendId);
663  if(!capability.has_value() || capability.value().GetValue().AsBool() == false)
664  {
665  if(!weights.IsConstant())
666  {
667  if (reasonIfUnsupported.has_value())
668  {
669  reasonIfUnsupported.value() =
670  "This backend might not support non constant weights. "
671  "If weights are constant make sure to set IsConstant when creating TensorInfo";
672  }
673 
674  return false;
675  }
676  if(descriptor.m_BiasEnabled)
677  {
678  if(!biases.IsConstant())
679  {
680  if (reasonIfUnsupported.has_value())
681  {
682  reasonIfUnsupported.value() =
683  "This backend might not support non constant bias. "
684  "If bias are constant make sure to set IsConstant when creating TensorInfo";
685  }
686  return false;
687  }
688  }
689 
690  // At the first stage we will only print a warning. this is to give
691  // backend developers a chance to adopt and read weights from input slots.
692  ARMNN_LOG(warning) << "The backend makes use of a deprecated interface to read constant tensors. "
693  "If you are a backend developer please find more information in our "
694  "doxygen documentation on github https://github.com/ARM-software/armnn "
695  "under the keyword 'ConstTensorsAsInputs'.";
696  }
697 
698  if(!descriptor.m_ConstantWeights)
699  {
700  capability = GetCapability("NonConstWeights", m_BackendId);
701  if (capability.has_value() && capability.value().GetValue().AsBool() == true)
702  {
703  return true;
704  }
705  return false;
706  }
707  }
708 
709  TensorInfos infos{input, output, weights, biases};
710 
711  return m_LayerSupport->IsLayerSupported(LayerType::FullyConnected,
712  infos,
713  descriptor,
714  EmptyOptional(),
715  EmptyOptional(),
716  reasonIfUnsupported);
717 }
718 
720  const TensorInfo& input1,
721  const TensorInfo& output,
724 {
725  TensorInfos infos{input0, input1, output};
726 
727  return m_LayerSupport->IsLayerSupported(LayerType::Gather,
728  infos,
729  descriptor,
730  EmptyOptional(),
731  EmptyOptional(),
732  reasonIfUnsupported);
733 }
734 
736  const TensorInfo& input1,
737  const TensorInfo& output,
739 {
740  TensorInfos infos{input0, input1, output};
741 
742  return m_LayerSupport->IsLayerSupported(LayerType::GatherNd,
743  infos,
744  BaseDescriptor(),
745  EmptyOptional(),
746  EmptyOptional(),
747  reasonIfUnsupported);
748 }
749 
752 {
753  TensorInfos infos{input};
754 
755  return m_LayerSupport->IsLayerSupported(LayerType::Input,
756  infos,
757  BaseDescriptor(),
758  EmptyOptional(),
759  EmptyOptional(),
760  reasonIfUnsupported);
761 }
762 
764  const TensorInfo& input,
765  const TensorInfo& output,
768 {
769  TensorInfos infos{input, output};
770 
771  return m_LayerSupport->IsLayerSupported(LayerType::InstanceNormalization,
772  infos,
773  descriptor,
774  EmptyOptional(),
775  EmptyOptional(),
776  reasonIfUnsupported);
777 }
778 
780  const TensorInfo& output,
783 {
784  TensorInfos infos{input, output};
785 
786  return m_LayerSupport->IsLayerSupported(LayerType::L2Normalization,
787  infos,
788  descriptor,
789  EmptyOptional(),
790  EmptyOptional(),
791  reasonIfUnsupported);
792 }
793 
795  const TensorInfo& input1,
796  const TensorInfo& output,
799 {
800  TensorInfos infos{input0, input1, output};
801 
802  return m_LayerSupport->IsLayerSupported(LayerType::LogicalBinary,
803  infos,
804  descriptor,
805  EmptyOptional(),
806  EmptyOptional(),
807  reasonIfUnsupported);
808 }
809 
811  const TensorInfo& output,
814 {
815  TensorInfos infos{input, output};
816 
817  return m_LayerSupport->IsLayerSupported(LayerType::ElementwiseUnary,
818  infos,
819  descriptor,
820  EmptyOptional(),
821  EmptyOptional(),
822  reasonIfUnsupported);
823 }
824 
826  const TensorInfo& output,
829 {
830  TensorInfos infos{input, output};
831 
832  return m_LayerSupport->IsLayerSupported(LayerType::LogSoftmax,
833  infos,
834  descriptor,
835  EmptyOptional(),
836  EmptyOptional(),
837  reasonIfUnsupported);
838 }
839 
841  const TensorInfo& outputStateIn,
842  const TensorInfo& cellStateIn,
843  const TensorInfo& scratchBuffer,
844  const TensorInfo& outputStateOut,
845  const TensorInfo& cellStateOut,
846  const TensorInfo& output,
847  const LstmDescriptor& descriptor,
850 {
851  TensorInfos infos{input, outputStateIn, cellStateIn, scratchBuffer, outputStateOut, cellStateOut, output};
852 
853  return m_LayerSupport->IsLayerSupported(LayerType::Lstm,
854  infos,
855  descriptor,
856  paramsInfo,
857  EmptyOptional(),
858  reasonIfUnsupported);
859 }
860 
862  const TensorInfo& input1,
863  const TensorInfo& output,
865 {
866  TensorInfos infos{input0, input1, output};
867 
868  return m_LayerSupport->IsLayerSupported(LayerType::Maximum,
869  infos,
870  BaseDescriptor(),
871  EmptyOptional(),
872  EmptyOptional(),
873  reasonIfUnsupported);
874 }
875 
877  const TensorInfo& output,
878  const MeanDescriptor& descriptor,
880 {
881  TensorInfos infos{input, output};
882 
883  return m_LayerSupport->IsLayerSupported(LayerType::Mean,
884  infos,
885  descriptor,
886  EmptyOptional(),
887  EmptyOptional(),
888  reasonIfUnsupported);
889 }
890 
892  const TensorInfo& output,
894 {
895  TensorInfos infos{input, output};
896 
897  return m_LayerSupport->IsLayerSupported(LayerType::MemCopy,
898  infos,
899  BaseDescriptor(),
900  EmptyOptional(),
901  EmptyOptional(),
902  reasonIfUnsupported);
903 }
904 
906  const TensorInfo& output,
908 {
909  TensorInfos infos{input, output};
910 
911  return m_LayerSupport->IsLayerSupported(LayerType::MemImport,
912  infos,
913  BaseDescriptor(),
914  EmptyOptional(),
915  EmptyOptional(),
916  reasonIfUnsupported);
917 }
918 
920  const TensorInfo& input1,
921  const TensorInfo& output,
923 {
924  TensorInfos infos{input0, input1, output};
925 
926  return m_LayerSupport->IsLayerSupported(LayerType::Merge,
927  infos,
928  BaseDescriptor(),
929  EmptyOptional(),
930  EmptyOptional(),
931  reasonIfUnsupported);
932 }
933 
935  const TensorInfo& input1,
936  const TensorInfo& output,
938 {
939  TensorInfos infos{input0, input1, output};
940 
941  return m_LayerSupport->IsLayerSupported(LayerType::Minimum,
942  infos,
943  BaseDescriptor(),
944  EmptyOptional(),
945  EmptyOptional(),
946  reasonIfUnsupported);
947 }
948 
950  const TensorInfo& input1,
951  const TensorInfo& output,
953 {
954  TensorInfos infos{input0, input1, output};
955 
956  return m_LayerSupport->IsLayerSupported(LayerType::Multiplication,
957  infos,
958  BaseDescriptor(),
959  EmptyOptional(),
960  EmptyOptional(),
961  reasonIfUnsupported);
962 }
963 
965  const TensorInfo& output,
968 {
969  TensorInfos infos{input, output};
970 
971  return m_LayerSupport->IsLayerSupported(LayerType::Normalization,
972  infos,
973  descriptor,
974  EmptyOptional(),
975  EmptyOptional(),
976  reasonIfUnsupported);
977 }
978 
981 {
982  TensorInfos infos{output};
983 
984  return m_LayerSupport->IsLayerSupported(LayerType::Output,
985  infos,
986  BaseDescriptor(),
987  EmptyOptional(),
988  EmptyOptional(),
989  reasonIfUnsupported);
990 }
991 
993  const TensorInfo& output,
994  const PadDescriptor& descriptor,
996 {
997  TensorInfos infos{input, output};
998 
999  return m_LayerSupport->IsLayerSupported(LayerType::Pad,
1000  infos,
1001  descriptor,
1002  EmptyOptional(),
1003  EmptyOptional(),
1004  reasonIfUnsupported);
1005 }
1006 
1008  const TensorInfo& output,
1011 {
1012  TensorInfos infos{input, output};
1013 
1014  return m_LayerSupport->IsLayerSupported(LayerType::Permute,
1015  infos,
1016  descriptor,
1017  EmptyOptional(),
1018  EmptyOptional(),
1019  reasonIfUnsupported);
1020 }
1021 
1023  const TensorInfo& output,
1026 {
1027  TensorInfos infos{input, output};
1028 
1029  return m_LayerSupport->IsLayerSupported(LayerType::Pooling2d,
1030  infos,
1031  descriptor,
1032  EmptyOptional(),
1033  EmptyOptional(),
1034  reasonIfUnsupported);
1035 }
1036 
1038  const TensorInfo& output,
1041 {
1042  TensorInfos infos{input, output};
1043 
1044  return m_LayerSupport->IsLayerSupported(LayerType::Pooling3d,
1045  infos,
1046  descriptor,
1047  EmptyOptional(),
1048  EmptyOptional(),
1049  reasonIfUnsupported);
1050 }
1051 
1055 {
1056  TensorInfos infos{input};
1057 
1058  return m_LayerSupport->IsLayerSupported(LayerType::PreCompiled,
1059  infos,
1060  descriptor,
1061  EmptyOptional(),
1062  EmptyOptional(),
1063  reasonIfUnsupported);
1064 }
1065 
1067  const TensorInfo& alpha,
1068  const TensorInfo& output,
1070 {
1071  TensorInfos infos{input, alpha, output};
1072 
1073  return m_LayerSupport->IsLayerSupported(LayerType::Prelu,
1074  infos,
1075  BaseDescriptor(),
1076  EmptyOptional(),
1077  EmptyOptional(),
1078  reasonIfUnsupported);
1079 }
1080 
1082  const TensorInfo& output,
1084 {
1085  TensorInfos infos{input, output};
1086 
1087  return m_LayerSupport->IsLayerSupported(LayerType::Quantize,
1088  infos,
1089  BaseDescriptor(),
1090  EmptyOptional(),
1091  EmptyOptional(),
1092  reasonIfUnsupported);
1093 }
1094 
1096  const TensorInfo& previousOutputIn,
1097  const TensorInfo& previousCellStateIn,
1098  const TensorInfo& outputStateOut,
1099  const TensorInfo& cellStateOut,
1100  const TensorInfo& output,
1101  const QLstmDescriptor& descriptor,
1104 {
1105  TensorInfos infos{input, previousOutputIn, previousCellStateIn, outputStateOut, cellStateOut, output};
1106 
1107  return m_LayerSupport->IsLayerSupported(LayerType::QLstm,
1108  infos,
1109  descriptor,
1110  paramsInfo,
1111  EmptyOptional(),
1112  reasonIfUnsupported);
1113 }
1114 
1116  const TensorInfo& previousCellStateIn,
1117  const TensorInfo& previousOutputIn,
1118  const TensorInfo& cellStateOut,
1119  const TensorInfo& output,
1122 {
1123  TensorInfos infos{input, previousCellStateIn, previousOutputIn, cellStateOut, output};
1124 
1125  return m_LayerSupport->IsLayerSupported(LayerType::QuantizedLstm,
1126  infos,
1127  BaseDescriptor(),
1128  EmptyOptional(),
1129  paramsInfo,
1130  reasonIfUnsupported);
1131 }
1132 
1134  const TensorInfo& output,
1136 {
1137  TensorInfos infos{input, output};
1138 
1139  return m_LayerSupport->IsLayerSupported(LayerType::Rank,
1140  infos,
1141  BaseDescriptor(),
1142  EmptyOptional(),
1143  EmptyOptional(),
1144  reasonIfUnsupported);
1145 }
1146 
1148  const TensorInfo& output,
1151 {
1152  TensorInfos infos{input, output};
1153 
1154  return m_LayerSupport->IsLayerSupported(LayerType::Reduce,
1155  infos,
1156  descriptor,
1157  EmptyOptional(),
1158  EmptyOptional(),
1159  reasonIfUnsupported);
1160 }
1161 
1163  const TensorInfo& output,
1166 {
1167  TensorInfos infos{input, output};
1168 
1169  return m_LayerSupport->IsLayerSupported(LayerType::Reshape,
1170  infos,
1171  descriptor,
1172  EmptyOptional(),
1173  EmptyOptional(),
1174  reasonIfUnsupported);
1175 }
1176 
1178  const TensorInfo& output,
1181 {
1182  TensorInfos infos{input, output};
1183 
1184  return m_LayerSupport->IsLayerSupported(LayerType::Resize,
1185  infos,
1186  descriptor,
1187  EmptyOptional(),
1188  EmptyOptional(),
1189  reasonIfUnsupported);
1190 }
1191 
1193  const TensorInfo& output,
1195 {
1196  TensorInfos infos{input, output};
1197 
1198  return m_LayerSupport->IsLayerSupported(LayerType::Shape,
1199  infos,
1200  BaseDescriptor(),
1201  EmptyOptional(),
1202  EmptyOptional(),
1203  reasonIfUnsupported);
1204 }
1205 
1207  const TensorInfo& output,
1208  const SliceDescriptor& descriptor,
1210 {
1211  TensorInfos infos{input, output};
1212 
1213  return m_LayerSupport->IsLayerSupported(LayerType::Slice,
1214  infos,
1215  descriptor,
1216  EmptyOptional(),
1217  EmptyOptional(),
1218  reasonIfUnsupported);
1219 }
1220 
1222  const TensorInfo& output,
1225 {
1226  TensorInfos infos{input, output};
1227 
1228  return m_LayerSupport->IsLayerSupported(LayerType::Softmax,
1229  infos,
1230  descriptor,
1231  EmptyOptional(),
1232  EmptyOptional(),
1233  reasonIfUnsupported);
1234 }
1235 
1237  const TensorInfo& output,
1240 {
1241  TensorInfos infos{input, output};
1242 
1243  return m_LayerSupport->IsLayerSupported(LayerType::SpaceToBatchNd,
1244  infos,
1245  descriptor,
1246  EmptyOptional(),
1247  EmptyOptional(),
1248  reasonIfUnsupported);
1249 }
1250 
1252  const TensorInfo& output,
1255 {
1256  TensorInfos infos{input, output};
1257 
1258  return m_LayerSupport->IsLayerSupported(LayerType::SpaceToDepth,
1259  infos,
1260  descriptor,
1261  EmptyOptional(),
1262  EmptyOptional(),
1263  reasonIfUnsupported);
1264 }
1265 
1267  const std::vector<std::reference_wrapper<TensorInfo>>& outputs,
1268  const ViewsDescriptor& descriptor,
1270 {
1271  TensorInfos infos{input};
1272  for (TensorInfo outInfo : outputs)
1273  {
1274  infos.push_back(outInfo);
1275  }
1276 
1277  return m_LayerSupport->IsLayerSupported(LayerType::Splitter,
1278  infos,
1279  descriptor,
1280  EmptyOptional(),
1281  EmptyOptional(),
1282  reasonIfUnsupported);
1283 }
1284 
1285 bool LayerSupportHandle::IsStackSupported(const std::vector<const TensorInfo*>& inputs,
1286  const TensorInfo& output,
1287  const StackDescriptor& descriptor,
1289 {
1290  TensorInfos infos;
1291  for (const TensorInfo* inputInfo : inputs)
1292  {
1293  infos.push_back(*inputInfo);
1294  }
1295  infos.push_back(output);
1296 
1297  return m_LayerSupport->IsLayerSupported(LayerType::Stack,
1298  infos,
1299  descriptor,
1300  EmptyOptional(),
1301  EmptyOptional(),
1302  reasonIfUnsupported);
1303 }
1304 
1305 bool LayerSupportHandle::IsStandInSupported(const std::vector<const TensorInfo*>& inputs,
1306  const std::vector<const TensorInfo*>& outputs,
1309 {
1310  TensorInfos infos;
1311  for (const TensorInfo* inputInfo : inputs)
1312  {
1313  infos.push_back(*inputInfo);
1314  }
1315  for (const TensorInfo* outputInfo : outputs)
1316  {
1317  infos.push_back(*outputInfo);
1318  }
1319 
1320  return m_LayerSupport->IsLayerSupported(LayerType::StandIn,
1321  infos,
1322  descriptor,
1323  EmptyOptional(),
1324  EmptyOptional(),
1325  reasonIfUnsupported);
1326 }
1327 
1328 
1330  const TensorInfo& output,
1333 {
1334  TensorInfos infos{input, output};
1335 
1336  return m_LayerSupport->IsLayerSupported(LayerType::StridedSlice,
1337  infos,
1338  descriptor,
1339  EmptyOptional(),
1340  EmptyOptional(),
1341  reasonIfUnsupported);
1342 }
1343 
1345  const TensorInfo& input1,
1346  const TensorInfo& output,
1348 {
1349  TensorInfos infos{input0, input1, output};
1350 
1351  return m_LayerSupport->IsLayerSupported(LayerType::Subtraction,
1352  infos,
1353  BaseDescriptor(),
1354  EmptyOptional(),
1355  EmptyOptional(),
1356  reasonIfUnsupported);
1357 }
1358 
1360  const TensorInfo& input1,
1361  const TensorInfo& output0,
1362  const TensorInfo& output1,
1364 {
1365  TensorInfos infos{input0, input1, output0, output1};
1366 
1367  return m_LayerSupport->IsLayerSupported(LayerType::Switch,
1368  infos,
1369  BaseDescriptor(),
1370  EmptyOptional(),
1371  EmptyOptional(),
1372  reasonIfUnsupported);
1373 }
1374 
1376  const TensorInfo& input,
1377  const TensorInfo& output,
1379  const TensorInfo& weights,
1380  const Optional<TensorInfo>& biases,
1382 {
1383  TensorInfo biasesVal = biases.has_value() ? biases.value() : TensorInfo();
1384  TensorInfos infos{input, output, weights, biasesVal};
1385 
1386  return m_LayerSupport->IsLayerSupported(LayerType::TransposeConvolution2d,
1387  infos,
1388  descriptor,
1389  EmptyOptional(),
1390  EmptyOptional(),
1391  reasonIfUnsupported);
1392 }
1393 
1395  const TensorInfo& output,
1398 {
1399  TensorInfos infos{input, output};
1400 
1401  return m_LayerSupport->IsLayerSupported(LayerType::Transpose,
1402  infos,
1403  descriptor,
1404  EmptyOptional(),
1405  EmptyOptional(),
1406  reasonIfUnsupported);
1407 }
1408 
1409 // Forwarding function to maintain ABI stability
1411  const TensorInfo& outputStateIn,
1412  const TensorInfo& cellStateIn,
1413  const TensorInfo& output,
1416  const LstmDescriptor& descriptor,
1419 {
1420  TensorInfo hiddenStateOutputVal = hiddenStateOutput.has_value() ? hiddenStateOutput.value() : TensorInfo();
1421  TensorInfo cellStateOutputVal = cellStateOutput.has_value() ? cellStateOutput.value() : TensorInfo();
1422  TensorInfos infos{input, outputStateIn, cellStateIn, hiddenStateOutputVal, cellStateOutputVal, output};
1423 
1425  outputStateIn,
1426  cellStateIn,
1427  hiddenStateOutputVal,
1428  cellStateOutputVal,
1429  output,
1430  descriptor,
1431  paramsInfo,
1432  reasonIfUnsupported);
1433 }
1434 
1436  const TensorInfo& outputStateIn,
1437  const TensorInfo& cellStateIn,
1438  const TensorInfo& outputStateOut,
1439  const TensorInfo& cellStateOut,
1440  const TensorInfo& output,
1441  const LstmDescriptor& descriptor,
1444 {
1445  TensorInfos infos{input, outputStateIn, cellStateIn, outputStateOut, cellStateOut, output};
1446 
1447  return m_LayerSupport->IsLayerSupported(LayerType::UnidirectionalSequenceLstm,
1448  infos,
1449  descriptor,
1450  paramsInfo,
1451  EmptyOptional(),
1452  reasonIfUnsupported);
1453 }
1454 
1455 }
bool IsDepthwiseConvolutionSupported(const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string &> reasonIfUnsupported=EmptyOptional())
bool IsL2NormalizationSupported(const TensorInfo &input, const TensorInfo &output, const L2NormalizationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional())
bool m_BiasEnabled
Enable/disable bias.
const BackendOption & GetOption(size_t idx) const
bool IsLstmSupported(const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &scratchBuffer, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const LstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string &> reasonIfUnsupported=EmptyOptional())
bool IsStridedSliceSupported(const TensorInfo &input, const TensorInfo &output, const StridedSliceDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional())
bool IsConstant() const
Definition: Tensor.cpp:509
bool HasCapability(const std::string &name, const BackendCapabilities &capabilities)
Convenience function to check if a capability exists in a BackendCapabilities struct.
bool IsSpaceToDepthSupported(const TensorInfo &input, const TensorInfo &output, const SpaceToDepthDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional())
FactoryFunction GetFactory(const BackendId &id) const
A ViewsDescriptor for the SplitterLayer.
bool IsDetectionPostProcessSupported(const TensorInfo &boxEncodings, const TensorInfo &scores, const TensorInfo &anchors, const TensorInfo &detectionBoxes, const TensorInfo &detectionClasses, const TensorInfo &detectionScores, const TensorInfo &numDetections, const DetectionPostProcessDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional())
bool m_BiasEnabled
Enable/disable bias.
bool IsElementwiseUnarySupported(const TensorInfo &input, const TensorInfo &output, const ElementwiseUnaryDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional())
bool IsBackendRegistered(const BackendId &id) const
A TransposeConvolution2dDescriptor for the TransposeConvolution2dLayer.
bool IsComparisonSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ComparisonDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional())
A ReshapeDescriptor for the ReshapeLayer.
bool IsPreluSupported(const TensorInfo &input, const TensorInfo &alpha, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional())
bool IsPadSupported(const TensorInfo &input, const TensorInfo &output, const PadDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional())
#define ARMNN_NO_DEPRECATE_WARN_BEGIN
Definition: Deprecated.hpp:33
A ComparisonDescriptor for the ComparisonLayer.
Definition: Descriptors.hpp:89
bool IsAdditionSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional())
Optional< const BackendOptions::BackendOption > GetCapability(const std::string &backendCapabilityName, const BackendCapabilities &capabilities)
Returns a BackendCapability if the backend lists the capability The BackendCapability must then be in...
bool IsLogicalBinarySupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const LogicalBinaryDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional())
bool IsDebugSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional())
bool IsPooling2dSupported(const TensorInfo &input, const TensorInfo &output, const Pooling2dDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional())
A Convolution2dDescriptor for the Convolution2dLayer.
bool IsReduceSupported(const TensorInfo &input, const TensorInfo &output, const ReduceDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional())
bool IsInstanceNormalizationSupported(const TensorInfo &input, const TensorInfo &output, const InstanceNormalizationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional())
bool IsConvolution2dSupported(const TensorInfo &input, const TensorInfo &output, const Convolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string &> reasonIfUnsupported=EmptyOptional())
bool IsLogicalUnarySupported(const TensorInfo &input, const TensorInfo &output, const ElementwiseUnaryDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional())
bool IsMemImportSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional())
bool IsMultiplicationSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional())
bool IsArgMinMaxSupported(const TensorInfo &input, const TensorInfo &output, const ArgMinMaxDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional())
bool IsPooling3dSupported(const TensorInfo &input, const TensorInfo &output, const Pooling3dDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional())
#define ARMNN_LOG(severity)
Definition: Logging.hpp:205
bool IsSplitterSupported(const TensorInfo &input, const std::vector< std::reference_wrapper< TensorInfo >> &outputs, const ViewsDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional())
bool IsInputSupported(const TensorInfo &input, Optional< std::string &> reasonIfUnsupported=EmptyOptional())
bool IsSubtractionSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional())
BackendRegistry & BackendRegistryInstance()
bool IsConvertFp16ToFp32Supported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional())
bool IsSoftmaxSupported(const TensorInfo &input, const TensorInfo &output, const SoftmaxDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional())
A LogicalBinaryDescriptor for the LogicalBinaryLayer.
Copyright (c) 2021 ARM Limited and Contributors.
bool IsLogSoftmaxSupported(const TensorInfo &input, const TensorInfo &output, const LogSoftmaxDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional())
const TensorInfo const TensorInfo & cellStateIn
bool IsFakeQuantizationSupported(const TensorInfo &input, const FakeQuantizationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional())
A SpaceToDepthDescriptor for the SpaceToDepthLayer.
bool IsMergeSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional())
bool IsRankSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional())
const TensorInfo const TensorInfo const TensorInfo const Optional< TensorInfo > const Optional< TensorInfo > const LstmDescriptor & descriptor
A BatchToSpaceNdDescriptor for the BatchToSpaceNdLayer.
const TensorInfo const TensorInfo const TensorInfo & output
const TensorInfo const TensorInfo const TensorInfo const Optional< TensorInfo > const Optional< TensorInfo > & cellStateOutput
bool IsGatherNdSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional())
bool IsConvertFp32ToFp16Supported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional())
A ResizeBilinearDescriptor for the ResizeBilinearLayer.
bool IsResizeSupported(const TensorInfo &input, const TensorInfo &output, const ResizeDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional())
bool IsDepthToSpaceSupported(const TensorInfo &input, const TensorInfo &output, const DepthToSpaceDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional())
Base class for all descriptors.
Definition: Descriptors.hpp:22
A StackDescriptor for the StackLayer.
const TensorInfo const TensorInfo const TensorInfo const Optional< TensorInfo > const Optional< TensorInfo > const LstmDescriptor const LstmInputParamsInfo Optional< std::string & > reasonIfUnsupported
bool IsQuantizedLstmSupported(const TensorInfo &input, const TensorInfo &previousCellStateIn, const TensorInfo &previousOutputIn, const TensorInfo &cellStateOut, const TensorInfo &output, const QuantizedLstmInputParamsInfo &paramsInfo, Optional< std::string &> reasonIfUnsupported=EmptyOptional())
A PadDescriptor for the PadLayer.
bool IsFullyConnectedSupported(const TensorInfo &input, const TensorInfo &output, const TensorInfo &weights, const TensorInfo &biases, const FullyConnectedDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional())
bool IsStackSupported(const std::vector< const TensorInfo *> &inputs, const TensorInfo &output, const StackDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional())
bool IsSpaceToBatchNdSupported(const TensorInfo &input, const TensorInfo &output, const SpaceToBatchNdDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional())
size_t GetOptionCount() const noexcept
unsigned int GetNumberOfCacheFiles(const armnn::BackendId &backend)
Returns the number of cached files if backend supports caching.
bool IsMinimumSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional())
bool IsPermuteSupported(const TensorInfo &input, const TensorInfo &output, const PermuteDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional())
bool IsQuantizeSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional())
An LstmDescriptor for the LstmLayer.
#define ARMNN_NO_DEPRECATE_WARN_END
Definition: Deprecated.hpp:34
A L2NormalizationDescriptor for the L2NormalizationLayer.
BackendCapability
BackendCapability class.
Definition: Types.hpp:267
bool IsFillSupported(const TensorInfo &input, const TensorInfo &output, const FillDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional())
An ArgMinMaxDescriptor for ArgMinMaxLayer.
Definition: Descriptors.hpp:67
bool IsUnidirectionalSequenceLstmSupported(const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const LstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string &> reasonIfUnsupported=EmptyOptional())
An OriginsDescriptor for the ConcatLayer.
A ReduceDescriptor for the REDUCE operators.
bool has_value() const noexcept
Definition: Optional.hpp:53
A FullyConnectedDescriptor for the FullyConnectedLayer.
bool IsSliceSupported(const TensorInfo &input, const TensorInfo &output, const SliceDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional())
bool m_BiasEnabled
Enable/disable bias.
A FakeQuantizationDescriptor for the FakeQuantizationLayer.
bool IsTransposeConvolution2dSupported(const TensorInfo &input, const TensorInfo &output, const TransposeConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string &> reasonIfUnsupported=EmptyOptional())
A GatherDescriptor for the GatherLayer.
std::string AsString() const
bool IsBatchNormalizationSupported(const TensorInfo &input, const TensorInfo &output, const TensorInfo &mean, const TensorInfo &var, const TensorInfo &beta, const TensorInfo &gamma, const BatchNormalizationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional())
bool IsConcatSupported(const std::vector< const TensorInfo *> inputs, const TensorInfo &output, const OriginsDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional())
A StandInDescriptor for the StandIn layer.
const TensorInfo & outputStateIn
A QLstmDescriptor for the QLstmLayer.
bool IsConstantSupported(const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional())
bool IsBatchToSpaceNdSupported(const TensorInfo &input, const TensorInfo &output, const BatchToSpaceNdDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional())
const TensorInfo const TensorInfo const TensorInfo const Optional< TensorInfo > & hiddenStateOutput
bool IsChannelShuffleSupported(const TensorInfo &input, const TensorInfo &output, const ChannelShuffleDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional())
bool IsCastSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional())
An ActivationDescriptor for the ActivationLayer.
Definition: Descriptors.hpp:36
bool IsReshapeSupported(const TensorInfo &input, const TensorInfo &output, const ReshapeDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional())
A SliceDescriptor for the SliceLayer.
A Convolution3dDescriptor for the Convolution3dLayer.
bool IsOutputSupported(const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional())
bool IsMemCopySupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional())
bool IsPreCompiledSupported(const TensorInfo &input, const PreCompiledDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional())
A Pooling3dDescriptor for the Pooling3dLayer.
unsigned int AsUnsignedInt() const
bool IsDivisionSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional())
A SpaceToBatchNdDescriptor for the SpaceToBatchNdLayer.
Struct for the users to pass backend specific options.
bool IsStandInSupported(const std::vector< const TensorInfo *> &inputs, const std::vector< const TensorInfo *> &outputs, const StandInDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional())
bool IsNormalizationSupported(const TensorInfo &input, const TensorInfo &output, const NormalizationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional())
bool IsGatherSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const GatherDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional())
EmptyOptional is used to initialize the Optional class in case we want to have default value for an O...
Definition: Optional.hpp:32
A ElementwiseUnaryDescriptor for the ElementwiseUnaryLayer.
bool IsTransposeSupported(const TensorInfo &input, const TensorInfo &output, const TransposeDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional())
bool IsShapeSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional())
bool AsBool() const
Value getters.
bool IsCapabilitySupported(const armnn::BackendId &backend, armnn::BackendCapability capability)
Convenience function to check a capability on a backend.
bool IsConvertFp32ToBf16Supported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional())
bool IsQLstmSupported(const TensorInfo &input, const TensorInfo &previousOutputIn, const TensorInfo &previousCellStateIn, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const QLstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string &> reasonIfUnsupported=EmptyOptional())
bool IsBool() const
Type getters.
bool IsConvolution3dSupported(const TensorInfo &input, const TensorInfo &output, const Convolution3dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string &> reasonIfUnsupported=EmptyOptional())
bool IsSwitchSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output0, const TensorInfo &output1, Optional< std::string &> reasonIfUnsupported=EmptyOptional())
A MeanDescriptor for the MeanLayer.
A TransposeDescriptor for the TransposeLayer.
A StridedSliceDescriptor for the StridedSliceLayer.
bool IsConvertBf16ToFp32Supported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional())
bool IsMaximumSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional())
A PreCompiledDescriptor for the PreCompiledLayer.
bool IsMeanSupported(const TensorInfo &input, const TensorInfo &output, const MeanDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional())
A Pooling2dDescriptor for the Pooling2dLayer.
std::vector< TensorInfo > TensorInfos
LayerSupportHandle GetILayerSupportByBackendId(const armnn::BackendId &backend)
Convenience function to retrieve the ILayerSupportHandle for a backend.
A NormalizationDescriptor for the NormalizationLayer.
bool IsDequantizeSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional())
An InstanceNormalizationDescriptor for InstanceNormalizationLayer.
A ChannelShuffleDescriptor for the ChannelShuffle operator.
bool IsActivationSupported(const TensorInfo &input, const TensorInfo &output, const ActivationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional())
bool IsUndefined() const
Definition: BackendId.hpp:141
A SoftmaxDescriptor for the SoftmaxLayer.
const TensorInfo const TensorInfo const TensorInfo const Optional< TensorInfo > const Optional< TensorInfo > const LstmDescriptor const LstmInputParamsInfo & paramsInfo
A DepthwiseConvolution2dDescriptor for the DepthwiseConvolution2dLayer.
A FillDescriptor for the FillLayer.
A BatchNormalizationDescriptor for the BatchNormalizationLayer.
A PermuteDescriptor for the PermuteLayer.
bool IsDilatedDepthwiseConvolutionSupported(const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string &> reasonIfUnsupported=EmptyOptional())
bool IsFloorSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional())
bool m_ConstantWeights
Enable/disable constant weights and biases.