ArmNN
 21.11
BackendHelper.cpp
//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include <armnn/BackendHelper.hpp>
#include <armnn/BackendRegistry.hpp>
#include <armnn/Logging.hpp>

#include <armnn/backends/IBackendInternal.hpp>

namespace armnn
{

// Return LayerSupportHandle instead of the previous pointer to ILayerSupport.
LayerSupportHandle GetILayerSupportByBackendId(const armnn::BackendId& backend)
{
    auto& backendRegistry = armnn::BackendRegistryInstance();

    if (!backendRegistry.IsBackendRegistered(backend))
    {
        return LayerSupportHandle(nullptr);
    }

    auto factoryFunc = backendRegistry.GetFactory(backend);
    auto backendObject = factoryFunc();
    return LayerSupportHandle(backendObject->GetLayerSupport(), backend);
}

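// Example (illustrative sketch, not part of the original listing): a caller can obtain a handle
// and query layer support for a registered backend. input0Info, input1Info and outputInfo are
// assumed, caller-owned TensorInfo objects.
//     LayerSupportHandle handle = GetILayerSupportByBackendId(armnn::BackendId("CpuRef"));
//     if (handle.IsBackendRegistered())
//     {
//         std::string reason;
//         bool supported = handle.IsAdditionSupported(input0Info, input1Info, outputInfo, reason);
//     }
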
Optional<const BackendOptions::BackendOption> GetCapability(const std::string& backendCapabilityName,
                                                            const BackendCapabilities& capabilities)
{
    for (size_t i = 0; i < capabilities.GetOptionCount(); i++)
    {
        const auto& capability = capabilities.GetOption(i);
        if (backendCapabilityName == capability.GetName())
        {
            return capability;
        }
    }
    return EmptyOptional();
}

Optional<const BackendOptions::BackendOption> GetCapability(const std::string& backendCapabilityName,
                                                            const armnn::BackendId& backend)
{
    auto const& backendRegistry = armnn::BackendRegistryInstance();
    if (backendRegistry.IsBackendRegistered(backend))
    {
        auto factoryFunc = backendRegistry.GetFactory(backend);
        auto backendObject = factoryFunc();
        auto capabilities = backendObject->GetCapabilities();
        return GetCapability(backendCapabilityName, capabilities);
    }
    return EmptyOptional();
}

bool HasCapability(const std::string& name, const BackendCapabilities& capabilities)
{
    return GetCapability(name, capabilities).has_value();
}

bool HasCapability(const std::string& name, const armnn::BackendId& backend)
{
    return GetCapability(name, backend).has_value();
}

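// Example (illustrative sketch): the name-based helpers above probe a backend's published
// capabilities. The capability names used here appear elsewhere in this file and are only examples.
//     bool nonConstWeights = HasCapability("NonConstWeights", armnn::BackendId("CpuRef"));
//     auto constAsInputs   = GetCapability("ConstantTensorsAsInputs", armnn::BackendId("CpuRef"));
//     if (constAsInputs.has_value()) { bool value = constAsInputs.value().GetValue().AsBool(); }
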
bool HasCapability(const BackendOptions::BackendOption& capability, const BackendCapabilities& capabilities)
{
    for (size_t i = 0; i < capabilities.GetOptionCount(); i++)
    {
        const auto& backendCapability = capabilities.GetOption(i);
        if (capability.GetName() == backendCapability.GetName())
        {
            if (capability.GetValue().IsBool() && backendCapability.GetValue().IsBool())
            {
                return capability.GetValue().AsBool() == backendCapability.GetValue().AsBool();
            }
            else if (capability.GetValue().IsFloat() && backendCapability.GetValue().IsFloat())
            {
                return capability.GetValue().AsFloat() == backendCapability.GetValue().AsFloat();
            }
            else if (capability.GetValue().IsInt() && backendCapability.GetValue().IsInt())
            {
                return capability.GetValue().AsInt() == backendCapability.GetValue().AsInt();
            }
            else if (capability.GetValue().IsString() && backendCapability.GetValue().IsString())
            {
                return capability.GetValue().AsString() == backendCapability.GetValue().AsString();
            }
            else if (capability.GetValue().IsUnsignedInt() && backendCapability.GetValue().IsUnsignedInt())
            {
                return capability.GetValue().AsUnsignedInt() == backendCapability.GetValue().AsUnsignedInt();
            }
        }
    }
    return false;
}

bool HasCapability(const BackendOptions::BackendOption& backendOption, const armnn::BackendId& backend)
{
    auto const& backendRegistry = armnn::BackendRegistryInstance();
    if (backendRegistry.IsBackendRegistered(backend))
    {
        auto factoryFunc = backendRegistry.GetFactory(backend);
        auto backendObject = factoryFunc();
        auto capabilities = backendObject->GetCapabilities();
        return HasCapability(backendOption, capabilities);
    }
    return false;
}

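// Example (illustrative sketch): a fully specified option is matched on both name and value, and
// the two values must have the same underlying type to compare equal.
//     armnn::BackendOptions::BackendOption option("NonConstWeights", true);
//     bool matches = HasCapability(option, armnn::BackendId("CpuRef"));
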
/// Convenience function to check a capability on a backend
bool IsCapabilitySupported(const armnn::BackendId& backend, armnn::BackendCapability capability)
{
    bool hasCapability = false;
    auto const& backendRegistry = armnn::BackendRegistryInstance();
    if (backendRegistry.IsBackendRegistered(backend))
    {
        auto factoryFunc = backendRegistry.GetFactory(backend);
        auto backendObject = factoryFunc();
        ARMNN_NO_DEPRECATE_WARN_BEGIN
        hasCapability = backendObject->HasCapability(capability);
        ARMNN_NO_DEPRECATE_WARN_END
    }
    return hasCapability;
}

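// Note: the deprecation guards above indicate that the enum-based IBackendInternal::HasCapability
// query is a legacy path; the BackendOptions-based GetCapability/HasCapability overloads earlier in
// this file work with named capabilities instead.
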
unsigned int GetNumberOfCacheFiles(const armnn::BackendId& backend)
{
    auto const& backendRegistry = armnn::BackendRegistryInstance();
    if (backendRegistry.IsBackendRegistered(backend))
    {
        auto factoryFunc = backendRegistry.GetFactory(backend);
        auto backendObject = factoryFunc();
        return backendObject->GetNumberOfCacheFiles();
    }
    return 0;
}

bool LayerSupportHandle::IsBackendRegistered() const
{
    if (m_LayerSupport)
    {
        return true;
    }

    return false;
}

bool LayerSupportHandle::IsActivationSupported(const TensorInfo& input,
                                               const TensorInfo& output,
                                               const ActivationDescriptor& descriptor,
                                               Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsActivationSupported(input, output, descriptor, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsAdditionSupported(const TensorInfo& input0,
                                             const TensorInfo& input1,
                                             const TensorInfo& output,
                                             Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsAdditionSupported(input0, input1, output, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsArgMinMaxSupported(const TensorInfo& input,
                                              const TensorInfo& output,
                                              const ArgMinMaxDescriptor& descriptor,
                                              Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsArgMinMaxSupported(input, output, descriptor, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsBatchNormalizationSupported(const TensorInfo& input,
                                                       const TensorInfo& output,
                                                       const TensorInfo& mean,
                                                       const TensorInfo& var,
                                                       const TensorInfo& beta,
                                                       const TensorInfo& gamma,
                                                       const BatchNormalizationDescriptor& descriptor,
                                                       Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsBatchNormalizationSupported(input,
                                                         output,
                                                         mean,
                                                         var,
                                                         beta,
                                                         gamma,
                                                         descriptor,
                                                         reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsBatchToSpaceNdSupported(const TensorInfo& input,
                                                   const TensorInfo& output,
                                                   const BatchToSpaceNdDescriptor& descriptor,
                                                   Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsBatchToSpaceNdSupported(input,
                                                     output,
                                                     descriptor,
                                                     reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsCastSupported(const TensorInfo& input,
                                         const TensorInfo& output,
                                         Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsCastSupported(input, output, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsChannelShuffleSupported(const TensorInfo& input, const TensorInfo& output,
                                                   const ChannelShuffleDescriptor& descriptor,
                                                   Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsChannelShuffleSupported(input,
                                                     output,
                                                     descriptor,
                                                     reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsComparisonSupported(const TensorInfo& input0,
                                               const TensorInfo& input1,
                                               const TensorInfo& output,
                                               const ComparisonDescriptor& descriptor,
                                               Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsComparisonSupported(input0, input1, output, descriptor, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsConcatSupported(const std::vector<const TensorInfo*> inputs,
                                           const TensorInfo& output,
                                           const OriginsDescriptor& descriptor,
                                           Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsConcatSupported(inputs, output, descriptor, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsConstantSupported(const TensorInfo& output,
                                             Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsConstantSupported(output, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsConvertBf16ToFp32Supported(const TensorInfo& input,
                                                      const TensorInfo& output,
                                                      Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsConvertBf16ToFp32Supported(input, output, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsConvertFp32ToBf16Supported(const TensorInfo& input,
                                                      const TensorInfo& output,
                                                      Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsConvertFp32ToBf16Supported(input, output, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsConvertFp16ToFp32Supported(const TensorInfo& input,
                                                      const TensorInfo& output,
                                                      Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsConvertFp16ToFp32Supported(input, output, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsConvertFp32ToFp16Supported(const TensorInfo& input,
                                                      const TensorInfo& output,
                                                      Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsConvertFp32ToFp16Supported(input, output, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsConvolution2dSupported(const TensorInfo& input,
                                                  const TensorInfo& output,
                                                  const Convolution2dDescriptor& descriptor,
                                                  const TensorInfo& weights,
                                                  const Optional<TensorInfo>& biases,
                                                  Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsConvolution2dSupported(input,
                                                    output,
                                                    descriptor,
                                                    weights,
                                                    biases,
                                                    reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsConvolution3dSupported(const TensorInfo& input,
                                                  const TensorInfo& output,
                                                  const Convolution3dDescriptor& descriptor,
                                                  const TensorInfo& weights,
                                                  const Optional<TensorInfo>& biases,
                                                  Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsConvolution3dSupported(input,
                                                    output,
                                                    descriptor,
                                                    weights,
                                                    biases,
                                                    reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsDebugSupported(const TensorInfo& input,
                                          const TensorInfo& output,
                                          Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsDebugSupported(input, output, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsDepthToSpaceSupported(const TensorInfo& input,
                                                 const TensorInfo& output,
                                                 const DepthToSpaceDescriptor& descriptor,
                                                 Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsDepthToSpaceSupported(input, output, descriptor, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsDepthwiseConvolutionSupported(
    const TensorInfo& input,
    const TensorInfo& output,
    const DepthwiseConvolution2dDescriptor& descriptor,
    const TensorInfo& weights,
    const Optional<TensorInfo>& biases,
    Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsDepthwiseConvolutionSupported(input,
                                                           output,
                                                           descriptor,
                                                           weights,
                                                           biases,
                                                           reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsDequantizeSupported(const TensorInfo& input,
                                               const TensorInfo& output,
                                               Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsDequantizeSupported(input, output, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsDetectionPostProcessSupported(const TensorInfo& boxEncodings,
                                                         const TensorInfo& scores,
                                                         const TensorInfo& anchors,
                                                         const TensorInfo& detectionBoxes,
                                                         const TensorInfo& detectionClasses,
                                                         const TensorInfo& detectionScores,
                                                         const TensorInfo& numDetections,
                                                         const DetectionPostProcessDescriptor& descriptor,
                                                         Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsDetectionPostProcessSupported(boxEncodings,
                                                           scores,
                                                           anchors,
                                                           detectionBoxes,
                                                           detectionClasses,
                                                           detectionScores,
                                                           numDetections,
                                                           descriptor,
                                                           reasonIfUnsupported);
}

bool LayerSupportHandle::IsDilatedDepthwiseConvolutionSupported(
    const TensorInfo& input,
    const TensorInfo& output,
    const DepthwiseConvolution2dDescriptor& descriptor,
    const TensorInfo& weights,
    const Optional<TensorInfo>& biases,
    Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsDilatedDepthwiseConvolutionSupported(input,
                                                                  output,
                                                                  descriptor,
                                                                  weights,
                                                                  biases,
                                                                  reasonIfUnsupported);
}

bool LayerSupportHandle::IsDivisionSupported(const TensorInfo& input0,
                                             const TensorInfo& input1,
                                             const TensorInfo& output,
                                             Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsDivisionSupported(input0, input1, output, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsElementwiseUnarySupported(const TensorInfo& input,
                                                     const TensorInfo& output,
                                                     const ElementwiseUnaryDescriptor& descriptor,
                                                     Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsElementwiseUnarySupported(input, output, descriptor, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsFakeQuantizationSupported(const TensorInfo& input,
                                                     const FakeQuantizationDescriptor& descriptor,
                                                     Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsFakeQuantizationSupported(input, descriptor, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsFillSupported(const TensorInfo& input,
                                         const TensorInfo& output,
                                         const FillDescriptor& descriptor,
                                         Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsFillSupported(input, output, descriptor, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsFloorSupported(const TensorInfo& input,
                                          const TensorInfo& output,
                                          Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsFloorSupported(input, output, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsFullyConnectedSupported(const TensorInfo& input,
                                                   const TensorInfo& output,
                                                   const TensorInfo& weights,
                                                   const TensorInfo& biases,
                                                   const FullyConnectedDescriptor& descriptor,
                                                   Optional<std::string&> reasonIfUnsupported)
{
    if (!m_BackendId.IsUndefined())
    {
        auto capability = GetCapability("ConstantTensorsAsInputs", m_BackendId);
        if (!capability.has_value() || capability.value().GetValue().AsBool() == false)
        {
            if (!weights.IsConstant())
            {
                reasonIfUnsupported.value() =
                    "This backend might not support non constant weights. "
                    "If weights are constant make sure to set IsConstant when creating TensorInfo";
                return false;
            }
            if (descriptor.m_BiasEnabled)
            {
                if (!biases.IsConstant())
                {
                    reasonIfUnsupported.value() =
                        "This backend might not support non constant bias. "
                        "If bias are constant make sure to set IsConstant when creating TensorInfo";
                    return false;
                }
            }

            // At the first stage we will only print a warning. This is to give
            // backend developers a chance to adopt and read weights from input slots.
            ARMNN_LOG(warning) << "The backend makes use of a deprecated interface to read constant tensors. "
                                  "If you are a backend developer please find more information in our "
                                  "doxygen documentation on github https://github.com/ARM-software/armnn "
                                  "under the keyword 'ConstTensorsAsInputs'.";
        }

        if (!descriptor.m_ConstantWeights)
        {
            auto capability = GetCapability("NonConstWeights", m_BackendId);
            if (capability.has_value() && capability.value().GetValue().AsBool() == true)
            {
                return true;
            }
            return false;
        }
    }

    return m_LayerSupport->IsFullyConnectedSupported(input,
                                                     output,
                                                     weights,
                                                     biases,
                                                     descriptor,
                                                     reasonIfUnsupported.value());
}
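
// Example (illustrative sketch): when the ConstantTensorsAsInputs capability is absent, the check
// above expects genuinely constant weight/bias tensors to be flagged as such before the query.
// weightsInfo and biasInfo below are assumed, caller-owned TensorInfo objects.
//     weightsInfo.SetConstant();
//     biasInfo.SetConstant();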

bool LayerSupportHandle::IsGatherSupported(const TensorInfo& input0,
                                           const TensorInfo& input1,
                                           const TensorInfo& output,
                                           const GatherDescriptor& descriptor,
                                           Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsGatherSupported(input0, input1, output, descriptor, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsInputSupported(const TensorInfo& input,
                                          Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsInputSupported(input, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsInstanceNormalizationSupported(
    const TensorInfo& input,
    const TensorInfo& output,
    const InstanceNormalizationDescriptor& descriptor,
    Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsInstanceNormalizationSupported(input, output, descriptor, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsL2NormalizationSupported(const TensorInfo& input,
                                                    const TensorInfo& output,
                                                    const L2NormalizationDescriptor& descriptor,
                                                    Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsL2NormalizationSupported(input, output, descriptor, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsLogicalBinarySupported(const TensorInfo& input0,
                                                  const TensorInfo& input1,
                                                  const TensorInfo& output,
                                                  const LogicalBinaryDescriptor& descriptor,
                                                  Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsLogicalBinarySupported(input0,
                                                    input1,
                                                    output,
                                                    descriptor,
                                                    reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsLogicalUnarySupported(const TensorInfo& input,
                                                 const TensorInfo& output,
                                                 const ElementwiseUnaryDescriptor& descriptor,
                                                 Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsLogicalUnarySupported(input, output, descriptor, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsLogSoftmaxSupported(const TensorInfo& input,
                                               const TensorInfo& output,
                                               const LogSoftmaxDescriptor& descriptor,
                                               Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsLogSoftmaxSupported(input, output, descriptor, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsLstmSupported(const TensorInfo& input,
                                         const TensorInfo& outputStateIn,
                                         const TensorInfo& cellStateIn,
                                         const TensorInfo& scratchBuffer,
                                         const TensorInfo& outputStateOut,
                                         const TensorInfo& cellStateOut,
                                         const TensorInfo& output,
                                         const LstmDescriptor& descriptor,
                                         const LstmInputParamsInfo& paramsInfo,
                                         Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsLstmSupported(input,
                                           outputStateIn,
                                           cellStateIn,
                                           scratchBuffer,
                                           outputStateOut,
                                           cellStateOut,
                                           output,
                                           descriptor,
                                           paramsInfo,
                                           reasonIfUnsupported);
}

bool LayerSupportHandle::IsMaximumSupported(const TensorInfo& input0,
                                            const TensorInfo& input1,
                                            const TensorInfo& output,
                                            Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsMaximumSupported(input0, input1, output, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsMeanSupported(const TensorInfo& input,
                                         const TensorInfo& output,
                                         const MeanDescriptor& descriptor,
                                         Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsMeanSupported(input, output, descriptor, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsMemCopySupported(const TensorInfo& input,
                                            const TensorInfo& output,
                                            Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsMemCopySupported(input, output, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsMemImportSupported(const TensorInfo& input,
                                              const TensorInfo& output,
                                              Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsMemImportSupported(input, output, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsMergeSupported(const TensorInfo& input0,
                                          const TensorInfo& input1,
                                          const TensorInfo& output,
                                          Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsMergeSupported(input0, input1, output, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsMinimumSupported(const TensorInfo& input0,
                                            const TensorInfo& input1,
                                            const TensorInfo& output,
                                            Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsMinimumSupported(input0, input1, output, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsMultiplicationSupported(const TensorInfo& input0,
                                                   const TensorInfo& input1,
                                                   const TensorInfo& output,
                                                   Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsMultiplicationSupported(input0, input1, output, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsNormalizationSupported(const TensorInfo& input,
                                                  const TensorInfo& output,
                                                  const NormalizationDescriptor& descriptor,
                                                  Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsNormalizationSupported(input, output, descriptor, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsOutputSupported(const TensorInfo& output,
                                           Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsOutputSupported(output, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsPadSupported(const TensorInfo& input,
                                        const TensorInfo& output,
                                        const PadDescriptor& descriptor,
                                        Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsPadSupported(input, output, descriptor, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsPermuteSupported(const TensorInfo& input,
                                            const TensorInfo& output,
                                            const PermuteDescriptor& descriptor,
                                            Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsPermuteSupported(input, output, descriptor, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsPooling2dSupported(const TensorInfo& input,
                                              const TensorInfo& output,
                                              const Pooling2dDescriptor& descriptor,
                                              Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsPooling2dSupported(input, output, descriptor, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsPreCompiledSupported(const TensorInfo& input,
                                                const PreCompiledDescriptor& descriptor,
                                                Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsPreCompiledSupported(input, descriptor, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsPreluSupported(const TensorInfo& input,
                                          const TensorInfo& alpha,
                                          const TensorInfo& output,
                                          Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsPreluSupported(input, alpha, output, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsQuantizeSupported(const TensorInfo& input,
                                             const TensorInfo& output,
                                             Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsQuantizeSupported(input, output, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsQLstmSupported(const TensorInfo& input,
                                          const TensorInfo& previousOutputIn,
                                          const TensorInfo& previousCellStateIn,
                                          const TensorInfo& outputStateOut,
                                          const TensorInfo& cellStateOut,
                                          const TensorInfo& output,
                                          const QLstmDescriptor& descriptor,
                                          const LstmInputParamsInfo& paramsInfo,
                                          Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsQLstmSupported(input,
                                            previousOutputIn,
                                            previousCellStateIn,
                                            outputStateOut,
                                            cellStateOut,
                                            output,
                                            descriptor,
                                            paramsInfo,
                                            reasonIfUnsupported);
}

bool LayerSupportHandle::IsQuantizedLstmSupported(const TensorInfo& input,
                                                  const TensorInfo& previousCellStateIn,
                                                  const TensorInfo& previousOutputIn,
                                                  const TensorInfo& cellStateOut,
                                                  const TensorInfo& output,
                                                  const QuantizedLstmInputParamsInfo& paramsInfo,
                                                  Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsQuantizedLstmSupported(input,
                                                    previousCellStateIn,
                                                    previousOutputIn,
                                                    cellStateOut,
                                                    output,
                                                    paramsInfo,
                                                    reasonIfUnsupported);
}

bool LayerSupportHandle::IsRankSupported(const TensorInfo& input,
                                         const TensorInfo& output,
                                         Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsRankSupported(input, output, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsReduceSupported(const TensorInfo& input,
                                           const TensorInfo& output,
                                           const ReduceDescriptor& descriptor,
                                           Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsReduceSupported(input, output, descriptor, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsReshapeSupported(const TensorInfo& input,
                                            const TensorInfo& output,
                                            const ReshapeDescriptor& descriptor,
                                            Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsReshapeSupported(input, output, descriptor, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsResizeSupported(const TensorInfo& input,
                                           const TensorInfo& output,
                                           const ResizeDescriptor& descriptor,
                                           Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsResizeSupported(input, output, descriptor, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsShapeSupported(const TensorInfo& input,
                                          const TensorInfo& output,
                                          Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsShapeSupported(input, output, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsSliceSupported(const TensorInfo& input,
                                          const TensorInfo& output,
                                          const SliceDescriptor& descriptor,
                                          Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsSliceSupported(input, output, descriptor, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsSoftmaxSupported(const TensorInfo& input,
                                            const TensorInfo& output,
                                            const SoftmaxDescriptor& descriptor,
                                            Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsSoftmaxSupported(input, output, descriptor, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsSpaceToBatchNdSupported(const TensorInfo& input,
                                                   const TensorInfo& output,
                                                   const SpaceToBatchNdDescriptor& descriptor,
                                                   Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsSpaceToBatchNdSupported(input, output, descriptor, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsSpaceToDepthSupported(const TensorInfo& input,
                                                 const TensorInfo& output,
                                                 const SpaceToDepthDescriptor& descriptor,
                                                 Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsSpaceToDepthSupported(input, output, descriptor, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsSplitterSupported(const TensorInfo& input,
                                             const std::vector<std::reference_wrapper<TensorInfo>>& outputs,
                                             const ViewsDescriptor& descriptor,
                                             Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsSplitterSupported(input, outputs, descriptor, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsStackSupported(const std::vector<const TensorInfo*>& inputs,
                                          const TensorInfo& output,
                                          const StackDescriptor& descriptor,
                                          Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsStackSupported(inputs, output, descriptor, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsStandInSupported(const std::vector<const TensorInfo*>& inputs,
                                            const std::vector<const TensorInfo*>& outputs,
                                            const StandInDescriptor& descriptor,
                                            Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsStandInSupported(inputs, outputs, descriptor, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsStridedSliceSupported(const TensorInfo& input,
                                                 const TensorInfo& output,
                                                 const StridedSliceDescriptor& descriptor,
                                                 Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsStridedSliceSupported(input, output, descriptor, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsSubtractionSupported(const TensorInfo& input0,
                                                const TensorInfo& input1,
                                                const TensorInfo& output,
                                                Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsSubtractionSupported(input0, input1, output, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsSwitchSupported(const TensorInfo& input0,
                                           const TensorInfo& input1,
                                           const TensorInfo& output0,
                                           const TensorInfo& output1,
                                           Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsSwitchSupported(input0, input1, output0, output1, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsTransposeConvolution2dSupported(
    const TensorInfo& input,
    const TensorInfo& output,
    const TransposeConvolution2dDescriptor& descriptor,
    const TensorInfo& weights,
    const Optional<TensorInfo>& biases,
    Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsTransposeConvolution2dSupported(input,
                                                             output,
                                                             descriptor,
                                                             weights,
                                                             biases,
                                                             reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsTransposeSupported(const TensorInfo& input,
                                              const TensorInfo& output,
                                              const TransposeDescriptor& descriptor,
                                              Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsTransposeSupported(input, output, descriptor, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsUnidirectionalSequenceLstmSupported(const TensorInfo& input,
                                                               const TensorInfo& outputStateIn,
                                                               const TensorInfo& cellStateIn,
                                                               const TensorInfo& output,
                                                               const Optional<TensorInfo>& hiddenStateOutput,
                                                               const Optional<TensorInfo>& cellStateOutput,
                                                               const LstmDescriptor& descriptor,
                                                               const LstmInputParamsInfo& paramsInfo,
                                                               Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsUnidirectionalSequenceLstmSupported(input,
                                                                 outputStateIn,
                                                                 cellStateIn,
                                                                 output,
                                                                 hiddenStateOutput,
                                                                 cellStateOutput,
                                                                 descriptor,
                                                                 paramsInfo,
                                                                 reasonIfUnsupported);
}

} // namespace armnn