ArmNN 21.05
BackendHelper.hpp
1 //
2 // Copyright © 2017 Arm Ltd. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
6 #pragma once
7 
8 #include <armnn/BackendId.hpp>
9 #include <armnn/ILayerSupport.hpp>
10 #include <armnn/Types.hpp>
11 
12 namespace armnn
13 {
14 
15 // This handle calls its own IsXXXLayerSupported() functions, which in turn call the polymorphic
16 // ILayerSupport::IsXXXLayerSupported() at the framework level, so there is no risk of VTable misalignment.
17 // The aim is to keep ILayerSupport, in its abstract form, purely a backend interface, while exposing a
18 // separate, ABI-stable frontend class free of virtual functions via an added layer of indirection.
19 class LayerSupportHandle
20 {
21 public:
22  explicit LayerSupportHandle(std::shared_ptr<ILayerSupport> layerSupport)
23  : m_LayerSupport(std::move(layerSupport)), m_BackendId(Compute::Undefined) {};
24 
25  explicit LayerSupportHandle(std::shared_ptr<ILayerSupport> layerSupport, const BackendId& backendId)
26  : m_LayerSupport(std::move(layerSupport)), m_BackendId(backendId) {};
27 
28  bool IsBackendRegistered() const;
29 
30  ARMNN_DEPRECATED_MSG("Use IsElementwiseUnarySupported instead")
31  bool IsAbsSupported(const TensorInfo& input,
32  const TensorInfo& output,
33  Optional<std::string&> reasonIfUnsupported = EmptyOptional());
34 
35  bool IsActivationSupported(const TensorInfo& input,
36  const TensorInfo& output,
37  const ActivationDescriptor& descriptor,
38  Optional<std::string&> reasonIfUnsupported = EmptyOptional());
39 
40  bool IsAdditionSupported(const TensorInfo& input0,
41  const TensorInfo& input1,
42  const TensorInfo& output,
43  Optional<std::string&> reasonIfUnsupported = EmptyOptional());
44 
45  bool IsArgMinMaxSupported(const TensorInfo& input,
46  const TensorInfo& output,
47  const ArgMinMaxDescriptor& descriptor,
48  Optional<std::string&> reasonIfUnsupported = EmptyOptional());
49 
50  bool IsBatchNormalizationSupported(const TensorInfo& input,
51  const TensorInfo& output,
52  const TensorInfo& mean,
53  const TensorInfo& var,
54  const TensorInfo& beta,
55  const TensorInfo& gamma,
56  const BatchNormalizationDescriptor& descriptor,
57  Optional<std::string&> reasonIfUnsupported = EmptyOptional());
58 
59  bool IsBatchToSpaceNdSupported(const TensorInfo& input,
60  const TensorInfo& output,
61  const BatchToSpaceNdDescriptor& descriptor,
62  Optional<std::string&> reasonIfUnsupported = EmptyOptional());
63 
64  bool IsCastSupported(const TensorInfo& input,
65  const TensorInfo& output,
66  Optional<std::string&> reasonIfUnsupported = EmptyOptional());
67 
68  bool IsComparisonSupported(const TensorInfo& input0,
69  const TensorInfo& input1,
70  const TensorInfo& output,
71  const ComparisonDescriptor& descriptor,
72  Optional<std::string&> reasonIfUnsupported = EmptyOptional());
73 
74  bool IsConcatSupported(const std::vector<const TensorInfo*> inputs,
75  const TensorInfo& output,
76  const OriginsDescriptor& descriptor,
77  Optional<std::string&> reasonIfUnsupported = EmptyOptional());
78 
79  bool IsConstantSupported(const TensorInfo& output,
80  Optional<std::string&> reasonIfUnsupported = EmptyOptional());
81 
82  bool IsConvertBf16ToFp32Supported(const TensorInfo& input,
83  const TensorInfo& output,
84  Optional<std::string&> reasonIfUnsupported = EmptyOptional());
85 
86  bool IsConvertFp32ToBf16Supported(const TensorInfo& input,
87  const TensorInfo& output,
88  Optional<std::string&> reasonIfUnsupported = EmptyOptional());
89 
90  bool IsConvertFp16ToFp32Supported(const TensorInfo& input,
91  const TensorInfo& output,
92  Optional<std::string&> reasonIfUnsupported = EmptyOptional());
93 
94  bool IsConvertFp32ToFp16Supported(const TensorInfo& input,
95  const TensorInfo& output,
96  Optional<std::string&> reasonIfUnsupported = EmptyOptional());
97 
98  bool IsConvolution2dSupported(const TensorInfo& input,
99  const TensorInfo& output,
100  const Convolution2dDescriptor& descriptor,
101  const TensorInfo& weights,
102  const Optional<TensorInfo>& biases,
103  Optional<std::string&> reasonIfUnsupported = EmptyOptional());
104 
105  bool IsDebugSupported(const TensorInfo& input,
106  const TensorInfo& output,
107  Optional<std::string&> reasonIfUnsupported = EmptyOptional());
108 
109  bool IsDepthToSpaceSupported(const TensorInfo& input,
110  const TensorInfo& output,
111  const DepthToSpaceDescriptor& descriptor,
112  Optional<std::string&> reasonIfUnsupported = EmptyOptional());
113 
114  bool IsDepthwiseConvolutionSupported(
115  const TensorInfo& input,
116  const TensorInfo& output,
117  const DepthwiseConvolution2dDescriptor& descriptor,
118  const TensorInfo& weights,
119  const Optional<TensorInfo>& biases,
120  Optional<std::string&> reasonIfUnsupported = EmptyOptional());
121 
122  bool IsDequantizeSupported(const TensorInfo& input,
123  const TensorInfo& output,
124  Optional<std::string&> reasonIfUnsupported = EmptyOptional());
125 
126  bool IsDetectionPostProcessSupported(const TensorInfo& boxEncodings,
127  const TensorInfo& scores,
128  const TensorInfo& anchors,
129  const TensorInfo& detectionBoxes,
130  const TensorInfo& detectionClasses,
131  const TensorInfo& detectionScores,
132  const TensorInfo& numDetections,
133  const DetectionPostProcessDescriptor& descriptor,
134  Optional<std::string&> reasonIfUnsupported = EmptyOptional());
135 
136  bool IsDilatedDepthwiseConvolutionSupported(
137  const TensorInfo& input,
138  const TensorInfo& output,
139  const DepthwiseConvolution2dDescriptor& descriptor,
140  const TensorInfo& weights,
141  const Optional<TensorInfo>& biases,
142  Optional<std::string&> reasonIfUnsupported = EmptyOptional());
143 
144  bool IsDivisionSupported(const TensorInfo& input0,
145  const TensorInfo& input1,
146  const TensorInfo& output,
147  Optional<std::string&> reasonIfUnsupported = EmptyOptional());
148 
149  bool IsElementwiseUnarySupported(const TensorInfo& input,
150  const TensorInfo& output,
151  const ElementwiseUnaryDescriptor& descriptor,
152  Optional<std::string&> reasonIfUnsupported = EmptyOptional());
153 
154  ARMNN_DEPRECATED_MSG("Use IsComparisonSupported instead")
155  bool IsEqualSupported(const TensorInfo& input0,
156  const TensorInfo& input1,
157  const TensorInfo& output,
158  Optional<std::string&> reasonIfUnsupported = EmptyOptional());
159 
160  bool IsFakeQuantizationSupported(const TensorInfo& input,
161  const FakeQuantizationDescriptor& descriptor,
162  Optional<std::string&> reasonIfUnsupported = EmptyOptional());
163 
164  bool IsFillSupported(const TensorInfo& input,
165  const TensorInfo& output,
166  const FillDescriptor& descriptor,
167  Optional<std::string&> reasonIfUnsupported = EmptyOptional());
168 
169  bool IsFloorSupported(const TensorInfo& input,
170  const TensorInfo& output,
171  Optional<std::string&> reasonIfUnsupported = EmptyOptional());
172 
173  bool IsFullyConnectedSupported(const TensorInfo& input,
174  const TensorInfo& output,
175  const TensorInfo& weights,
176  const TensorInfo& biases,
177  const FullyConnectedDescriptor& descriptor,
178  Optional<std::string&> reasonIfUnsupported = EmptyOptional());
179 
180  ARMNN_DEPRECATED_MSG("Use IsGatherSupported with descriptor instead")
181  bool IsGatherSupported(const TensorInfo& input0,
182  const TensorInfo& input1,
183  const TensorInfo& output,
184  Optional<std::string&> reasonIfUnsupported = EmptyOptional());
185 
186  bool IsGatherSupported(const TensorInfo& input0,
187  const TensorInfo& input1,
188  const TensorInfo& output,
189  const GatherDescriptor& descriptor,
190  Optional<std::string&> reasonIfUnsupported = EmptyOptional());
191 
192  ARMNN_DEPRECATED_MSG("Use IsComparisonSupported instead")
193  bool IsGreaterSupported(const TensorInfo& input0,
194  const TensorInfo& input1,
195  const TensorInfo& output,
196  Optional<std::string&> reasonIfUnsupported = EmptyOptional());
197 
198  bool IsInputSupported(const TensorInfo& input,
199  Optional<std::string&> reasonIfUnsupported = EmptyOptional());
200 
201  bool IsInstanceNormalizationSupported(
202  const TensorInfo& input,
203  const TensorInfo& output,
204  const InstanceNormalizationDescriptor& descriptor,
205  Optional<std::string&> reasonIfUnsupported = EmptyOptional());
206 
207  bool IsL2NormalizationSupported(const TensorInfo& input,
208  const TensorInfo& output,
209  const L2NormalizationDescriptor& descriptor,
210  Optional<std::string&> reasonIfUnsupported = EmptyOptional());
211 
212  bool IsLogicalBinarySupported(const TensorInfo& input0,
213  const TensorInfo& input1,
214  const TensorInfo& output,
215  const LogicalBinaryDescriptor& descriptor,
216  Optional<std::string&> reasonIfUnsupported = EmptyOptional());
217 
218  bool IsLogicalUnarySupported(const TensorInfo& input,
219  const TensorInfo& output,
220  const ElementwiseUnaryDescriptor& descriptor,
221  Optional<std::string&> reasonIfUnsupported = EmptyOptional());
222 
223  bool IsLogSoftmaxSupported(const TensorInfo& input,
224  const TensorInfo& output,
225  const LogSoftmaxDescriptor& descriptor,
226  Optional<std::string&> reasonIfUnsupported = EmptyOptional());
227 
228  bool IsLstmSupported(const TensorInfo& input,
229  const TensorInfo& outputStateIn,
230  const TensorInfo& cellStateIn,
231  const TensorInfo& scratchBuffer,
232  const TensorInfo& outputStateOut,
233  const TensorInfo& cellStateOut,
234  const TensorInfo& output,
235  const LstmDescriptor& descriptor,
236  const LstmInputParamsInfo& paramsInfo,
237  Optional<std::string&> reasonIfUnsupported = EmptyOptional());
238 
239  bool IsMaximumSupported(const TensorInfo& input0,
240  const TensorInfo& input1,
241  const TensorInfo& output,
242  Optional<std::string&> reasonIfUnsupported = EmptyOptional());
243 
244  bool IsMeanSupported(const TensorInfo& input,
245  const TensorInfo& output,
246  const MeanDescriptor& descriptor,
247  Optional<std::string&> reasonIfUnsupported = EmptyOptional());
248 
249  bool IsMemCopySupported(const TensorInfo& input,
250  const TensorInfo& output,
251  Optional<std::string&> reasonIfUnsupported = EmptyOptional());
252 
253  bool IsMemImportSupported(const TensorInfo& input,
254  const TensorInfo& output,
255  Optional<std::string&> reasonIfUnsupported = EmptyOptional());
256 
257  bool IsMergeSupported(const TensorInfo& input0,
258  const TensorInfo& input1,
259  const TensorInfo& output,
260  Optional<std::string&> reasonIfUnsupported = EmptyOptional());
261 
262  ARMNN_DEPRECATED_MSG("Use IsConcatSupported instead")
263  bool IsMergerSupported(const std::vector<const TensorInfo*> inputs,
264  const TensorInfo& output,
265  const OriginsDescriptor& descriptor,
266  Optional<std::string&> reasonIfUnsupported = EmptyOptional());
267 
268  bool IsMinimumSupported(const TensorInfo& input0,
269  const TensorInfo& input1,
270  const TensorInfo& output,
271  Optional<std::string&> reasonIfUnsupported = EmptyOptional());
272 
273  bool IsMultiplicationSupported(const TensorInfo& input0,
274  const TensorInfo& input1,
275  const TensorInfo& output,
276  Optional<std::string&> reasonIfUnsupported = EmptyOptional());
277 
278  bool IsNormalizationSupported(const TensorInfo& input,
279  const TensorInfo& output,
280  const NormalizationDescriptor& descriptor,
281  Optional<std::string&> reasonIfUnsupported = EmptyOptional());
282 
283  bool IsOutputSupported(const TensorInfo& output,
284  Optional<std::string&> reasonIfUnsupported = EmptyOptional());
285 
286  bool IsPadSupported(const TensorInfo& input,
287  const TensorInfo& output,
288  const PadDescriptor& descriptor,
289  Optional<std::string&> reasonIfUnsupported = EmptyOptional());
290 
291  bool IsPermuteSupported(const TensorInfo& input,
292  const TensorInfo& output,
293  const PermuteDescriptor& descriptor,
294  Optional<std::string&> reasonIfUnsupported = EmptyOptional());
295 
296  bool IsPooling2dSupported(const TensorInfo& input,
297  const TensorInfo& output,
298  const Pooling2dDescriptor& descriptor,
299  Optional<std::string&> reasonIfUnsupported = EmptyOptional());
300 
301  bool IsPreCompiledSupported(const TensorInfo& input,
302  const PreCompiledDescriptor& descriptor,
303  Optional<std::string&> reasonIfUnsupported = EmptyOptional());
304 
305  bool IsPreluSupported(const TensorInfo& input,
306  const TensorInfo& alpha,
307  const TensorInfo& output,
308  Optional<std::string&> reasonIfUnsupported = EmptyOptional());
309 
310  bool IsQuantizeSupported(const TensorInfo& input,
311  const TensorInfo& output,
312  Optional<std::string&> reasonIfUnsupported = EmptyOptional());
313 
314  bool IsQLstmSupported(const TensorInfo& input,
315  const TensorInfo& previousOutputIn,
316  const TensorInfo& previousCellStateIn,
317  const TensorInfo& outputStateOut,
318  const TensorInfo& cellStateOut,
319  const TensorInfo& output,
320  const QLstmDescriptor& descriptor,
321  const LstmInputParamsInfo& paramsInfo,
322  Optional<std::string&> reasonIfUnsupported = EmptyOptional());
323 
324  bool IsQuantizedLstmSupported(const TensorInfo& input,
325  const TensorInfo& previousCellStateIn,
326  const TensorInfo& previousOutputIn,
327  const TensorInfo& cellStateOut,
328  const TensorInfo& output,
329  const QuantizedLstmInputParamsInfo& paramsInfo,
330  Optional<std::string&> reasonIfUnsupported = EmptyOptional());
331 
332  bool IsRankSupported(const TensorInfo& input,
333  const TensorInfo& output,
334  Optional<std::string&> reasonIfUnsupported = EmptyOptional());
335 
336  bool IsReduceSupported(const TensorInfo& input,
337  const TensorInfo& output,
338  const ReduceDescriptor& descriptor,
339  Optional<std::string&> reasonIfUnsupported = EmptyOptional());
340 
341  bool IsReshapeSupported(const TensorInfo& input,
342  const TensorInfo& output,
343  const ReshapeDescriptor& descriptor,
344  Optional<std::string&> reasonIfUnsupported = EmptyOptional());
345 
346  ARMNN_DEPRECATED_MSG("Use IsResizeSupported instead")
347  bool IsResizeBilinearSupported(const TensorInfo& input,
348  const TensorInfo& output,
349  Optional<std::string&> reasonIfUnsupported = EmptyOptional());
350 
351  bool IsResizeSupported(const TensorInfo& input,
352  const TensorInfo& output,
353  const ResizeDescriptor& descriptor,
354  Optional<std::string&> reasonIfUnsupported = EmptyOptional());
355 
356  ARMNN_DEPRECATED_MSG("Use IsElementwiseUnarySupported instead")
357  bool IsRsqrtSupported(const TensorInfo& input,
358  const TensorInfo& output,
359  Optional<std::string&> reasonIfUnsupported = EmptyOptional());
360 
361  bool IsSliceSupported(const TensorInfo& input,
362  const TensorInfo& output,
363  const SliceDescriptor& descriptor,
364  Optional<std::string&> reasonIfUnsupported = EmptyOptional());
365 
366  bool IsSoftmaxSupported(const TensorInfo& input,
367  const TensorInfo& output,
368  const SoftmaxDescriptor& descriptor,
369  Optional<std::string&> reasonIfUnsupported = EmptyOptional());
370 
371  bool IsSpaceToBatchNdSupported(const TensorInfo& input,
372  const TensorInfo& output,
373  const SpaceToBatchNdDescriptor& descriptor,
374  Optional<std::string&> reasonIfUnsupported = EmptyOptional());
375 
376  bool IsSpaceToDepthSupported(const TensorInfo& input,
377  const TensorInfo& output,
378  const SpaceToDepthDescriptor& descriptor,
379  Optional<std::string&> reasonIfUnsupported = EmptyOptional());
380 
381  ARMNN_DEPRECATED_MSG("Use IsSplitterSupported with outputs instead")
382  bool IsSplitterSupported(const TensorInfo& input,
383  const ViewsDescriptor& descriptor,
384  Optional<std::string&> reasonIfUnsupported = EmptyOptional());
385 
386  bool IsSplitterSupported(const TensorInfo& input,
387  const std::vector<std::reference_wrapper<TensorInfo>>& outputs,
388  const ViewsDescriptor& descriptor,
389  Optional<std::string&> reasonIfUnsupported = EmptyOptional());
390 
391  bool IsStackSupported(const std::vector<const TensorInfo*>& inputs,
392  const TensorInfo& output,
393  const StackDescriptor& descriptor,
394  Optional<std::string&> reasonIfUnsupported = EmptyOptional());
395 
396  bool IsStandInSupported(const std::vector<const TensorInfo*>& inputs,
397  const std::vector<const TensorInfo*>& outputs,
398  const StandInDescriptor& descriptor,
399  Optional<std::string&> reasonIfUnsupported = EmptyOptional());
400 
401 
402  bool IsStridedSliceSupported(const TensorInfo& input,
403  const TensorInfo& output,
404  const StridedSliceDescriptor& descriptor,
405  Optional<std::string&> reasonIfUnsupported = EmptyOptional());
406 
407  bool IsSubtractionSupported(const TensorInfo& input0,
408  const TensorInfo& input1,
409  const TensorInfo& output,
410  Optional<std::string&> reasonIfUnsupported = EmptyOptional());
411 
412  bool IsSwitchSupported(const TensorInfo& input0,
413  const TensorInfo& input1,
414  const TensorInfo& output0,
415  const TensorInfo& output1,
416  Optional<std::string&> reasonIfUnsupported = EmptyOptional());
417 
418  bool IsTransposeConvolution2dSupported(
419  const TensorInfo& input,
420  const TensorInfo& output,
421  const TransposeConvolution2dDescriptor& descriptor,
422  const TensorInfo& weights,
423  const Optional<TensorInfo>& biases,
424  Optional<std::string&> reasonIfUnsupported = EmptyOptional());
425 
426  bool IsTransposeSupported(const TensorInfo& input,
427  const TensorInfo& output,
428  const TransposeDescriptor& descriptor,
429  Optional<std::string&> reasonIfUnsupported = EmptyOptional());
430 
431 private:
432  std::shared_ptr<ILayerSupport> m_LayerSupport;
433  const BackendId m_BackendId;
434 };
435 
436 /// Convenience function to retrieve the LayerSupportHandle for a backend
437 LayerSupportHandle GetILayerSupportByBackendId(const armnn::BackendId& backend);
438 
439 /// Convenience function to check a capability on a backend
440 bool IsCapabilitySupported(const armnn::BackendId& backend, armnn::BackendCapability capability);
441 
442 }
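
The snippet below is a minimal usage sketch, not part of BackendHelper.hpp itself. It assumes the reference backend "CpuRef" is registered and uses an arbitrary tensor shape purely for illustration: it retrieves a LayerSupportHandle through GetILayerSupportByBackendId() and asks whether the backend can execute an Addition layer, capturing the backend's reason when it cannot.

// Usage sketch (illustrative only): query layer support on the "CpuRef" backend.
#include <armnn/BackendHelper.hpp>
#include <armnn/Optional.hpp>
#include <armnn/Tensor.hpp>
#include <armnn/Types.hpp>

#include <iostream>
#include <string>

int main()
{
    // Look up the layer-support handle for a backend by its BackendId.
    armnn::LayerSupportHandle handle = armnn::GetILayerSupportByBackendId("CpuRef");
    if (!handle.IsBackendRegistered())
    {
        std::cout << "CpuRef is not registered with the backend registry\n";
        return 1;
    }

    // Describe the tensors the prospective Addition layer would operate on
    // (the shape here is an arbitrary example).
    const armnn::TensorInfo info(armnn::TensorShape({ 1, 2, 2, 3 }), armnn::DataType::Float32);

    // Query support; on failure the optional string receives the backend's reason.
    std::string reason;
    const bool supported =
        handle.IsAdditionSupported(info, info, info, armnn::Optional<std::string&>(reason));

    std::cout << "Addition supported on CpuRef: " << (supported ? "yes" : "no");
    if (!supported)
    {
        std::cout << " (" << reason << ")";
    }
    std::cout << "\n";
    return 0;
}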
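
A second short sketch, also illustrative rather than part of the header, shows the free function IsCapabilitySupported(). The capability value BackendCapability::NonConstWeights and the "GpuAcc" backend id are assumptions chosen for the example; substitute the capability and backend you actually care about.

// Usage sketch (illustrative only): check a coarse-grained backend capability.
#include <armnn/BackendHelper.hpp>
#include <armnn/Types.hpp>

#include <iostream>

int main()
{
    // Returns true if the named backend advertises the given capability.
    const bool nonConstWeights =
        armnn::IsCapabilitySupported(armnn::BackendId("GpuAcc"),
                                     armnn::BackendCapability::NonConstWeights);
    std::cout << "GpuAcc supports non-const weights: "
              << (nonConstWeights ? "yes" : "no") << "\n";
    return 0;
}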