ArmNN 21.11
LayerSupport.cpp
1 //
2 // Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
6 #include <armnn/LayerSupport.hpp>
7 #include <armnn/Optional.hpp>
8 #include <armnn/ILayerSupport.hpp>
9 #include <armnn/BackendRegistry.hpp>
10 
11 #include <armnn/backends/IBackendInternal.hpp>
12 
13 #include <armnn/utility/Assert.hpp>
14 
15 #include <cstring>
16 #include <algorithm>
17 #include <unordered_map>
18 
19 namespace
20 {
21 
22 /// Helper function to copy a full string to a truncated version.
23 void CopyErrorMessage(char* truncatedString, const char* fullString, size_t maxLength)
24 {
25  if(truncatedString != nullptr)
26  {
27  std::snprintf(truncatedString, maxLength, "%s", fullString);
28  }
29 }
30 
31 } // anonymous namespace
32 
33 namespace armnn
34 {
35 
36 // Helper macro to avoid code duplication.
37 // Forwards function func to the ILayerSupport object of the backend identified by backendId.
38 #define FORWARD_LAYER_SUPPORT_FUNC(backendId, func, ...) \
39  std::string reasonIfUnsupportedFull; \
40  bool isSupported; \
41  try { \
42  auto const& backendRegistry = BackendRegistryInstance(); \
43  if (!backendRegistry.IsBackendRegistered(backendId)) \
44  { \
45  std::stringstream ss; \
46  ss << __func__ << " is not supported on " << backendId << " because this backend is not registered."; \
47  reasonIfUnsupportedFull = ss.str(); \
48  isSupported = false; \
49  } \
50  else \
51  { \
52  auto factoryFunc = backendRegistry.GetFactory(backendId); \
53  auto backendObject = factoryFunc(); \
54  auto layerSupportObject = backendObject->GetLayerSupport(); \
55  isSupported = layerSupportObject->func(__VA_ARGS__, Optional<std::string&>(reasonIfUnsupportedFull)); \
56  CopyErrorMessage(reasonIfUnsupported, reasonIfUnsupportedFull.c_str(), reasonIfUnsupportedMaxLength); \
57  } \
58  } catch (const InvalidArgumentException &e) { \
59  /* re-throwing with more context information */ \
60  throw InvalidArgumentException(e, "Failed to check layer support", CHECK_LOCATION()); \
61  } \
62  return isSupported;
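// --- Illustrative sketch, not part of the original file: an assumption-laden paraphrase of
// --- what one FORWARD_LAYER_SUPPORT_FUNC expansion does at runtime for IsActivationSupported.
// --- backend, input, output and descriptor stand for the caller-supplied arguments.
//
//     std::string reason;
//     auto const& registry = BackendRegistryInstance();
//     if (!registry.IsBackendRegistered(backend))
//     {
//         return false; // the macro also builds a "<func> is not supported on <backend>" message here
//     }
//     auto backendObject = registry.GetFactory(backend)();   // construct the backend
//     auto layerSupport  = backendObject->GetLayerSupport(); // its ILayerSupport object
//     return layerSupport->IsActivationSupported(input, output, descriptor,
//                                                Optional<std::string&>(reason));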
63 
64 bool CheckTensorDataTypesEqual(const TensorInfo& input0, const TensorInfo& input1)
65 {
66  return input0.GetDataType() == input1.GetDataType();
67 }
68 
69 bool IsActivationSupported(const BackendId& backend,
70  const TensorInfo& input,
71  const TensorInfo& output,
72  const ActivationDescriptor& descriptor,
73  char* reasonIfUnsupported,
74  size_t reasonIfUnsupportedMaxLength)
75 {
76  FORWARD_LAYER_SUPPORT_FUNC(backend, IsActivationSupported, input, output, descriptor);
77 }
78 
79 bool IsAdditionSupported(const BackendId& backend,
80  const TensorInfo& input0,
81  const TensorInfo& input1,
82  const TensorInfo& output,
83  char* reasonIfUnsupported,
84  size_t reasonIfUnsupportedMaxLength)
85 {
86  if(!CheckTensorDataTypesEqual(input0, input1))
87  {
88  return false;
89  }
90 
91  FORWARD_LAYER_SUPPORT_FUNC(backend, IsAdditionSupported, input0, input1, output);
92 }
93 
94 bool IsArgMinMaxSupported(const BackendId& backend,
95  const TensorInfo& input,
96  const TensorInfo& output,
97  const ArgMinMaxDescriptor& descriptor,
98  char* reasonIfUnsupported,
99  size_t reasonIfUnsupportedMaxLength)
100 {
101  FORWARD_LAYER_SUPPORT_FUNC(backend, IsArgMinMaxSupported, input, output, descriptor);
102 }
103 
104 bool IsBatchNormalizationSupported(const BackendId& backend,
105  const TensorInfo& input,
106  const TensorInfo& output,
107  const TensorInfo& mean,
108  const TensorInfo& var,
109  const TensorInfo& beta,
110  const TensorInfo& gamma,
111  const BatchNormalizationDescriptor& descriptor,
112  char* reasonIfUnsupported,
113  size_t reasonIfUnsupportedMaxLength)
114 {
115  FORWARD_LAYER_SUPPORT_FUNC(backend,
116  IsBatchNormalizationSupported,
117  input,
118  output,
119  mean,
120  var,
121  beta,
122  gamma,
123  descriptor);
124 }
125 
126 bool IsBatchToSpaceNdSupported(const BackendId& backend,
127  const TensorInfo& input,
128  const TensorInfo& output,
129  const BatchToSpaceNdDescriptor& descriptor,
130  char* reasonIfUnsupported,
131  size_t reasonIfUnsupportedMaxLength)
132 {
133  FORWARD_LAYER_SUPPORT_FUNC(backend,
134  IsBatchToSpaceNdSupported,
135  input,
136  output,
137  descriptor);
138 }
139 
140 bool IsConcatSupported(const BackendId& backend,
141  std::vector<const TensorInfo*> inputs,
142  const TensorInfo& output,
143  const OriginsDescriptor& descriptor,
144  char* reasonIfUnsupported,
145  size_t reasonIfUnsupportedMaxLength)
146 {
147  ARMNN_ASSERT(inputs.size() > 0);
148 
149  FORWARD_LAYER_SUPPORT_FUNC(backend, IsConcatSupported, inputs, output, descriptor);
150 }
151 
152 bool IsConstantSupported(const BackendId& backend,
153  const TensorInfo& output,
154  char* reasonIfUnsupported,
155  size_t reasonIfUnsupportedMaxLength)
156 {
157  FORWARD_LAYER_SUPPORT_FUNC(backend, IsConstantSupported, output);
158 }
159 
160 bool IsConvertFp16ToFp32Supported(const BackendId& backend,
161  const TensorInfo& input,
162  const TensorInfo& output,
163  char* reasonIfUnsupported,
164  size_t reasonIfUnsupportedMaxLength)
165 {
166  FORWARD_LAYER_SUPPORT_FUNC(backend, IsConvertFp16ToFp32Supported, input, output);
167 }
168 
169 bool IsConvertFp32ToFp16Supported(const BackendId& backend,
170  const TensorInfo& input,
171  const TensorInfo& output,
172  char* reasonIfUnsupported,
173  size_t reasonIfUnsupportedMaxLength)
174 {
175  FORWARD_LAYER_SUPPORT_FUNC(backend, IsConvertFp32ToFp16Supported, input, output);
176 }
177 
178 bool IsConvolution2dSupported(const BackendId& backend,
179  const TensorInfo& input,
180  const TensorInfo& output,
181  const Convolution2dDescriptor& descriptor,
182  const TensorInfo& weights,
183  const Optional<TensorInfo>& biases,
184  char* reasonIfUnsupported,
185  size_t reasonIfUnsupportedMaxLength)
186 {
187  FORWARD_LAYER_SUPPORT_FUNC(backend, IsConvolution2dSupported, input, output, descriptor, weights, biases);
188 }
189 
190 bool IsDebugSupported(const BackendId& backend,
191  const TensorInfo& input,
192  const TensorInfo& output,
193  char* reasonIfUnsupported,
194  size_t reasonIfUnsupportedMaxLength)
195 {
196  FORWARD_LAYER_SUPPORT_FUNC(backend, IsDebugSupported, input, output);
197 }
198 
199 bool IsDepthwiseConvolutionSupported(const BackendId& backend,
200  const TensorInfo& input,
201  const TensorInfo& output,
202  const DepthwiseConvolution2dDescriptor& descriptor,
203  const TensorInfo& weights,
204  const Optional<TensorInfo>& biases,
205  char* reasonIfUnsupported,
206  size_t reasonIfUnsupportedMaxLength)
207 {
208  if (descriptor.m_DilationX == 1 && descriptor.m_DilationY == 1)
209  {
210  // Pre 19.05 ArmNN did not have the dilation parameters.
211  // This version of IsDepthwiseConvolutionSupported is called for backwards-compatibility
212  FORWARD_LAYER_SUPPORT_FUNC(backend,
213  IsDepthwiseConvolutionSupported,
214  input,
215  output,
216  descriptor,
217  weights,
218  biases);
219  }
220  else
221  {
222  FORWARD_LAYER_SUPPORT_FUNC(backend,
223  IsDilatedDepthwiseConvolutionSupported,
224  input,
225  output,
226  descriptor,
227  weights,
228  biases);
229  }
230 }
231 
232 bool IsDequantizeSupported(const BackendId& backend,
233  const TensorInfo& input,
234  const TensorInfo& output,
235  char* reasonIfUnsupported,
236  size_t reasonIfUnsupportedMaxLength)
237 {
238  FORWARD_LAYER_SUPPORT_FUNC(backend, IsDequantizeSupported, input, output);
239 }
240 
241 bool IsDetectionPostProcessSupported(const BackendId& backend,
242  const TensorInfo& input0,
243  const TensorInfo& input1,
244  const DetectionPostProcessDescriptor& descriptor,
245  char* reasonIfUnsupported,
246  size_t reasonIfUnsupportedMaxLength);
247 
248 bool IsDivisionSupported(const BackendId& backend,
249  const TensorInfo& input0,
250  const TensorInfo& input1,
251  const TensorInfo& output,
252  char* reasonIfUnsupported,
253  size_t reasonIfUnsupportedMaxLength)
254 {
255  FORWARD_LAYER_SUPPORT_FUNC(backend, IsDivisionSupported, input0, input1, output);
256 }
257 
258 bool IsEqualSupported(const BackendId& backend,
259  const TensorInfo& input0,
260  const TensorInfo& input1,
261  const TensorInfo& output,
262  char* reasonIfUnsupported,
263  size_t reasonIfUnsupportedMaxLength)
264 {
265  FORWARD_LAYER_SUPPORT_FUNC(backend,
266  IsComparisonSupported,
267  input0,
268  input1,
269  output,
270  ComparisonDescriptor(ComparisonOperation::Equal));
271 }
272 
273 bool IsFakeQuantizationSupported(const BackendId& backend,
274  const TensorInfo& input,
275  const FakeQuantizationDescriptor& descriptor,
276  char* reasonIfUnsupported,
277  size_t reasonIfUnsupportedMaxLength)
278 {
279  FORWARD_LAYER_SUPPORT_FUNC(backend, IsFakeQuantizationSupported, input, descriptor);
280 }
281 
282 bool IsFloorSupported(const BackendId& backend,
283  const TensorInfo& input,
284  const TensorInfo& output,
285  char* reasonIfUnsupported,
286  size_t reasonIfUnsupportedMaxLength)
287 {
288  // By definition (that is, regardless of compute device), shapes and data type must match.
289  if (input.GetShape() != output.GetShape() || input.GetDataType() != output.GetDataType())
290  {
291  return false;
292  }
293 
294  FORWARD_LAYER_SUPPORT_FUNC(backend, IsFloorSupported, input, output);
295 }
296 bool IsFullyConnectedSupported(const BackendId& backend,
297  const TensorInfo& input,
298  const TensorInfo& output,
299  const TensorInfo& weights,
300  const TensorInfo& biases,
301  const FullyConnectedDescriptor& descriptor,
302  char* reasonIfUnsupported,
303  size_t reasonIfUnsupportedMaxLength)
304 {
305  FORWARD_LAYER_SUPPORT_FUNC(backend, IsFullyConnectedSupported, input, output, weights, biases, descriptor);
306 }
307 
308 bool IsGatherSupported(const BackendId& backend,
309  const TensorInfo& input0,
310  const TensorInfo& input1,
311  const TensorInfo& output,
312  const GatherDescriptor& descriptor,
313  char* reasonIfUnsupported,
314  size_t reasonIfUnsupportedMaxLength)
315 {
316  FORWARD_LAYER_SUPPORT_FUNC(backend, IsGatherSupported, input0, input1, output, descriptor);
317 }
318 
319 bool IsGreaterSupported(const BackendId& backend,
320  const TensorInfo& input0,
321  const TensorInfo& input1,
322  const TensorInfo& output,
323  char* reasonIfUnsupported,
324  size_t reasonIfUnsupportedMaxLength)
325 {
326  FORWARD_LAYER_SUPPORT_FUNC(backend,
327  IsComparisonSupported,
328  input0,
329  input1,
330  output,
331  ComparisonDescriptor(ComparisonOperation::Greater));
332 }
333 
334 bool IsInputSupported(const BackendId& backend,
335  const TensorInfo& input,
336  char* reasonIfUnsupported,
337  size_t reasonIfUnsupportedMaxLength)
338 {
339  FORWARD_LAYER_SUPPORT_FUNC(backend, IsInputSupported, input);
340 }
341 
342 
343 bool IsL2NormalizationSupported(const BackendId& backend,
344  const TensorInfo& input,
345  const TensorInfo& output,
346  const L2NormalizationDescriptor& descriptor,
347  char* reasonIfUnsupported,
348  size_t reasonIfUnsupportedMaxLength)
349 {
350  FORWARD_LAYER_SUPPORT_FUNC(backend, IsL2NormalizationSupported, input, output, descriptor);
351 }
352 
353 bool IsLstmSupported(const BackendId& backend, const TensorInfo& input, const TensorInfo& outputStateIn,
354  const TensorInfo& cellStateIn, const TensorInfo& scratchBuffer,
355  const TensorInfo& outputStateOut, const TensorInfo& cellStateOut,
356  const TensorInfo& output, const LstmDescriptor& descriptor,
357  const LstmInputParamsInfo& paramsInfo, char* reasonIfUnsupported,
358  size_t reasonIfUnsupportedMaxLength)
359 
360 {
361  FORWARD_LAYER_SUPPORT_FUNC(backend, IsLstmSupported, input, outputStateIn, cellStateIn,
362  scratchBuffer, outputStateOut, cellStateOut,
363  output, descriptor, paramsInfo);
364 }
365 
366 bool IsMaximumSupported(const BackendId& backend,
367  const TensorInfo& input0,
368  const TensorInfo& input1,
369  const TensorInfo& output,
370  char* reasonIfUnsupported,
371  size_t reasonIfUnsupportedMaxLength)
372 {
373  FORWARD_LAYER_SUPPORT_FUNC(backend, IsMaximumSupported, input0, input1, output);
374 }
375 
376 bool IsMeanSupported(const BackendId& backend,
377  const TensorInfo& input,
378  const TensorInfo& output,
379  const MeanDescriptor& descriptor,
380  char* reasonIfUnsupported,
381  size_t reasonIfUnsupportedMaxLength)
382 {
383  FORWARD_LAYER_SUPPORT_FUNC(backend, IsMeanSupported, input, output, descriptor);
384 }
385 
386 bool IsMemCopySupported(const BackendId &backend,
387  const TensorInfo &input,
388  const TensorInfo &output,
389  char *reasonIfUnsupported,
390  size_t reasonIfUnsupportedMaxLength)
391 {
392  FORWARD_LAYER_SUPPORT_FUNC(backend, IsMemCopySupported, input, output);
393 }
394 
395 bool IsMemImportSupported(const BackendId &backend,
396  const TensorInfo &input,
397  const TensorInfo &output,
398  char *reasonIfUnsupported,
399  size_t reasonIfUnsupportedMaxLength)
400 {
401  FORWARD_LAYER_SUPPORT_FUNC(backend, IsMemImportSupported, input, output);
402 }
403 
404 bool IsMergeSupported(const BackendId& backend,
405  const TensorInfo& input0,
406  const TensorInfo& input1,
407  const TensorInfo& output,
408  char* reasonIfUnsupported,
409  size_t reasonIfUnsupportedMaxLength)
410 {
411  FORWARD_LAYER_SUPPORT_FUNC(backend, IsMergeSupported, input0, input1, output);
412 }
413 
414 bool IsMinimumSupported(const BackendId& backend,
415  const TensorInfo& input0,
416  const TensorInfo& input1,
417  const TensorInfo& output,
418  char* reasonIfUnsupported,
419  size_t reasonIfUnsupportedMaxLength)
420 {
421  FORWARD_LAYER_SUPPORT_FUNC(backend, IsMinimumSupported, input0, input1, output);
422 }
423 
424 bool IsMultiplicationSupported(const BackendId& backend,
425  const TensorInfo& input0,
426  const TensorInfo& input1,
427  const TensorInfo& output,
428  char* reasonIfUnsupported,
429  size_t reasonIfUnsupportedMaxLength)
430 {
431  FORWARD_LAYER_SUPPORT_FUNC(backend, IsMultiplicationSupported, input0, input1, output);
432 }
433 
434 bool IsNormalizationSupported(const BackendId& backend,
435  const TensorInfo& input,
436  const TensorInfo& output,
437  const NormalizationDescriptor& descriptor,
438  char* reasonIfUnsupported,
439  size_t reasonIfUnsupportedMaxLength)
440 {
441  FORWARD_LAYER_SUPPORT_FUNC(backend, IsNormalizationSupported, input, output, descriptor);
442 }
443 
444 bool IsOutputSupported(const BackendId& backend,
445  const TensorInfo& output,
446  char* reasonIfUnsupported,
447  size_t reasonIfUnsupportedMaxLength)
448 {
449  FORWARD_LAYER_SUPPORT_FUNC(backend, IsOutputSupported, output);
450 }
451 
452 bool IsPadSupported(const BackendId& backend,
453  const TensorInfo& input,
454  const TensorInfo& output,
455  const PadDescriptor& descriptor,
456  char* reasonIfUnsupported,
457  size_t reasonIfUnsupportedMaxLength)
458 {
459 
460  FORWARD_LAYER_SUPPORT_FUNC(backend, IsPadSupported, input, output, descriptor);
461 }
462 
463 bool IsQuantizeSupported(const BackendId& backend,
464  const TensorInfo& input,
465  const TensorInfo& output,
466  char* reasonIfUnsupported,
467  size_t reasonIfUnsupportedMaxLength)
468 {
469  FORWARD_LAYER_SUPPORT_FUNC(backend, IsQuantizeSupported, input, output);
470 }
471 
472 bool IsQLstmSupported(const BackendId& backend,
473  const TensorInfo& input,
474  const TensorInfo& previousOutputIn,
475  const TensorInfo& previousCellStateIn,
476  const TensorInfo& outputStateOut,
477  const TensorInfo& cellStateOut,
478  const TensorInfo& output,
479  const QLstmDescriptor& descriptor,
480  const LstmInputParamsInfo& paramsInfo,
481  char* reasonIfUnsupported,
482  size_t reasonIfUnsupportedMaxLength)
483 
484 {
485  FORWARD_LAYER_SUPPORT_FUNC(backend, IsQLstmSupported, input, previousOutputIn, previousCellStateIn,
486  outputStateOut, cellStateOut, output, descriptor, paramsInfo);
487 }
488 
489 bool IsQuantizedLstmSupported(const BackendId& backend,
490  const TensorInfo& input,
491  const TensorInfo& previousCellStateIn,
492  const TensorInfo& previousOutputIn,
493  const TensorInfo& cellStateOut,
494  const TensorInfo& output,
495  const QuantizedLstmInputParamsInfo& paramsInfo,
496  char* reasonIfUnsupported,
497  size_t reasonIfUnsupportedMaxLength)
498 
499 {
500  FORWARD_LAYER_SUPPORT_FUNC(backend, IsQuantizedLstmSupported, input, previousCellStateIn, previousOutputIn,
501  cellStateOut, output, paramsInfo);
502 }
503 
504 
505 bool IsPermuteSupported(const BackendId& backend,
506  const TensorInfo& input,
507  const TensorInfo& output,
508  const PermuteDescriptor& descriptor,
509  char* reasonIfUnsupported,
510  size_t reasonIfUnsupportedMaxLength)
511 {
512  FORWARD_LAYER_SUPPORT_FUNC(backend, IsPermuteSupported, input, output, descriptor);
513 }
514 
515 bool IsPooling2dSupported(const BackendId& backend,
516  const TensorInfo& input,
517  const TensorInfo& output,
518  const Pooling2dDescriptor& descriptor,
519  char* reasonIfUnsupported,
520  size_t reasonIfUnsupportedMaxLength)
521 {
522  FORWARD_LAYER_SUPPORT_FUNC(backend, IsPooling2dSupported, input, output, descriptor);
523 }
524 
525 bool IsPreluSupported(const BackendId& backend,
526  const TensorInfo& input,
527  const TensorInfo& alpha,
528  const TensorInfo& output,
529  char* reasonIfUnsupported,
530  size_t reasonIfUnsupportedMaxLength)
531 {
532  FORWARD_LAYER_SUPPORT_FUNC(backend, IsPreluSupported, input, alpha, output);
533 }
534 
535 bool IsReduceSupported(const BackendId& backend,
536  const TensorInfo& input,
537  const TensorInfo& output,
538  const ReduceDescriptor& descriptor,
539  char* reasonIfUnsupported,
540  size_t reasonIfUnsupportedMaxLength)
541 {
542  FORWARD_LAYER_SUPPORT_FUNC(backend, IsReduceSupported, input, output, descriptor);
543 }
544 
545 bool IsReshapeSupported(const BackendId& backend,
546  const TensorInfo& input,
547  const TensorInfo& output,
548  const ReshapeDescriptor& descriptor,
549  char* reasonIfUnsupported,
550  size_t reasonIfUnsupportedMaxLength)
551 {
552  FORWARD_LAYER_SUPPORT_FUNC(backend, IsReshapeSupported, input, output, descriptor);
553 }
554 
555 bool IsResizeSupported(const BackendId& backend,
556  const TensorInfo& input,
557  const TensorInfo& output,
558  const ResizeDescriptor& descriptor,
559  char* reasonIfUnsupported,
560  size_t reasonIfUnsupportedMaxLength)
561 {
562  FORWARD_LAYER_SUPPORT_FUNC(backend, IsResizeSupported, input, output, descriptor);
563 }
564 
565 bool IsSoftmaxSupported(const BackendId& backend,
566  const TensorInfo& input,
567  const TensorInfo& output,
568  const SoftmaxDescriptor& descriptor,
569  char* reasonIfUnsupported,
570  size_t reasonIfUnsupportedMaxLength)
571 {
572  FORWARD_LAYER_SUPPORT_FUNC(backend, IsSoftmaxSupported, input, output, descriptor);
573 }
574 
575 bool IsSpaceToBatchNdSupported(const BackendId& backend,
576  const TensorInfo& input,
577  const TensorInfo& output,
578  const SpaceToBatchNdDescriptor& descriptor,
579  char* reasonIfUnsupported,
580  size_t reasonIfUnsupportedMaxLength)
581 {
582  FORWARD_LAYER_SUPPORT_FUNC(backend, IsSpaceToBatchNdSupported, input, output, descriptor);
583 }
584 
585 bool IsSpaceToDepthSupported(const BackendId& backend,
586  const TensorInfo& input,
587  const TensorInfo& output,
588  const SpaceToDepthDescriptor& descriptor,
589  char* reasonIfUnsupported,
590  size_t reasonIfUnsupportedMaxLength)
591 {
592  FORWARD_LAYER_SUPPORT_FUNC(backend, IsSpaceToDepthSupported, input, output, descriptor);
593 }
594 
595 bool IsSplitterSupported(const BackendId& backend,
596  const TensorInfo& input,
597  const std::vector<std::reference_wrapper<TensorInfo>>& outputs,
598  const ViewsDescriptor& descriptor,
599  char* reasonIfUnsupported,
600  size_t reasonIfUnsupportedMaxLength)
601 {
602  FORWARD_LAYER_SUPPORT_FUNC(backend, IsSplitterSupported, input, outputs, descriptor);
603 }
604 
605 bool IsStridedSliceSupported(const BackendId& backend,
606  const TensorInfo& input,
607  const TensorInfo& output,
608  const StridedSliceDescriptor& descriptor,
609  char* reasonIfUnsupported,
610  size_t reasonIfUnsupportedMaxLength)
611 {
612  FORWARD_LAYER_SUPPORT_FUNC(backend, IsStridedSliceSupported, input, output, descriptor);
613 }
614 
615 bool IsSubtractionSupported(const BackendId& backend,
616  const TensorInfo& input0,
617  const TensorInfo& input1,
618  const TensorInfo& output,
619  char* reasonIfUnsupported,
620  size_t reasonIfUnsupportedMaxLength)
621 {
622  FORWARD_LAYER_SUPPORT_FUNC(backend, IsSubtractionSupported, input0, input1, output);
623 }
624 
625 bool IsSwitchSupported(const BackendId& backend,
626  const TensorInfo& input0,
627  const TensorInfo& input1,
628  const TensorInfo& output0,
629  const TensorInfo& output1,
630  char* reasonIfUnsupported,
631  size_t reasonIfUnsupportedMaxLength)
632 {
633  FORWARD_LAYER_SUPPORT_FUNC(backend, IsSwitchSupported, input0, input1, output0, output1);
634 }
635 
636 } // namespace armnn
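
A minimal caller-side sketch of how these deprecated free functions are typically used. It assumes the CpuRef reference backend is linked into the application; the tensor shape, the backend id and the 1024-byte reason buffer are illustrative choices, not taken from this file.

    #include <armnn/BackendId.hpp>
    #include <armnn/LayerSupport.hpp>
    #include <armnn/Tensor.hpp>
    #include <armnn/Types.hpp>

    #include <iostream>

    int main()
    {
        // Illustrative float32 tensor used for both inputs and the output of an addition.
        armnn::TensorShape shape({1, 2, 2, 3});
        armnn::TensorInfo  info(shape, armnn::DataType::Float32);

        // Query the reference backend; an unregistered id makes the query return false
        // rather than throw.
        armnn::BackendId backend{"CpuRef"};

        char reason[1024] = {};
        bool supported = armnn::IsAdditionSupported(backend,
                                                    info,   // input0
                                                    info,   // input1
                                                    info,   // output
                                                    reason,
                                                    sizeof(reason));
        if (!supported)
        {
            // reason may hold a (possibly truncated) explanation written by the backend.
            std::cout << "Addition not supported on " << backend.Get() << ": " << reason << std::endl;
        }
        return supported ? 0 : 1;
    }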