ArmNN  NotReleased
NeonLayerSupport.cpp
Go to the documentation of this file.
1 //
2 // Copyright © 2017 Arm Ltd. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
6 #include "NeonLayerSupport.hpp"
7 #include "NeonBackendId.hpp"
8 
9 #include <armnn/Descriptors.hpp>
10 #include <armnn/Tensor.hpp>
11 #include <armnn/Types.hpp>
13 
14 #include <InternalTypes.hpp>
15 #include <LayerSupportCommon.hpp>
16 
17 #include <boost/core/ignore_unused.hpp>
18 
19 #if defined(ARMCOMPUTENEON_ENABLED)
62 #endif
63 
64 using namespace boost;
65 
66 namespace armnn
67 {
68 
69 namespace
70 {
71 
72 template< typename ... Args>
73 bool IsNeonBackendSupported(Optional<std::string&> reasonIfUnsupported, Args... args)
74 {
75  boost::ignore_unused(reasonIfUnsupported, (args)...);
76 #if defined(ARMCOMPUTENEON_ENABLED)
77  return true;
78 #else
79  SetValueChecked(reasonIfUnsupported, "The armnn library has been built without NEON support");
80  return false;
81 #endif
82 }
83 
84 template<typename FloatFunc, typename Uint8Func, typename ... Params>
85 bool IsSupportedForDataTypeNeon(Optional<std::string&> reasonIfUnsupported,
86  DataType dataType,
87  FloatFunc floatFuncPtr,
88  Uint8Func uint8FuncPtr,
89  Params&&... params)
90 {
91  return IsNeonBackendSupported(reasonIfUnsupported) &&
92  IsSupportedForDataTypeGeneric(reasonIfUnsupported,
93  dataType,
94  floatFuncPtr,
95  floatFuncPtr,
96  uint8FuncPtr,
97  &FalseFunc<>,
98  &FalseFunc<>,
99  std::forward<Params>(params)...);
100 }
101 
102 #if defined(ARMCOMPUTENEON_ENABLED)
103 template<class FuncType, class... Args>
104 inline bool IsWorkloadSupported(FuncType& func, Optional<std::string&> reasonIfUnsupported, Args&&... args)
105 {
106  arm_compute::Status aclStatus = func(std::forward<Args>(args)...);
107  const bool supported = (aclStatus.error_code() == arm_compute::ErrorCode::OK);
108  if (!supported && reasonIfUnsupported)
109  {
110  reasonIfUnsupported.value() = aclStatus.error_description();
111  }
112  return supported;
113 }
114 
115 #define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported, ...) \
116  return IsWorkloadSupported(func, reasonIfUnsupported, __VA_ARGS__);
117 #else
118 #define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported, ...) \
119  return IsNeonBackendSupported(reasonIfUnsupported, __VA_ARGS__);
120 #endif
121 } // anonymous namespace
122 
123 bool NeonLayerSupport::IsAbsSupported(const TensorInfo& input,
124  const TensorInfo& output,
125  Optional<std::string&> reasonIfUnsupported) const
126 {
127  ElementwiseUnaryDescriptor descriptor(UnaryOperation::Abs);
128  return IsElementwiseUnarySupported(input, output, descriptor, reasonIfUnsupported);
129 }
130 
132  const TensorInfo& output,
133  const ActivationDescriptor& descriptor,
134  Optional<std::string&> reasonIfUnsupported) const
135 {
136  ignore_unused(descriptor);
138  reasonIfUnsupported,
139  input,
140  output,
141  descriptor);
142 }
143 
145  const TensorInfo& input1,
146  const TensorInfo& output,
147  Optional<std::string&> reasonIfUnsupported) const
148 {
150  reasonIfUnsupported,
151  input0,
152  input1,
153  output);
154 }
155 
157  const TensorInfo& output,
158  const ArgMinMaxDescriptor& descriptor,
159  Optional<std::string&> reasonIfUnsupported) const
160 {
162  reasonIfUnsupported,
163  input,
164  output,
165  descriptor);
166 }
167 
169  const TensorInfo& output,
170  const TensorInfo& mean,
171  const TensorInfo& var,
172  const TensorInfo& beta,
173  const TensorInfo& gamma,
174  const BatchNormalizationDescriptor& descriptor,
175  Optional<std::string&> reasonIfUnsupported) const
176 {
178  reasonIfUnsupported,
179  input,
180  output,
181  mean,
182  var,
183  beta,
184  gamma,
185  descriptor);
186 }
187 
189  const TensorInfo& output,
190  const BatchToSpaceNdDescriptor& descriptor,
191  Optional<std::string&> reasonIfUnsupported) const
192 {
194  reasonIfUnsupported,
195  input,
196  output,
197  descriptor);
198 }
199 
200 bool NeonLayerSupport::IsComparisonSupported(const TensorInfo& input0,
201  const TensorInfo& input1,
202  const TensorInfo& output,
203  const ComparisonDescriptor& descriptor,
204  Optional<std::string&> reasonIfUnsupported) const
205 {
206  if (descriptor.m_Operation == ComparisonOperation::Greater)
207  {
209  reasonIfUnsupported,
210  input0,
211  input1,
212  output);
213  }
214 
215  return false;
216 }
217 
218 bool NeonLayerSupport::IsConcatSupported(const std::vector<const TensorInfo*> inputs,
219  const TensorInfo& output,
220  const ConcatDescriptor& descriptor,
221  Optional<std::string&> reasonIfUnsupported) const
222 {
223  if (descriptor.GetNumDimensions() <= descriptor.GetConcatAxis())
224  {
225  SetValueChecked(reasonIfUnsupported, "Neon Concat: Concat axis > Number of dimensions.");
226  return false;
227  }
228 
229  unsigned int concatInnerAxis = (descriptor.GetNumDimensions() - descriptor.GetConcatAxis()) - 1;
230  if(concatInnerAxis < 3) // Width, height, or channels
231  {
233  reasonIfUnsupported,
234  inputs,
235  output,
236  descriptor);
237  }
238  else if (concatInnerAxis == 3)
239  {
240  for (auto& input : inputs)
241  {
242  if (input && !output.IsTypeSpaceMatch(*input)) // Cannot use sub-tensors if the types are not same space
243  {
244  SetValueChecked(reasonIfUnsupported, "Neon Concat: Types and quantization parameters must match.");
245  return false;
246  }
247  }
248  return true; // Sub-tensors support concat along batch
249  }
250  else // > 4 dimensions not supported.
251  {
252  SetValueChecked(reasonIfUnsupported, "Neon Concat: Maximum of 4 dimensions supported.");
253  return false;
254  }
255 }
256 
258  Optional<std::string&> reasonIfUnsupported) const
259 {
260  return IsSupportedForDataTypeNeon(reasonIfUnsupported,
261  output.GetDataType(),
262  &TrueFunc<>,
263  &TrueFunc<>);
264 }
265 
267  const TensorInfo& output,
268  Optional<std::string&> reasonIfUnsupported) const
269 {
270  ignore_unused(input);
271  ignore_unused(output);
272  ignore_unused(reasonIfUnsupported);
273  return true;
274 }
275 
277  const TensorInfo& output,
278  Optional<std::string&> reasonIfUnsupported) const
279 {
280  ignore_unused(input);
281  ignore_unused(output);
282  ignore_unused(reasonIfUnsupported);
283  return true;
284 }
285 
287  const TensorInfo& output,
288  const Convolution2dDescriptor& descriptor,
289  const TensorInfo& weights,
290  const Optional<TensorInfo>& biases,
291  Optional<std::string&> reasonIfUnsupported) const
292 {
294  reasonIfUnsupported,
295  input,
296  output,
297  descriptor,
298  weights,
299  biases);
300 }
301 
302 bool NeonLayerSupport::IsDepthToSpaceSupported(const TensorInfo& input,
303  const TensorInfo& output,
304  const DepthToSpaceDescriptor& descriptor,
305  Optional<std::string&> reasonIfUnsupported) const
306 {
308  reasonIfUnsupported,
309  input,
310  output,
311  descriptor);
312 }
313 
315  const TensorInfo& output,
316  const DepthwiseConvolution2dDescriptor& descriptor,
317  const TensorInfo& weights,
318  const Optional<TensorInfo>& biases,
319  Optional<std::string&> reasonIfUnsupported) const
320 {
322  reasonIfUnsupported,
323  input,
324  output,
325  descriptor,
326  weights,
327  biases);
328 }
329 
331  const TensorInfo& output,
332  Optional<std::string&> reasonIfUnsupported) const
333 {
335  reasonIfUnsupported,
336  input,
337  output);
338 }
339 
340 bool NeonLayerSupport::IsDilatedDepthwiseConvolutionSupported(const TensorInfo& input,
341  const TensorInfo& output,
342  const DepthwiseConvolution2dDescriptor& descriptor,
343  const TensorInfo& weights,
344  const Optional<TensorInfo>& biases,
345  Optional<std::string&> reasonIfUnsupported) const
346 {
348  reasonIfUnsupported,
349  input,
350  output,
351  descriptor,
352  weights,
353  biases);
354 }
355 
356 bool NeonLayerSupport::IsElementwiseUnarySupported(const TensorInfo& input,
357  const TensorInfo& output,
358  const ElementwiseUnaryDescriptor& descriptor,
359  Optional<std::string&> reasonIfUnsupported) const
360 {
361  if (descriptor.m_Operation == UnaryOperation::Abs)
362  {
364  reasonIfUnsupported,
365  input,
366  output);
367  }
368  else if (descriptor.m_Operation == UnaryOperation::Rsqrt)
369  {
371  reasonIfUnsupported,
372  input,
373  output);
374  }
375 
376  return false;
377 }
378 
380  const TensorInfo& output,
381  Optional<std::string&> reasonIfUnsupported) const
382 {
383  ignore_unused(output);
384  return IsNeonBackendSupported(reasonIfUnsupported) &&
385  IsSupportedForDataTypeGeneric(reasonIfUnsupported,
386  input.GetDataType(),
387  &FalseFuncF16<>,
388  &TrueFunc<>,
389  &FalseFuncU8<>,
390  &FalseFuncI32<>,
391  &FalseFuncU8<>);
392 }
393 
395  const TensorInfo& output,
396  const TensorInfo& weights,
397  const TensorInfo& biases,
398  const FullyConnectedDescriptor& descriptor,
399  Optional<std::string&> reasonIfUnsupported) const
400 {
402  reasonIfUnsupported,
403  input,
404  output,
405  weights,
406  biases,
407  descriptor);
408 }
409 
411  const armnn::TensorInfo& input1,
412  const armnn::TensorInfo& output,
413  armnn::Optional<std::string&> reasonIfUnsupported) const
414 {
415  ComparisonDescriptor descriptor(ComparisonOperation::Greater);
416  return IsComparisonSupported(input0, input1, output, descriptor, reasonIfUnsupported);
417 }
418 
420  Optional<std::string&> reasonIfUnsupported) const
421 {
422  return IsNeonBackendSupported(reasonIfUnsupported, input);
423 }
424 
425 bool NeonLayerSupport::IsInstanceNormalizationSupported(const TensorInfo& input,
426  const TensorInfo& output,
427  const InstanceNormalizationDescriptor& descriptor,
428  Optional<std::string&> reasonIfUnsupported) const
429 {
431  reasonIfUnsupported,
432  input,
433  output,
434  descriptor);
435 }
436 
438  const TensorInfo& output,
439  const L2NormalizationDescriptor& descriptor,
440  Optional<std::string&> reasonIfUnsupported) const
441 {
442  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonL2NormalizationWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
443 }
444 
446  const TensorInfo& outputStateIn,
447  const TensorInfo& cellStateIn,
448  const TensorInfo& scratchBuffer,
449  const TensorInfo& outputStateOut,
450  const TensorInfo& cellStateOut,
451  const TensorInfo& output,
452  const LstmDescriptor& descriptor,
453  const LstmInputParamsInfo& paramsInfo,
454  Optional<std::string&> reasonIfUnsupported) const
455 {
457  reasonIfUnsupported,
458  input,
459  outputStateIn,
460  cellStateIn,
461  scratchBuffer,
462  outputStateOut,
463  cellStateOut,
464  output,
465  descriptor,
466  paramsInfo);
467 }
468 
470  const TensorInfo& input1,
471  const TensorInfo& output,
472  Optional<std::string&> reasonIfUnsupported) const
473 {
475  reasonIfUnsupported,
476  input0,
477  input1,
478  output);
479 }
480 
482  const TensorInfo& output,
483  const MeanDescriptor& descriptor,
484  Optional<std::string&> reasonIfUnsupported) const
485 {
487  reasonIfUnsupported,
488  input,
489  output,
490  descriptor);
491 }
492 
493 bool NeonLayerSupport::IsMergerSupported(const std::vector<const TensorInfo*> inputs,
494  const TensorInfo& output,
495  const MergerDescriptor& descriptor,
496  Optional<std::string&> reasonIfUnsupported) const
497 {
498  return IsConcatSupported(inputs, output, descriptor, reasonIfUnsupported);
499 }
500 
502  const TensorInfo& input1,
503  const TensorInfo& output,
504  Optional<std::string&> reasonIfUnsupported) const
505 {
507  reasonIfUnsupported,
508  input0,
509  input1,
510  output);
511 }
512 
514  const TensorInfo& input1,
515  const TensorInfo& output,
516  Optional<std::string&> reasonIfUnsupported) const
517 {
519  reasonIfUnsupported,
520  input0,
521  input1,
522  output);
523 }
524 
526  const TensorInfo& input1,
527  const TensorInfo& output,
528  Optional<std::string&> reasonIfUnsupported) const
529 {
531  reasonIfUnsupported,
532  input0,
533  input1,
534  output);
535 }
536 
538  const TensorInfo& output,
539  const NormalizationDescriptor& descriptor,
540  Optional<std::string&> reasonIfUnsupported) const
541 {
543  reasonIfUnsupported,
544  input,
545  output,
546  descriptor);
547 }
548 
550  Optional<std::string&> reasonIfUnsupported) const
551 {
552  return IsNeonBackendSupported(reasonIfUnsupported, output);
553 }
554 
556  const TensorInfo& output,
557  const PadDescriptor& descriptor,
558  Optional<std::string&> reasonIfUnsupported) const
559 {
561  reasonIfUnsupported,
562  input,
563  output,
564  descriptor);
565 }
566 
568  const TensorInfo& output,
569  const PermuteDescriptor& descriptor,
570  Optional<std::string&> reasonIfUnsupported) const
571 {
572  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonPermuteWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
573 }
574 
576  const TensorInfo& output,
577  const Pooling2dDescriptor& descriptor,
578  Optional<std::string&> reasonIfUnsupported) const
579 {
580  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonPooling2dWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
581 }
582 
584  const armnn::TensorInfo &alpha,
585  const armnn::TensorInfo &output,
586  armnn::Optional<std::string &> reasonIfUnsupported) const
587 {
588  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonPreluWorkloadValidate, reasonIfUnsupported, input, alpha, output);
589 }
590 
592  const TensorInfo& output,
593  Optional<std::string&> reasonIfUnsupported) const
594 {
596  reasonIfUnsupported,
597  input,
598  output);
599 }
600 
602  const TensorInfo& cellStateIn,
603  const TensorInfo& outputStateIn,
604  const TensorInfo& cellStateOut,
605  const TensorInfo& outputStateOut,
606  const QuantizedLstmInputParamsInfo& paramsInfo,
607  Optional<std::string&> reasonIfUnsupported) const
608 {
610  reasonIfUnsupported,
611  input,
612  cellStateIn,
613  outputStateIn,
614  cellStateOut,
615  outputStateOut,
616  paramsInfo);
617 }
618 
620  const TensorInfo& output,
621  const ReshapeDescriptor& descriptor,
622  Optional<std::string&> reasonIfUnsupported) const
623 {
624  ignore_unused(descriptor);
626  reasonIfUnsupported,
627  input,
628  output);
629 }
630 
632  const TensorInfo& output,
633  const ResizeDescriptor& descriptor,
634  Optional<std::string&> reasonIfUnsupported) const
635 {
637  reasonIfUnsupported,
638  input,
639  output,
640  descriptor);
641 }
642 
644  const TensorInfo& output,
645  Optional<std::string&> reasonIfUnsupported) const
646 {
647  ResizeDescriptor descriptor;
648  descriptor.m_Method = ResizeMethod::Bilinear;
649  descriptor.m_DataLayout = DataLayout::NCHW;
650 
651  const TensorShape& outputShape = output.GetShape();
652  descriptor.m_TargetHeight = outputShape[2];
653  descriptor.m_TargetWidth = outputShape[3];
654 
655  return IsResizeSupported(input, output, descriptor, reasonIfUnsupported);
656 }
657 
659  const TensorInfo& output,
660  Optional<std::string&> reasonIfUnsupported) const
661 {
662  ElementwiseUnaryDescriptor descriptor(UnaryOperation::Rsqrt);
663  return IsElementwiseUnarySupported(input, output, descriptor, reasonIfUnsupported);
664 }
665 
666 bool NeonLayerSupport::IsSliceSupported(const TensorInfo& input,
667  const TensorInfo& output,
668  const SliceDescriptor& descriptor,
669  Optional<std::string&> reasonIfUnsupported) const
670 {
672  reasonIfUnsupported,
673  input,
674  output,
675  descriptor);
676 }
677 
679  const TensorInfo& output,
680  const SoftmaxDescriptor& descriptor,
681  Optional<std::string&> reasonIfUnsupported) const
682 {
683  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonSoftmaxWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
684 }
685 
687  const TensorInfo& output,
688  const SpaceToBatchNdDescriptor& descriptor,
689  Optional<std::string&> reasonIfUnsupported) const
690 {
692  reasonIfUnsupported,
693  input,
694  output,
695  descriptor);
696 }
697 
699  const TensorInfo& output,
700  const SpaceToDepthDescriptor& descriptor,
701  Optional<std::string&> reasonIfUnsupported) const
702 {
704  reasonIfUnsupported,
705  input,
706  output,
707  descriptor);
708 }
709 
711  const ViewsDescriptor& descriptor,
712  Optional<std::string&> reasonIfUnsupported) const
713 {
714  ignore_unused(descriptor);
715  return IsSupportedForDataTypeNeon(reasonIfUnsupported,
716  input.GetDataType(),
717  &TrueFunc<>,
718  &TrueFunc<>);
719 }
720 
722  const std::vector<std::reference_wrapper<TensorInfo>>& outputs,
723  const ViewsDescriptor& descriptor,
724  Optional<std::string&> reasonIfUnsupported) const
725 {
726 #if defined(ARMCOMPUTENEON_ENABLED)
727  // Split along the last dimension, cannot use sub-tensors
728  // as width and height of the sub-tensors do not match
729  // the width and height of the parent tensor
730  // in case of input with more than 2D.
731  std::set<unsigned int> splitAxis = ComputeSplitAxis(descriptor, input.GetShape());
732  if (descriptor.GetNumDimensions() > 2 && splitAxis.size() == 1 &&
733  *splitAxis.begin() == descriptor.GetNumDimensions() - 1 )
734  {
736  reasonIfUnsupported,
737  input,
738  outputs,
739  *splitAxis.begin());
740  }
741 #endif
742  boost::ignore_unused(descriptor);
743  for (auto output : outputs)
744  {
745  if (!input.IsTypeSpaceMatch(output)) // Cannot use sub-tensors if the types are not same space
746  {
747  SetValueChecked(reasonIfUnsupported, "Neon Splitter: Types and quantization parameters must match.");
748  return false;
749  }
750  }
751  return true;
752 }
753 
754 bool NeonLayerSupport::IsStackSupported(const std::vector<const TensorInfo*>& inputs,
755  const TensorInfo& output,
756  const StackDescriptor& descriptor,
757  Optional<std::string&> reasonIfUnsupported) const
758 {
760  reasonIfUnsupported,
761  inputs,
762  output,
763  descriptor);
764 }
765 
767  const TensorInfo& output,
768  const StridedSliceDescriptor& descriptor,
769  Optional<std::string&> reasonIfUnsupported) const
770 {
772  reasonIfUnsupported,
773  input,
774  output,
775  descriptor);
776 }
777 
779  const TensorInfo& input1,
780  const TensorInfo& output,
781  Optional<std::string&> reasonIfUnsupported) const
782 {
784  reasonIfUnsupported,
785  input0,
786  input1,
787  output);
788 }
789 
791  const TensorInfo& output,
792  const TransposeConvolution2dDescriptor& descriptor,
793  const TensorInfo& weights,
794  const Optional<TensorInfo>& biases,
795  Optional<std::string&> reasonIfUnsupported) const
796 {
798  reasonIfUnsupported,
799  input,
800  output,
801  descriptor,
802  weights,
803  biases);
804 }
805 
806 } // namespace armnn
arm_compute::Status NeonQuantizeWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
arm_compute::Status NeonArgMinMaxWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const ArgMinMaxDescriptor &descriptor)
arm_compute::Status NeonBatchToSpaceNdWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const BatchToSpaceNdDescriptor &desc)
arm_compute::Status NeonConvolution2dWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const Convolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases)
UnaryOperation m_Operation
Specifies the elementwiseUnary operation to execute.
Definition: Descriptors.hpp:98
A BatchToSpaceNdDescriptor for the BatchToSpaceNdLayer.
bool IsBatchToSpaceNdSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, const BatchToSpaceNdDescriptor &descriptor, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
arm_compute::Status NeonActivationWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const ActivationDescriptor &descriptor)
Status
Definition: Types.hpp:26
A NormalizationDescriptor for the NormalizationLayer.
arm_compute::Status NeonFullyConnectedWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const TensorInfo &weights, const TensorInfo &biases, const FullyConnectedDescriptor &descriptor)
arm_compute::Status NeonConcatWorkloadValidate(const std::vector< const TensorInfo *> &inputs, const TensorInfo &output, const OriginsDescriptor &descriptor)
bool IsFullyConnectedSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, const TensorInfo &weights, const TensorInfo &biases, const FullyConnectedDescriptor &descriptor, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
arm_compute::Status NeonMeanWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const MeanDescriptor &desc)
uint32_t m_TargetHeight
Target height value.
arm_compute::Status NeonStackWorkloadValidate(const std::vector< const TensorInfo *> &inputs, const TensorInfo &output, const StackDescriptor &descriptor)
bool IsRsqrtSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
std::set< unsigned int > ComputeSplitAxis(const armnn::SplitterDescriptor &desc, const TensorShape &input)
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
bool IsResizeBilinearSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
A PadDescriptor for the PadLayer.
An ActivationDescriptor for the ActivationLayer.
Definition: Descriptors.hpp:20
bool IsTransposeConvolution2dSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, const TransposeConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
A BatchNormalizationDescriptor for the BatchNormalizationLayer.
arm_compute::Status NeonDepthwiseConvolutionWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases)
bool IsActivationSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, const ActivationDescriptor &descriptor, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
bool IsReshapeSupported(const BackendId &backend, const TensorInfo &input, const ReshapeDescriptor &descriptor, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
bool IsBatchNormalizationSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, const TensorInfo &mean, const TensorInfo &var, const TensorInfo &beta, const TensorInfo &gamma, const BatchNormalizationDescriptor &descriptor, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
bool IsQuantizedLstmSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &previousCellStateIn, const TensorInfo &previousOutputIn, const TensorInfo &cellStateOut, const TensorInfo &output, const QuantizedLstmInputParamsInfo &paramsInfo, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
bool IsSpaceToBatchNdSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, const SpaceToBatchNdDescriptor &descriptor, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
arm_compute::Status NeonPadWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const PadDescriptor &descriptor)
arm_compute::Status NeonBatchNormalizationValidate(const TensorInfo &input, const TensorInfo &output, const TensorInfo &mean, const TensorInfo &var, const TensorInfo &beta, const TensorInfo &gamma, const BatchNormalizationDescriptor &descriptor)
arm_compute::Status NeonTransposeConvolution2dWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const TransposeConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases)
A L2NormalizationDescriptor for the L2NormalizationLayer.
A ViewsDescriptor for the SplitterLayer. Descriptor to configure the splitting process. Number of Views must be equal to the number of outputs, and their order must match - e.g. first view corresponds to the first output, second view to the second output, etc.
bool IsSpaceToDepthSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, const SpaceToDepthDescriptor &descriptor, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
arm_compute::Status NeonAbsWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
bool IsGreaterSupported(const BackendId &backend, const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
arm_compute::Status NeonRsqrtWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
bool IsStridedSliceSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, const StridedSliceDescriptor &descriptor, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
A SpaceToDepthDescriptor for the SpaceToDepthLayer.
arm_compute::Status NeonNormalizationWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const NormalizationDescriptor &descriptor)
ISubgraphViewConverter supported
A ReshapeDescriptor for the ReshapeLayer.
A TransposeConvolution2dDescriptor for the TransposeConvolution2dLayer.
arm_compute::Status NeonSpaceToDepthWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const SpaceToDepthDescriptor &descriptor)
arm_compute::Status NeonDequantizeWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
A ElementwiseUnaryDescriptor for the ElementwiseUnaryLayer.
Definition: Descriptors.hpp:82
bool IsMaximumSupported(const BackendId &backend, const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, char *reasonIfUnSupported=nullptr, size_t reasonIfUnSupportedMaxLength=0)
Deprecated in favor of IBackend and ILayerSupport interfaces.
bool IsConcatSupported(const BackendId &backend, const std::vector< const TensorInfo *> inputs, const TensorInfo &output, const OriginsDescriptor &descriptor, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
arm_compute::Status NeonSoftmaxWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const SoftmaxDescriptor &descriptor)
arm_compute::Status NeonQuantizedLstmWorkloadValidate(const TensorInfo &input, const TensorInfo &cellStateIn, const TensorInfo &outputStateIn, const TensorInfo &cellStateOut, const TensorInfo &outputStateOut, const QuantizedLstmInputParamsInfo &paramsInfo)
bool IsDivisionSupported(const BackendId &backend, const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
arm_compute::Status NeonReshapeWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
bool IsAdditionSupported(const BackendId &backend, const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
uint32_t GetNumDimensions() const
Get the number of dimensions.
bool IsTypeSpaceMatch(const TensorInfo &other) const
Check that the types are the same and, if quantize, that the quantization parameters are the same...
Definition: Tensor.cpp:218
arm_compute::Status NeonInstanceNormalizationWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const InstanceNormalizationDescriptor &descriptor)
arm_compute::Status NeonSliceWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const SliceDescriptor &descriptor)
An LstmDescriptor for the LstmLayer.
uint32_t GetNumDimensions() const
Get the number of dimensions.
unsigned int GetConcatAxis() const
Get the concatenation axis value.
bool IsPreluSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &alpha, const TensorInfo &output, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
bool IsFloorSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
An InstanceNormalizationDescriptor for InstanceNormalizationLayer.
arm_compute::Status NeonGreaterWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output)
arm_compute::Status NeonResizeWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const ResizeDescriptor &descriptor)
bool IsSplitterSupported(const BackendId &backend, const TensorInfo &input, const ViewsDescriptor &descriptor, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
A FullyConnectedDescriptor for the FullyConnectedLayer.
bool IsSoftmaxSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, const SoftmaxDescriptor &descriptor, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
bool IsNormalizationSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, const NormalizationDescriptor &descriptor, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
arm_compute::Status NeonDepthToSpaceWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const DepthToSpaceDescriptor &descriptor)
arm_compute::Status NeonMinimumWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output)
arm_compute::Status NeonL2NormalizationWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const L2NormalizationDescriptor &descriptor)
A StackDescriptor for the StackLayer.
bool IsSupportedForDataTypeGeneric(Optional< std::string &> reasonIfUnsupported, DataType dataType, Float16Func float16FuncPtr, Float32Func float32FuncPtr, Uint8Func uint8FuncPtr, Int32Func int32FuncPtr, BooleanFunc booleanFuncPtr, Params &&... params)
bool IsL2NormalizationSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, const L2NormalizationDescriptor &descriptor, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
A SoftmaxDescriptor for the SoftmaxLayer.
uint32_t m_TargetWidth
Target width value.
arm_compute::Status NeonSubtractionWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output)
arm_compute::Status NeonMultiplicationWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output)
bool IsInputSupported(const BackendId &backend, const TensorInfo &input, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
bool IsOutputSupported(const BackendId &backend, const TensorInfo &output, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
ComparisonOperation m_Operation
Specifies the comparison operation to execute.
Definition: Descriptors.hpp:78
bool IsLstmSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &scratchBuffer, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const LstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
bool IsConvertFp16ToFp32Supported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
arm_compute::Status NeonAdditionWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output)
DataType
Definition: Types.hpp:32
void SetValueChecked(Optional< T &> optionalRef, V &&val)
bool IsPooling2dSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, const Pooling2dDescriptor &descriptor, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
bool IsMeanSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, const MeanDescriptor &descriptor, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
DataType GetDataType() const
Definition: Tensor.hpp:95
bool IsConstantSupported(const BackendId &backend, const TensorInfo &output, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
bool IsStackSupported(const BackendId &backend, const std::vector< const TensorInfo *> inputs, const TensorInfo &output, const StackDescriptor &descriptor, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
arm_compute::Status NeonDivisionWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output)
bool IsConvolution2dSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, const Convolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
bool IsMergerSupported(const BackendId &backend, const std::vector< const TensorInfo *> inputs, const TensorInfo &output, const OriginsDescriptor &descriptor, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
A Pooling2dDescriptor for the Pooling2dLayer.
bool IsConvertFp32ToFp16Supported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
bool IsPermuteSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, const PermuteDescriptor &descriptor, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
arm_compute::Status NeonMaximumWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output)
A SliceDescriptor for the SliceLayer.
arm_compute::Status NeonSplitterWorkloadValidate(const TensorInfo &input, const std::vector< std::reference_wrapper< TensorInfo >> &outputs, unsigned int splitAxis)
arm_compute::Status NeonPreluWorkloadValidate(const TensorInfo &input, const TensorInfo &alpha, const TensorInfo &output)
bool IsDepthwiseConvolutionSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
arm_compute::Status NeonLstmFloatWorkloadValidate(const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &scratchBuffer, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const LstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo)
A DepthwiseConvolution2dDescriptor for the DepthwiseConvolution2dLayer.
A PermuteDescriptor for the PermuteLayer.
A Convolution2dDescriptor for the Convolution2dLayer.
A MeanDescriptor for the MeanLayer.
bool IsMultiplicationSupported(const BackendId &backend, const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
A SpaceToBatchNdDescriptor for the SpaceToBatchNdLayer.
bool IsResizeSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, const ResizeDescriptor &descriptor, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
const TensorShape & GetShape() const
Definition: Tensor.hpp:88
arm_compute::Status NeonPermuteWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const PermuteDescriptor &descriptor)
An ArgMinMaxDescriptor for ArgMinMaxLayer.
Definition: Descriptors.hpp:43
bool IsSubtractionSupported(const BackendId &backend, const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
bool IsDequantizeSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
A ResizeDescriptor for the ResizeLayer.
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported,...)
bool IsPadSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, const PadDescriptor &descriptor, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
bool IsMinimumSupported(const BackendId &backend, const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
arm_compute::Status NeonPooling2dWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const Pooling2dDescriptor &descriptor)
arm_compute::Status NeonStridedSliceWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const StridedSliceDescriptor &descriptor)
A ComparisonDescriptor for the ComparisonLayer.
Definition: Descriptors.hpp:62
bool IsArgMinMaxSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, const ArgMinMaxDescriptor &descriptor, char *reasonIfUnsupported, size_t reasonIfUnsupportedMaxLength)
bool IsQuantizeSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, char *reasonIfUnsupported, size_t reasonIfUnsupportedMaxLength)
An OriginsDescriptor for the ConcatLayer. Descriptor to configure the concatenation process.
arm_compute::Status NeonSpaceToBatchNdWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const SpaceToBatchNdDescriptor &descriptor)
A StridedSliceDescriptor for the StridedSliceLayer.