ArmNN
 20.11
NeonLayerSupport.cpp
Go to the documentation of this file.
1 //
2 // Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
6 #include "NeonLayerSupport.hpp"
7 #include "NeonBackendId.hpp"
9 
10 #include <armnn/Descriptors.hpp>
11 #include <armnn/Exceptions.hpp>
12 #include <armnn/Tensor.hpp>
13 #include <armnn/Types.hpp>
15 
16 #include <InternalTypes.hpp>
17 #include <LayerSupportCommon.hpp>
20 
21 #if defined(ARMCOMPUTENEON_ENABLED)
74 #endif
75 
76 namespace armnn
77 {
78 
79 namespace
80 {
81 
// Compile-time gate for every Neon layer-support query.
//
// When the library is built with ARMCOMPUTENEON_ENABLED this always returns
// true. Otherwise it writes a "built without NEON support" explanation into
// reasonIfUnsupported (when a sink was provided) and returns false.
// The variadic arguments exist only so call sites can forward workload
// parameters uniformly; they are intentionally unused here.
template< typename ... Args>
bool IsNeonBackendSupported(Optional<std::string&> reasonIfUnsupported, Args... args)
{
    IgnoreUnused(reasonIfUnsupported, (args)...);
#if defined(ARMCOMPUTENEON_ENABLED)
    return true;
#else
    SetValueChecked(reasonIfUnsupported, "The armnn library has been built without NEON support");
    return false;
#endif
}
93 
94 template<typename FloatFunc, typename Uint8Func, typename ... Params>
95 bool IsSupportedForDataTypeNeon(Optional<std::string&> reasonIfUnsupported,
96  DataType dataType,
97  FloatFunc floatFuncPtr,
98  Uint8Func uint8FuncPtr,
99  Params&&... params)
100 {
101  return IsNeonBackendSupported(reasonIfUnsupported) &&
102  IsSupportedForDataTypeGeneric(reasonIfUnsupported,
103  dataType,
104  floatFuncPtr,
105  floatFuncPtr,
106  uint8FuncPtr,
107  &FalseFunc<>,
108  &FalseFunc<>,
109  std::forward<Params>(params)...);
110 }
111 
112 #if defined(ARMCOMPUTENEON_ENABLED)
113 template<class FuncType, class... Args>
114 inline bool IsWorkloadSupported(FuncType& func, Optional<std::string&> reasonIfUnsupported, Args&&... args)
115 {
116  arm_compute::Status aclStatus = func(std::forward<Args>(args)...);
117  const bool supported = (aclStatus.error_code() == arm_compute::ErrorCode::OK);
118  if (!supported && reasonIfUnsupported)
119  {
120  reasonIfUnsupported.value() = aclStatus.error_description();
121  }
122  return supported;
123 }
124 
// With NEON available, a layer-support query expands to a call of the given
// workload Validate() function, forwarding its verdict (and error text).
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported, ...) \
    return IsWorkloadSupported(func, reasonIfUnsupported, __VA_ARGS__);
#else
// Without NEON every query collapses to the "backend not built" answer.
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported, ...) \
    return IsNeonBackendSupported(reasonIfUnsupported, __VA_ARGS__);
#endif
131 } // anonymous namespace
132 
134  : m_ModelContextPtr(modelContextPtr)
135 {
136 }
137 
139  : m_ModelContextPtr(nullptr)
140 {
141 }
142 
144  const TensorInfo& output,
145  Optional<std::string&> reasonIfUnsupported) const
146 {
148  return IsElementwiseUnarySupported(input, output, descriptor, reasonIfUnsupported);
149 }
150 
152  const TensorInfo& output,
153  const ActivationDescriptor& descriptor,
154  Optional<std::string&> reasonIfUnsupported) const
155 {
156  IgnoreUnused(descriptor);
158  reasonIfUnsupported,
159  input,
160  output,
161  descriptor);
162 }
163 
165  const TensorInfo& input1,
166  const TensorInfo& output,
167  Optional<std::string&> reasonIfUnsupported) const
168 {
170  reasonIfUnsupported,
171  input0,
172  input1,
173  output,
174  nullptr);
175 }
176 
178  const TensorInfo& output,
179  const ArgMinMaxDescriptor& descriptor,
180  Optional<std::string&> reasonIfUnsupported) const
181 {
183  reasonIfUnsupported,
184  input,
185  output,
186  descriptor);
187 }
188 
190  const TensorInfo& output,
191  const TensorInfo& mean,
192  const TensorInfo& var,
193  const TensorInfo& beta,
194  const TensorInfo& gamma,
195  const BatchNormalizationDescriptor& descriptor,
196  Optional<std::string&> reasonIfUnsupported) const
197 {
199  reasonIfUnsupported,
200  input,
201  output,
202  mean,
203  var,
204  beta,
205  gamma,
206  descriptor,
207  nullptr);
208 }
209 
211  const TensorInfo& output,
212  const BatchToSpaceNdDescriptor& descriptor,
213  Optional<std::string&> reasonIfUnsupported) const
214 {
216  reasonIfUnsupported,
217  input,
218  output,
219  descriptor);
220 }
221 
223  const TensorInfo& input1,
224  const TensorInfo& output,
225  const ComparisonDescriptor& descriptor,
226  Optional<std::string&> reasonIfUnsupported) const
227 {
228 
230  reasonIfUnsupported,
231  input0,
232  input1,
233  output,
234  descriptor);
235 }
236 
237 bool NeonLayerSupport::IsConcatSupported(const std::vector<const TensorInfo*> inputs,
238  const TensorInfo& output,
239  const ConcatDescriptor& descriptor,
240  Optional<std::string&> reasonIfUnsupported) const
241 {
242  if (descriptor.GetNumDimensions() <= descriptor.GetConcatAxis())
243  {
244  SetValueChecked(reasonIfUnsupported, "Neon Concat: Concat axis > Number of dimensions.");
245  return false;
246  }
247 
248  unsigned int concatInnerAxis = (descriptor.GetNumDimensions() - descriptor.GetConcatAxis()) - 1;
249  if(concatInnerAxis < 3) // Width, height, or channels
250  {
252  reasonIfUnsupported,
253  inputs,
254  output,
255  descriptor);
256  }
257  else if (concatInnerAxis == 3)
258  {
259  for (auto& input : inputs)
260  {
261  if (input && !output.IsTypeSpaceMatch(*input)) // Cannot use sub-tensors if the types are not same space
262  {
263  SetValueChecked(reasonIfUnsupported, "Neon Concat: Types and quantization parameters must match.");
264  return false;
265  }
266  }
267  return true; // Sub-tensors support concat along batch
268  }
269  else // > 4 dimensions not supported.
270  {
271  SetValueChecked(reasonIfUnsupported, "Neon Concat: Maximum of 4 dimensions supported.");
272  return false;
273  }
274 }
275 
277  Optional<std::string&> reasonIfUnsupported) const
278 {
280  reasonIfUnsupported,
281  output);
282 }
283 
285  const TensorInfo& output,
286  Optional<std::string&> reasonIfUnsupported) const
287 {
288  armnn::IgnoreUnused(input);
289  armnn::IgnoreUnused(output);
290  armnn::IgnoreUnused(reasonIfUnsupported);
291  return true;
292 }
293 
295  const TensorInfo& output,
296  Optional<std::string&> reasonIfUnsupported) const
297 {
298  armnn::IgnoreUnused(input);
299  armnn::IgnoreUnused(output);
300  armnn::IgnoreUnused(reasonIfUnsupported);
301  return true;
302 }
303 
305  const TensorInfo& output,
306  Optional<std::string&> reasonIfUnsupported) const
307 {
308  armnn::IgnoreUnused(input);
309  armnn::IgnoreUnused(output);
310  armnn::IgnoreUnused(reasonIfUnsupported);
311  return true;
312 }
313 
315  const TensorInfo& output,
316  Optional<std::string&> reasonIfUnsupported) const
317 {
318  armnn::IgnoreUnused(input);
319  armnn::IgnoreUnused(output);
320  armnn::IgnoreUnused(reasonIfUnsupported);
321  return true;
322 }
323 
325  const TensorInfo& output,
326  const Convolution2dDescriptor& descriptor,
327  const TensorInfo& weights,
328  const Optional<TensorInfo>& biases,
329  Optional<std::string&> reasonIfUnsupported) const
330 {
331  bool isFastMathEnabled = false;
332 #if defined(ARMCOMPUTENEON_ENABLED)
333  if (m_ModelContextPtr)
334  {
335  if (m_ModelContextPtr.get() != nullptr)
336  {
337  auto modelOptions = dynamic_cast<NeonBackendModelContext*>(m_ModelContextPtr.get());
338  if (modelOptions)
339  {
340  isFastMathEnabled = modelOptions->IsFastMathEnabled();
341  }
342  }
343  }
344 #endif
345 
347  reasonIfUnsupported,
348  input,
349  output,
350  descriptor,
351  weights,
352  biases,
353  isFastMathEnabled,
354  nullptr);
355 }
356 
358  const TensorInfo& output,
359  const DepthToSpaceDescriptor& descriptor,
360  Optional<std::string&> reasonIfUnsupported) const
361 {
363  reasonIfUnsupported,
364  input,
365  output,
366  descriptor);
367 }
368 
370  const TensorInfo& output,
371  const DepthwiseConvolution2dDescriptor& descriptor,
372  const TensorInfo& weights,
373  const Optional<TensorInfo>& biases,
374  Optional<std::string&> reasonIfUnsupported) const
375 {
377  reasonIfUnsupported,
378  input,
379  output,
380  descriptor,
381  weights,
382  biases,
383  nullptr);
384 }
385 
387  const TensorInfo& output,
388  Optional<std::string&> reasonIfUnsupported) const
389 {
391  reasonIfUnsupported,
392  input,
393  output);
394 }
395 
397  const TensorInfo& output,
398  const DepthwiseConvolution2dDescriptor& descriptor,
399  const TensorInfo& weights,
400  const Optional<TensorInfo>& biases,
401  Optional<std::string&> reasonIfUnsupported) const
402 {
404  reasonIfUnsupported,
405  input,
406  output,
407  descriptor,
408  weights,
409  biases,
410  nullptr);
411 }
412 
414  const TensorInfo& output,
415  const ElementwiseUnaryDescriptor& descriptor,
416  Optional<std::string&> reasonIfUnsupported) const
417 {
418  switch(descriptor.m_Operation)
419  {
420  case UnaryOperation::Abs:
422  reasonIfUnsupported,
423  input,
424  output);
425  case UnaryOperation::Exp:
427  reasonIfUnsupported,
428  input,
429  output);
430  case UnaryOperation::Neg:
432  reasonIfUnsupported,
433  input,
434  output);
437  reasonIfUnsupported,
438  input,
439  output);
442  reasonIfUnsupported,
443  input,
444  output);
445  default:
446  return false;
447  }
448 }
449 
451  const TensorInfo& output,
452  const FillDescriptor& descriptor,
453  Optional<std::string&> reasonIfUnsupported) const
454 {
455  armnn::IgnoreUnused(input);
456  armnn::IgnoreUnused(output);
457  armnn::IgnoreUnused(descriptor);
458 
459  return IsNeonBackendSupported(reasonIfUnsupported);
460 }
461 
463  const TensorInfo& output,
464  Optional<std::string&> reasonIfUnsupported) const
465 {
466  armnn::IgnoreUnused(output);
467  return IsNeonBackendSupported(reasonIfUnsupported) &&
468  IsSupportedForDataTypeGeneric(reasonIfUnsupported,
469  input.GetDataType(),
470  &FalseFuncF16<>,
471  &TrueFunc<>,
472  &FalseFuncU8<>,
473  &FalseFuncI32<>,
474  &FalseFuncU8<>);
475 }
476 
478  const TensorInfo& output,
479  const TensorInfo& weights,
480  const TensorInfo& biases,
481  const FullyConnectedDescriptor& descriptor,
482  Optional<std::string&> reasonIfUnsupported) const
483 {
485  reasonIfUnsupported,
486  input,
487  output,
488  weights,
489  biases,
490  descriptor,
491  nullptr);
492 }
493 
495  const TensorInfo& input1,
496  const TensorInfo& output,
497  const GatherDescriptor& descriptor,
498  Optional<std::string&> reasonIfUnsupported) const
499 {
501  reasonIfUnsupported,
502  input0,
503  input1,
504  output,
505  descriptor);
506 }
507 
509  const armnn::TensorInfo& input1,
510  const armnn::TensorInfo& output,
511  armnn::Optional<std::string&> reasonIfUnsupported) const
512 {
514  return IsComparisonSupported(input0, input1, output, descriptor, reasonIfUnsupported);
515 }
516 
518  Optional<std::string&> reasonIfUnsupported) const
519 {
520  return IsNeonBackendSupported(reasonIfUnsupported, input);
521 }
522 
524  const TensorInfo& output,
525  const InstanceNormalizationDescriptor& descriptor,
526  Optional<std::string&> reasonIfUnsupported) const
527 {
529  reasonIfUnsupported,
530  input,
531  output,
532  descriptor);
533 }
534 
536  const TensorInfo& output,
537  const L2NormalizationDescriptor& descriptor,
538  Optional<std::string&> reasonIfUnsupported) const
539 {
540  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonL2NormalizationWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
541 }
542 
544  const TensorInfo& input1,
545  const TensorInfo& output,
546  const LogicalBinaryDescriptor& descriptor,
547  Optional<std::string&> reasonIfUnsupported) const
548 {
549  switch(descriptor.m_Operation)
550  {
553  reasonIfUnsupported,
554  input0,
555  input1,
556  output);
559  reasonIfUnsupported,
560  input0,
561  input1,
562  output);
563  default:
564  return false;
565  }
566 }
567 
569  const TensorInfo& output,
570  const LogSoftmaxDescriptor& descriptor,
571  Optional<std::string&> reasonIfUnsupported) const
572 {
573  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonLogSoftmaxWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
574 }
575 
577  const TensorInfo& outputStateIn,
578  const TensorInfo& cellStateIn,
579  const TensorInfo& scratchBuffer,
580  const TensorInfo& outputStateOut,
581  const TensorInfo& cellStateOut,
582  const TensorInfo& output,
583  const LstmDescriptor& descriptor,
584  const LstmInputParamsInfo& paramsInfo,
585  Optional<std::string&> reasonIfUnsupported) const
586 {
588  reasonIfUnsupported,
589  input,
590  outputStateIn,
591  cellStateIn,
592  scratchBuffer,
593  outputStateOut,
594  cellStateOut,
595  output,
596  descriptor,
597  paramsInfo);
598 }
599 
601  const TensorInfo& input1,
602  const TensorInfo& output,
603  Optional<std::string&> reasonIfUnsupported) const
604 {
606  reasonIfUnsupported,
607  input0,
608  input1,
609  output);
610 }
611 
613  const TensorInfo& output,
614  const MeanDescriptor& descriptor,
615  Optional<std::string&> reasonIfUnsupported) const
616 {
618  reasonIfUnsupported,
619  input,
620  output,
621  descriptor);
622 }
623 
// Merger support is defined to be identical to Concat support, so the query
// is forwarded verbatim; the two answers can never disagree.
bool NeonLayerSupport::IsMergerSupported(const std::vector<const TensorInfo*> inputs,
                                         const TensorInfo& output,
                                         const MergerDescriptor& descriptor,
                                         Optional<std::string&> reasonIfUnsupported) const
{
    return IsConcatSupported(inputs, output, descriptor, reasonIfUnsupported);
}
631 
633  const TensorInfo& input1,
634  const TensorInfo& output,
635  Optional<std::string&> reasonIfUnsupported) const
636 {
638  reasonIfUnsupported,
639  input0,
640  input1,
641  output);
642 }
643 
645  const TensorInfo& input1,
646  const TensorInfo& output,
647  Optional<std::string&> reasonIfUnsupported) const
648 {
650  reasonIfUnsupported,
651  input0,
652  input1,
653  output,
654  nullptr);
655 }
656 
658  const TensorInfo& input1,
659  const TensorInfo& output,
660  Optional<std::string&> reasonIfUnsupported) const
661 {
663  reasonIfUnsupported,
664  input0,
665  input1,
666  output,
667  nullptr);
668 }
669 
671  const TensorInfo& output,
672  const NormalizationDescriptor& descriptor,
673  Optional<std::string&> reasonIfUnsupported) const
674 {
676  reasonIfUnsupported,
677  input,
678  output,
679  descriptor);
680 }
681 
683  Optional<std::string&> reasonIfUnsupported) const
684 {
685  return IsNeonBackendSupported(reasonIfUnsupported, output);
686 }
687 
689  const TensorInfo& output,
690  const PadDescriptor& descriptor,
691  Optional<std::string&> reasonIfUnsupported) const
692 {
694  reasonIfUnsupported,
695  input,
696  output,
697  descriptor);
698 }
699 
701  const TensorInfo& output,
702  const PermuteDescriptor& descriptor,
703  Optional<std::string&> reasonIfUnsupported) const
704 {
705  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonPermuteWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
706 }
707 
709  const TensorInfo& output,
710  const Pooling2dDescriptor& descriptor,
711  Optional<std::string&> reasonIfUnsupported) const
712 {
713  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonPooling2dWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
714 }
715 
717  const armnn::TensorInfo &alpha,
718  const armnn::TensorInfo &output,
719  armnn::Optional<std::string &> reasonIfUnsupported) const
720 {
721  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonPreluWorkloadValidate, reasonIfUnsupported, input, alpha, output);
722 }
723 
725  const TensorInfo& previousOutputIn,
726  const TensorInfo& previousCellStateIn,
727  const TensorInfo& outputStateOut,
728  const TensorInfo& cellStateOut,
729  const TensorInfo& output,
730  const QLstmDescriptor& descriptor,
731  const LstmInputParamsInfo& paramsInfo,
732  Optional<std::string&> reasonIfUnsupported) const
733 {
734  // Check required here in order to pass IsLayerSupported for datatypes tests
735  if (input.GetDataType() == armnn::DataType::QAsymmS8 &&
736  previousOutputIn.GetDataType() == armnn::DataType::QAsymmS8 &&
737  previousCellStateIn.GetDataType() == armnn::DataType::QSymmS16 &&
738  outputStateOut.GetDataType() == armnn::DataType::QAsymmS8 &&
739  cellStateOut.GetDataType() == armnn::DataType::QSymmS16 &&
741  {
743  reasonIfUnsupported,
744  input,
745  previousCellStateIn,
746  previousOutputIn,
747  cellStateOut,
748  outputStateOut,
749  output,
750  descriptor,
751  paramsInfo);
752  }
753  else
754  {
755  return false;
756  }
757 }
758 
760  const TensorInfo& output,
761  Optional<std::string&> reasonIfUnsupported) const
762 {
764  reasonIfUnsupported,
765  input,
766  output);
767 }
768 
770  const TensorInfo& cellStateIn,
771  const TensorInfo& outputStateIn,
772  const TensorInfo& cellStateOut,
773  const TensorInfo& outputStateOut,
774  const QuantizedLstmInputParamsInfo& paramsInfo,
775  Optional<std::string&> reasonIfUnsupported) const
776 {
778  reasonIfUnsupported,
779  input,
780  cellStateIn,
781  outputStateIn,
782  cellStateOut,
783  outputStateOut,
784  paramsInfo);
785 }
786 
788  const TensorInfo& output,
789  const ReshapeDescriptor& descriptor,
790  Optional<std::string&> reasonIfUnsupported) const
791 {
792  armnn::IgnoreUnused(descriptor);
794  reasonIfUnsupported,
795  input,
796  output);
797 }
798 
800  const TensorInfo& output,
801  const ResizeDescriptor& descriptor,
802  Optional<std::string&> reasonIfUnsupported) const
803 {
805  reasonIfUnsupported,
806  input,
807  output,
808  descriptor);
809 }
810 
812  const TensorInfo& output,
813  Optional<std::string&> reasonIfUnsupported) const
814 {
815  ResizeDescriptor descriptor;
816  descriptor.m_Method = ResizeMethod::Bilinear;
817  descriptor.m_DataLayout = DataLayout::NCHW;
818 
819  const TensorShape& outputShape = output.GetShape();
820  descriptor.m_TargetHeight = outputShape[2];
821  descriptor.m_TargetWidth = outputShape[3];
822 
823  return IsResizeSupported(input, output, descriptor, reasonIfUnsupported);
824 }
825 
827  const TensorInfo& output,
828  Optional<std::string&> reasonIfUnsupported) const
829 {
831  return IsElementwiseUnarySupported(input, output, descriptor, reasonIfUnsupported);
832 }
833 
835  const TensorInfo& output,
836  const SliceDescriptor& descriptor,
837  Optional<std::string&> reasonIfUnsupported) const
838 {
840  reasonIfUnsupported,
841  input,
842  output,
843  descriptor);
844 }
845 
847  const TensorInfo& output,
848  const SoftmaxDescriptor& descriptor,
849  Optional<std::string&> reasonIfUnsupported) const
850 {
851  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonSoftmaxWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
852 }
853 
855  const TensorInfo& output,
856  const SpaceToBatchNdDescriptor& descriptor,
857  Optional<std::string&> reasonIfUnsupported) const
858 {
860  reasonIfUnsupported,
861  input,
862  output,
863  descriptor);
864 }
865 
867  const TensorInfo& output,
868  const SpaceToDepthDescriptor& descriptor,
869  Optional<std::string&> reasonIfUnsupported) const
870 {
872  reasonIfUnsupported,
873  input,
874  output,
875  descriptor);
876 }
877 
879  const ViewsDescriptor& descriptor,
880  Optional<std::string&> reasonIfUnsupported) const
881 {
882  armnn::IgnoreUnused(descriptor);
883  return IsSupportedForDataTypeNeon(reasonIfUnsupported,
884  input.GetDataType(),
885  &TrueFunc<>,
886  &TrueFunc<>);
887 }
888 
890  const std::vector<std::reference_wrapper<TensorInfo>>& outputs,
891  const ViewsDescriptor& descriptor,
892  Optional<std::string&> reasonIfUnsupported) const
893 {
894 #if defined(ARMCOMPUTENEON_ENABLED)
895  // Split along the last dimension, cannot use sub-tensors
896  // as width and height of the sub-tensors do not match
897  // the width and height of the parent tensor
898  // in case of input with more than 2D.
899  std::set<unsigned int> splitAxis = ComputeSplitAxis(descriptor, input.GetShape());
900  if (descriptor.GetNumDimensions() > 2 && splitAxis.size() == 1 &&
901  *splitAxis.begin() == descriptor.GetNumDimensions() - 1 )
902  {
904  reasonIfUnsupported,
905  input,
906  outputs,
907  *splitAxis.begin());
908  }
909 #endif
910  IgnoreUnused(descriptor);
911  for (auto output : outputs)
912  {
913  if (!input.IsTypeSpaceMatch(output)) // Cannot use sub-tensors if the types are not same space
914  {
915  SetValueChecked(reasonIfUnsupported, "Neon Splitter: Types and quantization parameters must match.");
916  return false;
917  }
918  }
919  return true;
920 }
921 
922 bool NeonLayerSupport::IsStackSupported(const std::vector<const TensorInfo*>& inputs,
923  const TensorInfo& output,
924  const StackDescriptor& descriptor,
925  Optional<std::string&> reasonIfUnsupported) const
926 {
928  reasonIfUnsupported,
929  inputs,
930  output,
931  descriptor);
932 }
933 
935  const TensorInfo& output,
936  const StridedSliceDescriptor& descriptor,
937  Optional<std::string&> reasonIfUnsupported) const
938 {
940  reasonIfUnsupported,
941  input,
942  output,
943  descriptor);
944 }
945 
947  const TensorInfo& input1,
948  const TensorInfo& output,
949  Optional<std::string&> reasonIfUnsupported) const
950 {
952  reasonIfUnsupported,
953  input0,
954  input1,
955  output,
956  nullptr);
957 }
958 
960  const TensorInfo& output,
961  const TransposeConvolution2dDescriptor& descriptor,
962  const TensorInfo& weights,
963  const Optional<TensorInfo>& biases,
964  Optional<std::string&> reasonIfUnsupported) const
965 {
967  reasonIfUnsupported,
968  input,
969  output,
970  descriptor,
971  weights,
972  biases);
973 }
974 
976  const TensorInfo& output,
977  const TransposeDescriptor& descriptor,
978  Optional<std::string&> reasonIfUnsupported) const
979 {
980  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonTransposeWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
981 }
982 
983 } // namespace armnn
arm_compute::Status NeonGatherWorkloadValidate(const TensorInfo &input, const TensorInfo &indices, const TensorInfo &output, const GatherDescriptor &descriptor)
arm_compute::Status NeonNegWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
UnaryOperation m_Operation
Specifies the elementwiseUnary operation to execute.
bool IsSliceSupported(const TensorInfo &input, const TensorInfo &output, const SliceDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsSoftmaxSupported(const TensorInfo &input, const TensorInfo &output, const SoftmaxDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsConvolution2dSupported(const TensorInfo &input, const TensorInfo &output, const Convolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
A ViewsDescriptor for the SplitterLayer.
bool IsTypeSpaceMatch(const TensorInfo &other) const
Check that the types are the same and, if quantize, that the quantization parameters are the same...
Definition: Tensor.cpp:423
bool IsConvertFp32ToFp16Supported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
arm_compute::Status NeonSpaceToDepthWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const SpaceToDepthDescriptor &descriptor)
arm_compute::Status NeonSplitterWorkloadValidate(const TensorInfo &input, const std::vector< std::reference_wrapper< TensorInfo >> &outputs, unsigned int splitAxis)
A TransposeConvolution2dDescriptor for the TransposeConvolution2dLayer.
const TensorShape & GetShape() const
Definition: Tensor.hpp:187
arm_compute::Status NeonLogSoftmaxWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const LogSoftmaxDescriptor &descriptor)
A ReshapeDescriptor for the ReshapeLayer.
bool IsGatherSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const GatherDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported) const override
arm_compute::Status NeonBatchNormalizationValidate(const TensorInfo &input, const TensorInfo &output, const TensorInfo &mean, const TensorInfo &var, const TensorInfo &beta, const TensorInfo &gamma, const BatchNormalizationDescriptor &descriptor, const ActivationDescriptor *activationDescriptor)
bool IsDilatedDepthwiseConvolutionSupported(const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string &> reason=EmptyOptional()) const override
bool IsQuantizedLstmSupported(const TensorInfo &input, const TensorInfo &cellStateIn, const TensorInfo &outputStateIn, const TensorInfo &cellStateOut, const TensorInfo &outputStateOut, const QuantizedLstmInputParamsInfo &paramsInfo, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
uint32_t GetNumDimensions() const
Get the number of dimensions.
A ComparisonDescriptor for the ComparisonLayer.
Definition: Descriptors.hpp:73
bool IsDepthToSpaceSupported(const TensorInfo &input, const TensorInfo &output, const DepthToSpaceDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsL2NormalizationSupported(const TensorInfo &input, const TensorInfo &output, const L2NormalizationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsStridedSliceSupported(const TensorInfo &input, const TensorInfo &output, const StridedSliceDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsConvertFp32ToBf16Supported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
ISubgraphViewConverter supported
bool IsAdditionSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
A Convolution2dDescriptor for the Convolution2dLayer.
bool IsConcatSupported(const std::vector< const TensorInfo *> inputs, const TensorInfo &output, const ConcatDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsSplitterSupported(const TensorInfo &input, const ViewsDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
arm_compute::Status NeonDepthwiseConvolutionWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, const ActivationDescriptor *activationDescriptor)
arm_compute::Status NeonMeanWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const MeanDescriptor &desc)
ResizeMethod m_Method
The Interpolation method to use (Bilinear, NearestNeighbor).
arm_compute::Status NeonActivationWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const ActivationDescriptor &descriptor)
arm_compute::Status NeonMinimumWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output)
Validate function for validating the inputs and output.
arm_compute::Status NeonStridedSliceWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const StridedSliceDescriptor &descriptor)
arm_compute::Status NeonTransposeConvolution2dWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const TransposeConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases)
bool IsGreaterSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
arm_compute::Status NeonLstmFloatWorkloadValidate(const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &scratchBuffer, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const LstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo)
arm_compute::Status NeonQLstmWorkloadValidate(const TensorInfo &input, const TensorInfo &cellStateIn, const TensorInfo &outputStateIn, const TensorInfo &cellStateOut, const TensorInfo &outputStateOut, const TensorInfo &output, const QLstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo)
bool IsResizeBilinearSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
arm_compute::Status NeonSliceWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const SliceDescriptor &descriptor)
A LogicalBinaryDescriptor for the LogicalBinaryLayer.
arm_compute::Status NeonFullyConnectedWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const TensorInfo &weights, const TensorInfo &biases, const FullyConnectedDescriptor &descriptor, const ActivationDescriptor *activationDescriptor)
bool IsConstantSupported(const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
Copyright (c) 2020 ARM Limited.
arm_compute::Status NeonQuantizeWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
bool IsPadSupported(const TensorInfo &input, const TensorInfo &output, const PadDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
void IgnoreUnused(Ts &&...)
std::set< unsigned int > ComputeSplitAxis(const armnn::SplitterDescriptor &desc, const TensorShape &input)
arm_compute::Status NeonAdditionWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ActivationDescriptor *activationDescriptor)
A SpaceToDepthDescriptor for the SpaceToDepthLayer.
arm_compute::Status NeonLogicalAndWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output)
arm_compute::Status NeonInstanceNormalizationWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const InstanceNormalizationDescriptor &descriptor)
bool IsPooling2dSupported(const TensorInfo &input, const TensorInfo &output, const Pooling2dDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
arm_compute::Status NeonLogicalOrWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output)
LogicalBinaryOperation m_Operation
Specifies the logical operation to execute.
A BatchToSpaceNdDescriptor for the BatchToSpaceNdLayer.
bool IsActivationSupported(const TensorInfo &input, const TensorInfo &output, const ActivationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
The NeonBackendModelContext is used to pass in Neon specific backend ModelOptions.
bool IsMultiplicationSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsComparisonSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ComparisonDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
A ResizeDescriptor for the ResizeLayer.
arm_compute::Status NeonL2NormalizationWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const L2NormalizationDescriptor &descriptor)
arm_compute::Status NeonAbsWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
A StackDescriptor for the StackLayer.
arm_compute::Status NeonQuantizedLstmWorkloadValidate(const TensorInfo &input, const TensorInfo &cellStateIn, const TensorInfo &outputStateIn, const TensorInfo &cellStateOut, const TensorInfo &outputStateOut, const QuantizedLstmInputParamsInfo &paramsInfo)
arm_compute::Status NeonStackWorkloadValidate(const std::vector< const TensorInfo *> &inputs, const TensorInfo &output, const StackDescriptor &descriptor)
arm_compute::Status NeonSpaceToBatchNdWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const SpaceToBatchNdDescriptor &descriptor)
arm_compute::Status NeonSubtractionWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ActivationDescriptor *activationDescriptor)
A PadDescriptor for the PadLayer.
bool IsAbsSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsLogicalBinarySupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const LogicalBinaryDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported) const override
DataType
Definition: Types.hpp:32
bool IsFillSupported(const TensorInfo &input, const TensorInfo &output, const FillDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
An LstmDescriptor for the LstmLayer.
arm_compute::Status NeonExpWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
arm_compute::Status NeonReshapeWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
bool IsInputSupported(const TensorInfo &input, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsStackSupported(const std::vector< const TensorInfo *> &inputs, const TensorInfo &output, const StackDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
std::shared_ptr< IBackendModelContext > IBackendSpecificModelContextPtr
A L2NormalizationDescriptor for the L2NormalizationLayer.
bool IsMinimumSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsPreluSupported(const TensorInfo &input, const TensorInfo &alpha, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsOutputSupported(const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
An ArgMinMaxDescriptor for ArgMinMaxLayer.
Definition: Descriptors.hpp:51
bool IsInstanceNormalizationSupported(const TensorInfo &input, const TensorInfo &output, const InstanceNormalizationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
DataType GetDataType() const
Definition: Tensor.hpp:194
An OriginsDescriptor for the ConcatLayer.
A FullyConnectedDescriptor for the FullyConnectedLayer.
arm_compute::Status NeonLogicalNotWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
bool IsMergerSupported(const std::vector< const TensorInfo *> inputs, const TensorInfo &output, const MergerDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsNormalizationSupported(const TensorInfo &input, const TensorInfo &output, const NormalizationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsElementwiseUnarySupported(const TensorInfo &input, const TensorInfo &output, const ElementwiseUnaryDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
uint32_t m_TargetWidth
Target width value.
A GatherDescriptor for the GatherLayer.
Status
enumeration
Definition: Types.hpp:26
arm_compute::Status NeonComparisonWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ComparisonDescriptor &descriptor)
bool IsDivisionSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
arm_compute::Status NeonConcatWorkloadValidate(const std::vector< const TensorInfo *> &inputs, const TensorInfo &output, const OriginsDescriptor &descriptor)
arm_compute::Status NeonPermuteWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const PermuteDescriptor &descriptor)
bool IsFloorSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
A QLstmDescriptor for the QLstmLayer.
arm_compute::Status NeonConvolution2dWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const Convolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, bool isFastMathEnabled, const ActivationDescriptor *activationDescriptor)
bool IsLogSoftmaxSupported(const TensorInfo &input, const TensorInfo &output, const LogSoftmaxDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
An ActivationDescriptor for the ActivationLayer.
Definition: Descriptors.hpp:20
bool IsResizeSupported(const TensorInfo &input, const TensorInfo &output, const ResizeDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsTransposeConvolution2dSupported(const TensorInfo &input, const TensorInfo &output, const TransposeConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
uint32_t m_TargetHeight
Target height value.
A SliceDescriptor for the SliceLayer.
arm_compute::Status NeonDequantizeWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
bool IsReshapeSupported(const TensorInfo &input, const TensorInfo &output, const ReshapeDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
arm_compute::Status NeonDivisionWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ActivationDescriptor *activationDescriptor)
bool IsBatchNormalizationSupported(const TensorInfo &input, const TensorInfo &output, const TensorInfo &mean, const TensorInfo &var, const TensorInfo &beta, const TensorInfo &gamma, const BatchNormalizationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
void SetValueChecked(Optional< T &> optionalRef, V &&val)
A SpaceToBatchNdDescriptor for the SpaceToBatchNdLayer.
arm_compute::Status NeonNormalizationWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const NormalizationDescriptor &descriptor)
arm_compute::Status NeonBatchToSpaceNdWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const BatchToSpaceNdDescriptor &desc)
bool IsQuantizeSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
arm_compute::Status NeonRsqrtWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
bool IsConvertFp16ToFp32Supported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
arm_compute::Status NeonPadWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const PadDescriptor &descriptor)
A ElementwiseUnaryDescriptor for the ElementwiseUnaryLayer.
Definition: Descriptors.hpp:93
bool IsSubtractionSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsLstmSupported(const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &scratchBuffer, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const LstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
arm_compute::Status NeonArgMinMaxWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const ArgMinMaxDescriptor &descriptor)
arm_compute::Status NeonSoftmaxWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const SoftmaxDescriptor &descriptor)
bool IsSpaceToBatchNdSupported(const TensorInfo &input, const TensorInfo &output, const SpaceToBatchNdDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
arm_compute::Status NeonTransposeWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const TransposeDescriptor &descriptor)
uint32_t GetNumDimensions() const
Get the number of dimensions.
A MeanDescriptor for the MeanLayer.
bool IsDepthwiseConvolutionSupported(const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
arm_compute::Status NeonMaximumWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output)
bool IsArgMinMaxSupported(const TensorInfo &input, const TensorInfo &output, const ArgMinMaxDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsSpaceToDepthSupported(const TensorInfo &input, const TensorInfo &output, const SpaceToDepthDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
A TransposeDescriptor for the TransposeLayer.
A StridedSliceDescriptor for the StridedSliceLayer.
bool IsTransposeSupported(const TensorInfo &input, const TensorInfo &output, const TransposeDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsMaximumSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
arm_compute::Status NeonConstantWorkloadValidate(const TensorInfo &output)
bool IsFullyConnectedSupported(const TensorInfo &input, const TensorInfo &output, const TensorInfo &weights, const TensorInfo &biases, const FullyConnectedDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsMeanSupported(const TensorInfo &input, const TensorInfo &output, const MeanDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsBatchToSpaceNdSupported(const TensorInfo &input, const TensorInfo &output, const BatchToSpaceNdDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
arm_compute::Status NeonPreluWorkloadValidate(const TensorInfo &input, const TensorInfo &alpha, const TensorInfo &output)
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported,...)
arm_compute::Status NeonDepthToSpaceWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const DepthToSpaceDescriptor &descriptor)
A Pooling2dDescriptor for the Pooling2dLayer.
arm_compute::Status NeonResizeWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const ResizeDescriptor &descriptor)
A NormalizationDescriptor for the NormalizationLayer.
bool IsQLstmSupported(const TensorInfo &input, const TensorInfo &previousOutputIn, const TensorInfo &previousCellStateIn, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const QLstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsConvertBf16ToFp32Supported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
An InstanceNormalizationDescriptor for InstanceNormalizationLayer.
unsigned int GetConcatAxis() const
Get the concatenation axis value.
bool IsSupportedForDataTypeGeneric(Optional< std::string &> reasonIfUnsupported, DataType dataType, Float16Func float16FuncPtr, Float32Func float32FuncPtr, Uint8Func uint8FuncPtr, Int32Func int32FuncPtr, BooleanFunc booleanFuncPtr, Params &&... params)
A SoftmaxDescriptor for the SoftmaxLayer.
arm_compute::Status NeonPooling2dWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const Pooling2dDescriptor &descriptor)
bool IsDequantizeSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
A DepthwiseConvolution2dDescriptor for the DepthwiseConvolution2dLayer.
bool IsRsqrtSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
A FillDescriptor for the FillLayer.
A BatchNormalizationDescriptor for the BatchNormalizationLayer.
arm_compute::Status NeonMultiplicationWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ActivationDescriptor *activationDescriptor)
A PermuteDescriptor for the PermuteLayer.
bool IsPermuteSupported(const TensorInfo &input, const TensorInfo &output, const PermuteDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override