ArmNN
 21.02
ClLayerSupport.cpp
Go to the documentation of this file.
1 //
2 // Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
6 #include "ClLayerSupport.hpp"
7 #include "ClBackendId.hpp"
9 
10 #include <armnn/Descriptors.hpp>
12 
13 #include <InternalTypes.hpp>
14 #include <LayerSupportCommon.hpp>
15 
18 
19 #if defined(ARMCOMPUTECL_ENABLED)
77 #endif
78 
79 
80 namespace armnn
81 {
82 
83 namespace
84 {
85 
86 template<unsigned int FilterSize>
87 bool IsMatchingSize2d(const TensorInfo& weightInfo)
88 {
89  // Width & Height must match.
90  return (weightInfo.GetShape()[3] == FilterSize) && (weightInfo.GetShape()[2] == FilterSize);
91 }
92 
/// Compile-time stride check, base case: does actualStride equal the single
/// candidate ValidStride?
template<uint32_t ValidStride>
bool IsMatchingStride(uint32_t actualStride)
{
    return actualStride == ValidStride;
}

/// Compile-time stride check, recursive case: true when actualStride matches
/// any of the candidate strides in the pack. Short-circuits on the first match.
template<uint32_t FirstStride, uint32_t SecondStride, uint32_t... ValidStrides>
bool IsMatchingStride(uint32_t actualStride)
{
    if (IsMatchingStride<FirstStride>(actualStride))
    {
        return true;
    }
    return IsMatchingStride<SecondStride, ValidStrides...>(actualStride);
}
104 
105 template<typename ... Args>
106 bool IsClBackendSupported(Optional<std::string&> reasonIfUnsupported, Args... args)
107 {
108  IgnoreUnused(reasonIfUnsupported, (args)...);
109 #if defined(ARMCOMPUTECL_ENABLED)
110  return true;
111 #else
112  if (reasonIfUnsupported)
113  {
114  reasonIfUnsupported.value() = "The armnn library has been built without CL support";
115  }
116  return false;
117 #endif
118 }
119 
120 #if defined(ARMCOMPUTECL_ENABLED)
121 #define FORWARD_CL_LAYER_SUPPORT_FUNC(expr) (expr)
122 #else
123 #define FORWARD_CL_LAYER_SUPPORT_FUNC(expr) IsClBackendSupported(reasonIfUnsupported)
124 #endif
125 
126 #if defined(ARMCOMPUTECL_ENABLED)
127 template<class FuncType, class... Args>
128 inline bool IsWorkloadSupported(FuncType&& func, Optional<std::string&> reasonIfUnsupported, Args&&... args)
129 {
130  arm_compute::Status aclStatus = func(std::forward<Args>(args)...);
131  const bool supported = (aclStatus.error_code() == arm_compute::ErrorCode::OK);
132  if (!supported && reasonIfUnsupported)
133  {
134  reasonIfUnsupported.value() = aclStatus.error_description();
135  }
136  return supported;
137 }
138 
139 #define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported, ...) \
140  return IsWorkloadSupported(func, reasonIfUnsupported, __VA_ARGS__);
141 #else
142 #define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported, ...) \
143  return IsClBackendSupported(reasonIfUnsupported, __VA_ARGS__);
144 #endif
145 
146 template<typename FloatFunc, typename Uint8Func, typename ... Params>
147 bool IsSupportedForDataTypeCl(Optional<std::string&> reasonIfUnsupported,
148  DataType dataType,
149  FloatFunc floatFuncPtr,
150  Uint8Func uint8FuncPtr,
151  Params&&... params)
152 {
153  return IsClBackendSupported(reasonIfUnsupported) &&
154  IsSupportedForDataTypeGeneric(reasonIfUnsupported,
155  dataType,
156  floatFuncPtr,
157  floatFuncPtr,
158  uint8FuncPtr,
159  &FalseFunc<>,
160  &FalseFunc<>,
161  std::forward<Params>(params)...);
162 }
163 } // anonymous namespace
164 
166  : m_ModelContextPtr(modelContextPtr)
167 {
168 }
169 
171  : m_ModelContextPtr(nullptr)
172 {
173 }
174 
176  const TensorInfo& output,
177  Optional<std::string&> reasonIfUnsupported) const
178 {
180  return IsElementwiseUnarySupported(input, output, descriptor, reasonIfUnsupported);
181 }
182 
184  const TensorInfo& output,
185  const ActivationDescriptor& descriptor,
186  Optional<std::string&> reasonIfUnsupported) const
187 {
189  reasonIfUnsupported,
190  input,
191  output,
192  descriptor);
193 }
194 
196  const TensorInfo& input1,
197  const TensorInfo& output,
198  Optional<std::string&> reasonIfUnsupported) const
199 {
201  reasonIfUnsupported,
202  input0,
203  input1,
204  output,
205  nullptr);
206 }
207 
209  const TensorInfo& output,
210  const ArgMinMaxDescriptor& descriptor,
211  Optional<std::string&> reasonIfUnsupported) const
212 {
213 
215  reasonIfUnsupported,
216  input,
217  output,
218  descriptor);
219 }
220 
222  const TensorInfo& output,
223  const TensorInfo& mean,
224  const TensorInfo& var,
225  const TensorInfo& beta,
226  const TensorInfo& gamma,
227  const BatchNormalizationDescriptor& descriptor,
228  Optional<std::string&> reasonIfUnsupported) const
229 {
231  reasonIfUnsupported,
232  input,
233  output,
234  mean,
235  var,
236  beta,
237  gamma,
238  descriptor,
239  nullptr);
240 }
241 
243  const TensorInfo& output,
244  const BatchToSpaceNdDescriptor& descriptor,
245  Optional<std::string&> reasonIfUnsupported) const
246 {
248  reasonIfUnsupported,
249  input,
250  output,
251  descriptor);
252 }
253 
255  const TensorInfo& input1,
256  const TensorInfo& output,
257  const ComparisonDescriptor& descriptor,
258  Optional<std::string&> reasonIfUnsupported) const
259 {
261  reasonIfUnsupported,
262  input0,
263  input1,
264  output,
265  descriptor);
266 }
267 
268 bool ClLayerSupport::IsConcatSupported(const std::vector<const TensorInfo*> inputs,
269  const TensorInfo& output,
270  const ConcatDescriptor& descriptor,
271  Optional<std::string&> reasonIfUnsupported) const
272 {
273  if (descriptor.GetNumDimensions() <= descriptor.GetConcatAxis())
274  {
275  SetValueChecked(reasonIfUnsupported, "Cl Concat: Concat axis > Number of dimensions.");
276  return false;
277  }
278 
279  unsigned int concatInnerAxis = (descriptor.GetNumDimensions() - descriptor.GetConcatAxis()) - 1;
280  if(concatInnerAxis < 3) // Width, height, or channels
281  {
283  reasonIfUnsupported,
284  inputs,
285  output,
286  descriptor);
287  }
288  else if (concatInnerAxis == 3)
289  {
290  // We rely on the sub-tensor optimization to handle the batch dimension for 4D tensors. If we can't use
291  // sub-tensors for this then we can't support it. Here is where we check that the sub-tensors will work.
292  for (auto& input : inputs)
293  {
294  if (input && !output.IsTypeSpaceMatch(*input)) // Cannot use sub-tensors if the types are not same space
295  {
296  SetValueChecked(reasonIfUnsupported, "Cl Concat: Types and quantization parameters must match.");
297  return false;
298  }
299  }
300  return true; // Sub-tensors support concat along batch
301  }
302  else // > 4 dimensions not supported.
303  {
304  SetValueChecked(reasonIfUnsupported, "Cl Concat: Maximum of 4 dimensions supported.");
305  return false;
306  }
307 }
308 
310  Optional<std::string&> reasonIfUnsupported) const
311 {
313  reasonIfUnsupported,
314  output);
315 }
316 
318  const TensorInfo& output,
319  Optional<std::string&> reasonIfUnsupported) const
320 {
322  reasonIfUnsupported,
323  input,
324  output);
325 }
326 
328  const TensorInfo& output,
329  Optional<std::string&> reasonIfUnsupported) const
330 {
332  reasonIfUnsupported,
333  input,
334  output);
335 }
336 
338  const TensorInfo& output,
339  const Convolution2dDescriptor& descriptor,
340  const TensorInfo& weights,
341  const Optional<TensorInfo>& biases,
342  Optional<std::string&> reasonIfUnsupported) const
343 {
344  bool isFastMathEnabled = false;
345 #if defined(ARMCOMPUTECL_ENABLED)
346  if (m_ModelContextPtr)
347  {
348  if (m_ModelContextPtr.get() != nullptr)
349  {
350  auto modelOptions = dynamic_cast<ClBackendModelContext*>(m_ModelContextPtr.get());
351  if (modelOptions)
352  {
353  isFastMathEnabled = modelOptions->IsFastMathEnabled();
354  }
355  }
356  }
357 #endif
358 
360  reasonIfUnsupported,
361  input,
362  output,
363  descriptor,
364  weights,
365  biases,
366  isFastMathEnabled,
367  nullptr);
368 }
369 
371  const TensorInfo& output,
372  Optional<std::string&> reasonIfUnsupported) const
373 {
375  reasonIfUnsupported,
376  input,
377  output);
378 }
379 
381  const TensorInfo& output,
382  const DepthToSpaceDescriptor& descriptor,
383  Optional<std::string&> reasonIfUnsupported) const
384 {
386  reasonIfUnsupported,
387  input,
388  output,
389  descriptor);
390 }
391 
393  const TensorInfo& output,
394  const DepthwiseConvolution2dDescriptor& descriptor,
395  const TensorInfo& weights,
396  const Optional<TensorInfo>& biases,
397  Optional<std::string&> reasonIfUnsupported) const
398 {
400  reasonIfUnsupported,
401  input,
402  output,
403  descriptor,
404  weights,
405  biases,
406  nullptr);
407 }
408 
410  const TensorInfo& output,
411  const DepthwiseConvolution2dDescriptor& descriptor,
412  const TensorInfo& weights,
413  const Optional<TensorInfo>& biases,
414  Optional<std::string&> reasonIfUnsupported) const
415 {
417  reasonIfUnsupported,
418  input,
419  output,
420  descriptor,
421  weights,
422  biases,
423  nullptr);
424 }
425 
426 
428  const TensorInfo& input1,
429  const TensorInfo& output,
430  Optional<std::string&> reasonIfUnsupported) const
431 {
433  reasonIfUnsupported,
434  input0,
435  input1,
436  output,
437  nullptr);
438 }
439 
441  const TensorInfo& output,
442  const ElementwiseUnaryDescriptor& descriptor,
443  Optional<std::string&> reasonIfUnsupported) const
444 {
445  switch(descriptor.m_Operation)
446  {
447  case UnaryOperation::Abs:
449  reasonIfUnsupported,
450  input,
451  output);
452  case UnaryOperation::Exp:
454  reasonIfUnsupported,
455  input,
456  output);
457  case UnaryOperation::Neg:
459  reasonIfUnsupported,
460  input,
461  output);
464  reasonIfUnsupported,
465  input,
466  output);
469  reasonIfUnsupported,
470  input,
471  output);
472  default:
473  return false;
474  }
475 }
476 
478  const TensorInfo& output,
479  const FillDescriptor& descriptor,
480  Optional<std::string&> reasonIfUnsupported) const
481 {
482  armnn::IgnoreUnused(input);
483  armnn::IgnoreUnused(output);
484  armnn::IgnoreUnused(descriptor);
485 
486  return IsClBackendSupported(reasonIfUnsupported);
487 }
488 
490  const TensorInfo& output,
491  Optional<std::string&> reasonIfUnsupported) const
492 {
494  reasonIfUnsupported,
495  input,
496  output);
497 }
498 
500  const TensorInfo& output,
501  const TensorInfo& weights,
502  const TensorInfo& biases,
503  const FullyConnectedDescriptor& descriptor,
504  Optional<std::string&> reasonIfUnsupported) const
505 {
507  reasonIfUnsupported,
508  input,
509  output,
510  weights,
511  biases,
512  descriptor,
513  nullptr);
514 }
515 
517  const TensorInfo& input1,
518  const TensorInfo& output,
519  const GatherDescriptor& descriptor,
520  Optional<std::string&> reasonIfUnsupported) const
521 {
523  reasonIfUnsupported,
524  input0,
525  input1,
526  output,
527  descriptor);
528 }
529 
531  const TensorInfo& input1,
532  const TensorInfo& output,
533  Optional<std::string&> reasonIfUnsupported) const
534 {
536  return IsComparisonSupported(input0, input1, output, descriptor, reasonIfUnsupported);
537 }
538 
540  Optional<std::string&> reasonIfUnsupported) const
541 {
542  return IsClBackendSupported(reasonIfUnsupported, input);
543 }
544 
546  const TensorInfo& output,
547  const InstanceNormalizationDescriptor& descriptor,
548  Optional<std::string&> reasonIfUnsupported) const
549 {
551  reasonIfUnsupported,
552  input,
553  output,
554  descriptor);
555 }
556 
558  const TensorInfo& output,
559  const L2NormalizationDescriptor& descriptor,
560  Optional<std::string&> reasonIfUnsupported) const
561 {
563  reasonIfUnsupported,
564  input,
565  output,
566  descriptor);
567 }
568 
570  const TensorInfo& input1,
571  const TensorInfo& output,
572  const LogicalBinaryDescriptor& descriptor,
573  Optional<std::string&> reasonIfUnsupported) const
574 {
575  IgnoreUnused(output);
576 
577  switch(descriptor.m_Operation)
578  {
581  reasonIfUnsupported,
582  input0,
583  input1,
584  output);
587  reasonIfUnsupported,
588  input0,
589  input1,
590  output);
591  default:
592  return false;
593  }
594 }
595 
596 
598  const TensorInfo& output,
599  const LogSoftmaxDescriptor& descriptor,
600  Optional<std::string&> reasonIfUnsupported) const
601 {
603  reasonIfUnsupported,
604  input,
605  output,
606  descriptor);
607 }
608 
610  const TensorInfo& outputStateIn,
611  const TensorInfo& cellStateIn,
612  const TensorInfo& scratchBuffer,
613  const TensorInfo& outputStateOut,
614  const TensorInfo& cellStateOut,
615  const TensorInfo& output,
616  const LstmDescriptor& descriptor,
617  const LstmInputParamsInfo& paramsInfo,
618  Optional<std::string&> reasonIfUnsupported) const
619 {
621  reasonIfUnsupported,
622  input,
623  outputStateIn,
624  cellStateIn,
625  scratchBuffer,
626  outputStateOut,
627  cellStateOut,
628  output,
629  descriptor,
630  paramsInfo);
631 }
632 
634  const TensorInfo& input1,
635  const TensorInfo& output,
636  Optional<std::string&> reasonIfUnsupported) const
637 {
639  reasonIfUnsupported,
640  input0,
641  input1,
642  output);
643 }
644 
646  const TensorInfo& output,
647  const MeanDescriptor& descriptor,
648  Optional<std::string&> reasonIfUnsupported) const
649 {
651  reasonIfUnsupported,
652  input,
653  output,
654  descriptor);
655 }
656 
657 bool ClLayerSupport::IsMergerSupported(const std::vector<const TensorInfo*> inputs,
658  const TensorInfo& output,
659  const MergerDescriptor& descriptor,
660  Optional<std::string&> reasonIfUnsupported) const
661 {
662  return IsConcatSupported(inputs, output, descriptor, reasonIfUnsupported);
663 }
664 
666  const TensorInfo& input1,
667  const TensorInfo& output,
668  Optional<std::string&> reasonIfUnsupported) const
669 {
671  reasonIfUnsupported,
672  input0,
673  input1,
674  output);
675 }
676 
678  const TensorInfo& input1,
679  const TensorInfo& output,
680  Optional<std::string&> reasonIfUnsupported) const
681 {
683  reasonIfUnsupported,
684  input0,
685  input1,
686  output,
687  nullptr);
688 }
689 
691  const TensorInfo& output,
692  const NormalizationDescriptor& descriptor,
693  Optional<std::string&> reasonIfUnsupported) const
694 {
695  FORWARD_WORKLOAD_VALIDATE_FUNC(ClNormalizationWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
696 }
697 
699  Optional<std::string&> reasonIfUnsupported) const
700 {
701  return IsClBackendSupported(reasonIfUnsupported, output);
702 }
703 
705  const TensorInfo& output,
706  const PadDescriptor& descriptor,
707  Optional<std::string&> reasonIfUnsupported) const
708 {
710  reasonIfUnsupported,
711  input,
712  output,
713  descriptor);
714 }
715 
717  const TensorInfo& output,
718  const PermuteDescriptor& descriptor,
719  Optional<std::string&> reasonIfUnsupported) const
720 {
721  FORWARD_WORKLOAD_VALIDATE_FUNC(ClPermuteWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
722 }
723 
725  const TensorInfo& output,
726  const Pooling2dDescriptor& descriptor,
727  Optional<std::string&> reasonIfUnsupported) const
728 {
729  FORWARD_WORKLOAD_VALIDATE_FUNC(ClPooling2dWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
730 }
731 
733  const armnn::TensorInfo &alpha,
734  const armnn::TensorInfo &output,
735  armnn::Optional<std::string &> reasonIfUnsupported) const
736 {
737  FORWARD_WORKLOAD_VALIDATE_FUNC(ClPreluWorkloadValidate, reasonIfUnsupported, input, alpha, output);
738 }
739 
741  const TensorInfo& previousOutputIn,
742  const TensorInfo& previousCellStateIn,
743  const TensorInfo& outputStateOut,
744  const TensorInfo& cellStateOut,
745  const TensorInfo& output,
746  const QLstmDescriptor& descriptor,
747  const LstmInputParamsInfo& paramsInfo,
748  Optional<std::string&> reasonIfUnsupported) const
749 {
750  if (input.GetDataType() == armnn::DataType::QAsymmS8 &&
751  previousOutputIn.GetDataType() == armnn::DataType::QAsymmS8 &&
752  previousCellStateIn.GetDataType() == armnn::DataType::QSymmS16 &&
753  outputStateOut.GetDataType() == armnn::DataType::QAsymmS8 &&
754  cellStateOut.GetDataType() == armnn::DataType::QSymmS16 &&
756  {
758  reasonIfUnsupported,
759  input,
760  previousCellStateIn,
761  previousOutputIn,
762  cellStateOut,
763  outputStateOut,
764  output,
765  descriptor,
766  paramsInfo);
767  }
768  else
769  {
770  return false;
771  }
772 }
773 
775  const TensorInfo& previousCellStateIn,
776  const TensorInfo& previousOutputIn,
777  const TensorInfo& cellStateOut,
778  const TensorInfo& output,
779  const QuantizedLstmInputParamsInfo& paramsInfo,
780  Optional<std::string&> reasonIfUnsupported) const
781 {
783  reasonIfUnsupported,
784  input,
785  previousCellStateIn,
786  previousOutputIn,
787  cellStateOut,
788  output,
789  paramsInfo);
790 }
791 
793  const TensorInfo& output,
794  Optional<std::string&> reasonIfUnsupported) const
795 {
797  reasonIfUnsupported,
798  input,
799  output);
800 }
801 
803  const TensorInfo& output,
804  const ReduceDescriptor& descriptor,
805  Optional<std::string&> reasonIfUnsupported) const
806 {
808  reasonIfUnsupported,
809  input,
810  output,
811  descriptor);
812 }
813 
815  const TensorInfo& output,
816  const ReshapeDescriptor& descriptor,
817  Optional<std::string&> reasonIfUnsupported) const
818 {
819  IgnoreUnused(descriptor);
820  FORWARD_WORKLOAD_VALIDATE_FUNC(ClReshapeWorkloadValidate, reasonIfUnsupported, input, output);
821 }
822 
824  const TensorInfo& output,
825  const ResizeDescriptor& descriptor,
826  Optional<std::string&> reasonIfUnsupported) const
827 {
828  FORWARD_WORKLOAD_VALIDATE_FUNC(ClResizeWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
829 }
830 
832  const TensorInfo& output,
833  Optional<std::string&> reasonIfUnsupported) const
834 {
835  ResizeDescriptor descriptor;
836  descriptor.m_Method = ResizeMethod::Bilinear;
837  descriptor.m_DataLayout = DataLayout::NCHW;
838 
839  const TensorShape& outputShape = output.GetShape();
840  descriptor.m_TargetHeight = outputShape[2];
841  descriptor.m_TargetWidth = outputShape[3];
842 
843  return IsResizeSupported(input, output, descriptor, reasonIfUnsupported);
844 }
845 
847  const TensorInfo& output,
848  Optional<std::string&> reasonIfUnsupported) const
849 {
851  return IsElementwiseUnarySupported(input, output, descriptor, reasonIfUnsupported);
852 }
853 
855  const TensorInfo& output,
856  const SliceDescriptor& descriptor,
857  Optional<std::string&> reasonIfUnsupported) const
858 {
859  FORWARD_WORKLOAD_VALIDATE_FUNC(ClSliceWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
860 }
861 
863  const TensorInfo& output,
864  const SoftmaxDescriptor& descriptor,
865  Optional<std::string&> reasonIfUnsupported) const
866 {
867  FORWARD_WORKLOAD_VALIDATE_FUNC(ClSoftmaxWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
868 }
869 
871  const TensorInfo& output,
872  const SpaceToBatchNdDescriptor& descriptor,
873  Optional<std::string&> reasonIfUnsupported) const
874 {
876  reasonIfUnsupported,
877  input,
878  output,
879  descriptor);
880 }
881 
883  const TensorInfo& output,
884  const SpaceToDepthDescriptor& descriptor,
885  Optional<std::string&> reasonIfUnsupported) const
886 {
888  reasonIfUnsupported,
889  input,
890  output,
891  descriptor);
892 }
893 
895  const ViewsDescriptor& descriptor,
896  Optional<std::string&> reasonIfUnsupported) const
897 {
898  IgnoreUnused(descriptor);
899  return IsSupportedForDataTypeCl(reasonIfUnsupported,
900  input.GetDataType(),
901  &TrueFunc<>,
902  &TrueFunc<>);
903 }
904 
906  const std::vector<std::reference_wrapper<TensorInfo>>& outputs,
907  const ViewsDescriptor& descriptor,
908  Optional<std::string&> reasonIfUnsupported) const
909 {
910 #if defined(ARMCOMPUTECL_ENABLED)
911  // Split along the last dimension, cannot use sub-tensors
912  // as width and height of the sub-tensors do not match
913  // the width and height of the parent tensor
914  // in case of input with more than 2D.
915  std::set<unsigned int> splitAxis = ComputeSplitAxis(descriptor, input.GetShape());
916  if (descriptor.GetNumDimensions() > 2 && splitAxis.size() == 1 &&
917  *splitAxis.begin() == descriptor.GetNumDimensions() - 1 )
918  {
920  reasonIfUnsupported,
921  input,
922  outputs,
923  *splitAxis.begin());
924  }
925 #endif
926  IgnoreUnused(descriptor);
927  for (auto output : outputs)
928  {
929  if (!input.IsTypeSpaceMatch(output)) // Cannot use sub-tensors if the types are not same space
930  {
931  SetValueChecked(reasonIfUnsupported, "Cl Splitter: Types and quantization parameters must match.");
932  return false;
933  }
934  }
935  return true;
936 }
937 
938 bool ClLayerSupport::IsStackSupported(const std::vector<const TensorInfo*>& inputs,
939  const TensorInfo& output,
940  const StackDescriptor& descriptor,
941  Optional<std::string&> reasonIfUnsupported) const
942 {
944  reasonIfUnsupported,
945  inputs,
946  output,
947  descriptor);
948 }
949 
951  const TensorInfo& output,
952  const StridedSliceDescriptor& descriptor,
953  Optional<std::string&> reasonIfUnsupported) const
954 {
956  reasonIfUnsupported,
957  input,
958  output,
959  descriptor);
960 }
961 
963  const TensorInfo& input1,
964  const TensorInfo& output,
965  Optional<std::string&> reasonIfUnsupported) const
966 {
968  reasonIfUnsupported,
969  input0,
970  input1,
971  output,
972  nullptr);
973 }
974 
976  const TensorInfo& output,
977  const TransposeConvolution2dDescriptor& descriptor,
978  const TensorInfo& weights,
979  const Optional<TensorInfo>& biases,
980  Optional<std::string&> reasonIfUnsupported) const
981 {
983  reasonIfUnsupported,
984  input,
985  output,
986  descriptor,
987  weights,
988  biases);
989 }
990 
992  const TensorInfo& output,
993  const TransposeDescriptor& descriptor,
994  Optional<std::string&> reasonIfUnsupported) const
995 {
996  FORWARD_WORKLOAD_VALIDATE_FUNC(ClTransposeWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
997 }
998 
999 } // namespace armnn
bool IsMinimumSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
arm_compute::Status ClAdditionValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ActivationDescriptor *activationDescriptor)
arm_compute::Status ClSpaceToDepthWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const SpaceToDepthDescriptor &desc)
bool IsOutputSupported(const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsTransposeSupported(const TensorInfo &input, const TensorInfo &output, const TransposeDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
arm_compute::Status ClLogicalOrWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output)
arm_compute::Status ClDequantizeWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
bool IsFloorSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
arm_compute::Status ClComparisonWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ComparisonDescriptor &descriptor)
UnaryOperation m_Operation
Specifies the elementwiseUnary operation to execute.
arm_compute::Status ClFullyConnectedWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const TensorInfo &weights, const TensorInfo &biases, const FullyConnectedDescriptor &descriptor, const ActivationDescriptor *activationDescriptor)
A ViewsDescriptor for the SplitterLayer.
arm_compute::Status ClL2NormalizationWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const L2NormalizationDescriptor &descriptor)
arm_compute::Status ClConvertFp32ToFp16WorkloadValidate(const TensorInfo &input, const TensorInfo &output)
bool IsTypeSpaceMatch(const TensorInfo &other) const
Check that the types are the same and, if quantize, that the quantization parameters are the same...
Definition: Tensor.cpp:423
arm_compute::Status ClDivisionWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ActivationDescriptor *activationDescriptor)
bool IsArgMinMaxSupported(const TensorInfo &input, const TensorInfo &output, const ArgMinMaxDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsReduceSupported(const TensorInfo &input, const TensorInfo &output, const ReduceDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
A TransposeConvolution2dDescriptor for the TransposeConvolution2dLayer.
const TensorShape & GetShape() const
Definition: Tensor.hpp:187
arm_compute::Status ClInstanceNormalizationWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const InstanceNormalizationDescriptor &descriptor)
arm_compute::Status ClLogSoftmaxWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const LogSoftmaxDescriptor &descriptor)
A ReshapeDescriptor for the ReshapeLayer.
arm_compute::Status ClBatchToSpaceNdWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const BatchToSpaceNdDescriptor &desc)
bool IsSubtractionSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsPreluSupported(const TensorInfo &input, const TensorInfo &alpha, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
arm_compute::Status ClSplitterWorkloadValidate(const TensorInfo &input, const std::vector< std::reference_wrapper< TensorInfo >> &outputs, unsigned int splitAxis)
uint32_t GetNumDimensions() const
Get the number of dimensions.
A ComparisonDescriptor for the ComparisonLayer.
Definition: Descriptors.hpp:78
bool IsSliceSupported(const TensorInfo &input, const TensorInfo &output, const SliceDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
arm_compute::Status ClSliceWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const SliceDescriptor &descriptor)
ISubgraphViewConverter supported
A Convolution2dDescriptor for the Convolution2dLayer.
bool IsQuantizeSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
arm_compute::Status ClSubtractionValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ActivationDescriptor *activationDescriptor)
ResizeMethod m_Method
The Interpolation method to use (Bilinear, NearestNeighbor).
arm_compute::Status ClLstmFloatWorkloadValidate(const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &scratchBuffer, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const LstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo)
bool IsPadSupported(const TensorInfo &input, const TensorInfo &output, const PadDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
arm_compute::Status ClPermuteWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const PermuteDescriptor &descriptor)
bool IsGreaterSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported) const override
bool IsLstmSupported(const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &scratchBuffer, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const LstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
arm_compute::Status ClTransposeWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const TransposeDescriptor &descriptor)
arm_compute::Status ClReshapeWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
bool IsStridedSliceSupported(const TensorInfo &input, const TensorInfo &output, const StridedSliceDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsMergerSupported(const std::vector< const TensorInfo *> inputs, const TensorInfo &output, const MergerDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
A LogicalBinaryDescriptor for the LogicalBinaryLayer.
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported,...)
bool IsLogicalBinarySupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const LogicalBinaryDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported) const override
bool IsLogSoftmaxSupported(const TensorInfo &input, const TensorInfo &output, const LogSoftmaxDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
Copyright (c) 2021 ARM Limited and Contributors.
arm_compute::Status ClConvolution2dWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const Convolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, bool isFastMathEnabled, const ActivationDescriptor *activationDescriptor)
void IgnoreUnused(Ts &&...)
std::set< unsigned int > ComputeSplitAxis(const armnn::SplitterDescriptor &desc, const TensorShape &input)
bool IsPermuteSupported(const TensorInfo &input, const TensorInfo &output, const PermuteDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsComparisonSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ComparisonDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
A SpaceToDepthDescriptor for the SpaceToDepthLayer.
bool IsNormalizationSupported(const TensorInfo &input, const TensorInfo &output, const NormalizationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
LogicalBinaryOperation m_Operation
Specifies the logical operation to execute.
A BatchToSpaceNdDescriptor for the BatchToSpaceNdLayer.
bool IsAbsSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsConvolution2dSupported(const TensorInfo &input, const TensorInfo &output, const Convolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
A ResizeDescriptor for the ResizeLayer.
arm_compute::Status ClSoftmaxWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const SoftmaxDescriptor &descriptor)
A StackDescriptor for the StackLayer.
bool IsQuantizedLstmSupported(const TensorInfo &input, const TensorInfo &previousCellStateIn, const TensorInfo &previousOutputIn, const TensorInfo &cellStateOut, const TensorInfo &output, const QuantizedLstmInputParamsInfo &paramsInfo, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
arm_compute::Status ClNormalizationWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const NormalizationDescriptor &descriptor)
arm_compute::Status ClPooling2dWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const Pooling2dDescriptor &descriptor)
A PadDescriptor for the PadLayer.
bool IsElementwiseUnarySupported(const TensorInfo &input, const TensorInfo &output, const ElementwiseUnaryDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
arm_compute::Status ClLogicalNotWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
bool IsGatherSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const GatherDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported) const override
DataType
Definition: Types.hpp:32
bool IsBatchNormalizationSupported(const TensorInfo &input, const TensorInfo &output, const TensorInfo &mean, const TensorInfo &var, const TensorInfo &beta, const TensorInfo &gamma, const BatchNormalizationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
An LstmDescriptor for the LstmLayer.
bool IsConvertFp16ToFp32Supported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsDepthToSpaceSupported(const TensorInfo &input, const TensorInfo &output, const DepthToSpaceDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
arm_compute::Status ClStackWorkloadValidate(const std::vector< const TensorInfo *> &inputs, const TensorInfo &output, const StackDescriptor &descriptor)
arm_compute::Status ClConstantWorkloadValidate(const TensorInfo &output)
bool IsSpaceToBatchNdSupported(const TensorInfo &input, const TensorInfo &output, const SpaceToBatchNdDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsResizeSupported(const TensorInfo &input, const TensorInfo &output, const ResizeDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
std::shared_ptr< IBackendModelContext > IBackendSpecificModelContextPtr
arm_compute::Status ClPadValidate(const TensorInfo &input, const TensorInfo &output, const PadDescriptor &descriptor)
A L2NormalizationDescriptor for the L2NormalizationLayer.
arm_compute::Status ClSpaceToBatchNdWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const SpaceToBatchNdDescriptor &descriptor)
An ArgMinMaxDescriptor for ArgMinMaxLayer.
Definition: Descriptors.hpp:56
DataType GetDataType() const
Definition: Tensor.hpp:194
An OriginsDescriptor for the ConcatLayer.
A ReduceDescriptor for the REDUCE operators.
A FullyConnectedDescriptor for the FullyConnectedLayer.
arm_compute::Status ClStridedSliceWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const StridedSliceDescriptor &descriptor)
arm_compute::Status ClMultiplicationWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ActivationDescriptor *activationDescriptor)
uint32_t m_TargetWidth
Target width value.
A GatherDescriptor for the GatherLayer.
arm_compute::Status ClConvertFp16ToFp32WorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Status
enumeration
Definition: Types.hpp:26
arm_compute::Status ClGatherWorkloadValidate(const TensorInfo &input, const TensorInfo &indices, const TensorInfo &output, const GatherDescriptor &descriptor)
bool IsFillSupported(const TensorInfo &input, const TensorInfo &output, const FillDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsStackSupported(const std::vector< const TensorInfo *> &inputs, const TensorInfo &output, const StackDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsReshapeSupported(const TensorInfo &input, const TensorInfo &output, const ReshapeDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsDilatedDepthwiseConvolutionSupported(const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string &> reason=EmptyOptional()) const override
A QLstmDescriptor for the QLstmLayer.
An ActivationDescriptor for the ActivationLayer.
Definition: Descriptors.hpp:25
bool IsQLstmSupported(const TensorInfo &input, const TensorInfo &previousOutputIn, const TensorInfo &previousCellStateIn, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const QLstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
uint32_t m_TargetHeight
Target height value.
A SliceDescriptor for the SliceLayer.
bool IsDivisionSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsMultiplicationSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
arm_compute::Status ClResizeWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const ResizeDescriptor &descriptor)
bool IsFullyConnectedSupported(const TensorInfo &input, const TensorInfo &output, const TensorInfo &weights, const TensorInfo &biases, const FullyConnectedDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
void SetValueChecked(Optional< T &> optionalRef, V &&val)
bool IsDequantizeSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
A SpaceToBatchNdDescriptor for the SpaceToBatchNdLayer.
arm_compute::Status ClMeanValidate(const TensorInfo &input, const TensorInfo &output, const MeanDescriptor &desc)
bool IsResizeBilinearSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
A ElementwiseUnaryDescriptor for the ElementwiseUnaryLayer.
Definition: Descriptors.hpp:98
bool IsAdditionSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsMaximumSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
arm_compute::Status ClPreluWorkloadValidate(const TensorInfo &input, const TensorInfo &alpha, const TensorInfo &output)
arm_compute::Status ClAbsWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
bool IsMeanSupported(const TensorInfo &input, const TensorInfo &output, const MeanDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
arm_compute::Status ClMinimumWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output)
uint32_t GetNumDimensions() const
Get the number of dimensions.
A MeanDescriptor for the MeanLayer.
arm_compute::Status ClNegWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
bool IsInputSupported(const TensorInfo &input, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
arm_compute::Status ClBatchNormalizationValidate(const TensorInfo &input, const TensorInfo &output, const TensorInfo &mean, const TensorInfo &var, const TensorInfo &beta, const TensorInfo &gamma, const BatchNormalizationDescriptor &desc, const ActivationDescriptor *activationDescriptor)
arm_compute::Status ClArgMinMaxWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const ArgMinMaxDescriptor &descriptor)
A TransposeDescriptor for the TransposeLayer.
A StridedSliceDescriptor for the StridedSliceLayer.
arm_compute::Status ClConcatWorkloadValidate(const std::vector< const TensorInfo *> &inputs, const TensorInfo &output, const OriginsDescriptor &descriptor)
bool IsSoftmaxSupported(const TensorInfo &input, const TensorInfo &output, const SoftmaxDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
arm_compute::Status ClExpWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
arm_compute::Status ClQLstmWorkloadValidate(const TensorInfo &input, const TensorInfo &cellStateIn, const TensorInfo &outputStateIn, const TensorInfo &cellStateOut, const TensorInfo &outputStateOut, const TensorInfo &output, const QLstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo)
bool IsConcatSupported(const std::vector< const TensorInfo *> inputs, const TensorInfo &output, const ConcatDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
arm_compute::Status ClTransposeConvolution2dWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const TransposeConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases)
bool IsL2NormalizationSupported(const TensorInfo &input, const TensorInfo &output, const L2NormalizationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
arm_compute::Status ClDepthToSpaceWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const DepthToSpaceDescriptor &desc)
arm_compute::Status ClActivationWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const ActivationDescriptor &descriptor)
bool IsDepthwiseConvolutionSupported(const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
A Pooling2dDescriptor for the Pooling2dLayer.
A NormalizationDescriptor for the NormalizationLayer.
bool IsSplitterSupported(const TensorInfo &input, const ViewsDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
The ClBackendModelContext is used to pass in CL specific backend ModelOptions.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
An InstanceNormalizationDescriptor for InstanceNormalizationLayer.
bool IsConvertFp32ToFp16Supported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
unsigned int GetConcatAxis() const
Get the concatenation axis value.
bool IsInstanceNormalizationSupported(const TensorInfo &input, const TensorInfo &output, const InstanceNormalizationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsPooling2dSupported(const TensorInfo &input, const TensorInfo &output, const Pooling2dDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsRsqrtSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
arm_compute::Status ClMaximumWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output)
bool IsActivationSupported(const TensorInfo &input, const TensorInfo &output, const ActivationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsSupportedForDataTypeGeneric(Optional< std::string &> reasonIfUnsupported, DataType dataType, Float16Func float16FuncPtr, Float32Func float32FuncPtr, Uint8Func uint8FuncPtr, Int32Func int32FuncPtr, BooleanFunc booleanFuncPtr, Params &&... params)
A SoftmaxDescriptor for the SoftmaxLayer.
arm_compute::Status ClDepthwiseConvolutionWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, const ActivationDescriptor *activationDescriptor)
arm_compute::Status ClReduceWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const ReduceDescriptor &desc)
arm_compute::Status ClQuantizedLstmWorkloadValidate(const TensorInfo &input, const TensorInfo &previousCellStateIn, const TensorInfo &previousOutputIn, const TensorInfo &cellStateOut, const TensorInfo &output, const QuantizedLstmInputParamsInfo &paramsInfo)
arm_compute::Status ClFloorWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
arm_compute::Status ClRsqrtWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
bool IsSpaceToDepthSupported(const TensorInfo &input, const TensorInfo &output, const SpaceToDepthDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
A DepthwiseConvolution2dDescriptor for the DepthwiseConvolution2dLayer.
A FillDescriptor for the FillLayer.
A BatchNormalizationDescriptor for the BatchNormalizationLayer.
arm_compute::Status ClLogicalAndWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output)
arm_compute::Status ClQuantizeWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
A PermuteDescriptor for the PermuteLayer.
bool IsBatchToSpaceNdSupported(const TensorInfo &input, const TensorInfo &output, const BatchToSpaceNdDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsConstantSupported(const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsTransposeConvolution2dSupported(const TensorInfo &input, const TensorInfo &output, const TransposeConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override