ArmNN
 20.11
ClLayerSupport.cpp
Go to the documentation of this file.
1 //
2 // Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
6 #include "ClLayerSupport.hpp"
7 #include "ClBackendId.hpp"
9 
10 #include <armnn/Descriptors.hpp>
12 
13 #include <InternalTypes.hpp>
14 #include <LayerSupportCommon.hpp>
15 
18 
19 #if defined(ARMCOMPUTECL_ENABLED)
76 #endif
77 
78 
79 namespace armnn
80 {
81 
82 namespace
83 {
84 
85 template<unsigned int FilterSize>
86 bool IsMatchingSize2d(const TensorInfo& weightInfo)
87 {
88  // Width & Height must match.
89  return (weightInfo.GetShape()[3] == FilterSize) && (weightInfo.GetShape()[2] == FilterSize);
90 }
91 
template<uint32_t ValidStride>
bool IsMatchingStride(uint32_t actualStride)
{
    // Base case of the variadic overload below: a single candidate stride,
    // so the check is plain equality.
    return actualStride == ValidStride;
}
97 
template<uint32_t FirstStride, uint32_t SecondStride, uint32_t... ValidStrides>
bool IsMatchingStride(uint32_t actualStride)
{
    // Test the candidates one at a time, recursing on the remainder of the
    // pack until a match is found or the pack is exhausted.
    if (IsMatchingStride<FirstStride>(actualStride))
    {
        return true;
    }
    return IsMatchingStride<SecondStride, ValidStrides...>(actualStride);
}
103 
104 template<typename ... Args>
105 bool IsClBackendSupported(Optional<std::string&> reasonIfUnsupported, Args... args)
106 {
107  IgnoreUnused(reasonIfUnsupported, (args)...);
108 #if defined(ARMCOMPUTECL_ENABLED)
109  return true;
110 #else
111  if (reasonIfUnsupported)
112  {
113  reasonIfUnsupported.value() = "The armnn library has been built without CL support";
114  }
115  return false;
116 #endif
117 }
118 
// Dispatch helper: when the CL backend is compiled in, evaluate the support
// expression directly; otherwise route to IsClBackendSupported, which fills
// in the standard "built without CL support" reason and returns false.
#if defined(ARMCOMPUTECL_ENABLED)
#define FORWARD_CL_LAYER_SUPPORT_FUNC(expr) (expr)
#else
#define FORWARD_CL_LAYER_SUPPORT_FUNC(expr) IsClBackendSupported(reasonIfUnsupported)
#endif
124 
#if defined(ARMCOMPUTECL_ENABLED)
// Runs an arm_compute validate function and converts its Status into the
// bool + optional-reason convention used by the layer-support queries below.
template<class FuncType, class... Args>
inline bool IsWorkloadSupported(FuncType&& func, Optional<std::string&> reasonIfUnsupported, Args&&... args)
{
    arm_compute::Status aclStatus = func(std::forward<Args>(args)...);
    const bool supported = (aclStatus.error_code() == arm_compute::ErrorCode::OK);
    if (!supported && reasonIfUnsupported)
    {
        // Surface the Arm Compute Library's own description of the failure.
        reasonIfUnsupported.value() = aclStatus.error_description();
    }
    return supported;
}

// With CL enabled: forward the arguments to the workload's validate function
// and return its verdict from the calling IsXxxSupported method.
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported, ...) \
    return IsWorkloadSupported(func, reasonIfUnsupported, __VA_ARGS__);
#else
// Without CL: every query fails with the standard "built without CL" reason.
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported, ...) \
    return IsClBackendSupported(reasonIfUnsupported, __VA_ARGS__);
#endif
144 
145 template<typename FloatFunc, typename Uint8Func, typename ... Params>
146 bool IsSupportedForDataTypeCl(Optional<std::string&> reasonIfUnsupported,
147  DataType dataType,
148  FloatFunc floatFuncPtr,
149  Uint8Func uint8FuncPtr,
150  Params&&... params)
151 {
152  return IsClBackendSupported(reasonIfUnsupported) &&
153  IsSupportedForDataTypeGeneric(reasonIfUnsupported,
154  dataType,
155  floatFuncPtr,
156  floatFuncPtr,
157  uint8FuncPtr,
158  &FalseFunc<>,
159  &FalseFunc<>,
160  std::forward<Params>(params)...);
161 }
162 } // anonymous namespace
163 
165  : m_ModelContextPtr(modelContextPtr)
166 {
167 }
168 
170  : m_ModelContextPtr(nullptr)
171 {
172 }
173 
175  const TensorInfo& output,
176  Optional<std::string&> reasonIfUnsupported) const
177 {
179  return IsElementwiseUnarySupported(input, output, descriptor, reasonIfUnsupported);
180 }
181 
183  const TensorInfo& output,
184  const ActivationDescriptor& descriptor,
185  Optional<std::string&> reasonIfUnsupported) const
186 {
188  reasonIfUnsupported,
189  input,
190  output,
191  descriptor);
192 }
193 
195  const TensorInfo& input1,
196  const TensorInfo& output,
197  Optional<std::string&> reasonIfUnsupported) const
198 {
200  reasonIfUnsupported,
201  input0,
202  input1,
203  output,
204  nullptr);
205 }
206 
208  const TensorInfo& output,
209  const ArgMinMaxDescriptor& descriptor,
210  Optional<std::string&> reasonIfUnsupported) const
211 {
212 
214  reasonIfUnsupported,
215  input,
216  output,
217  descriptor);
218 }
219 
221  const TensorInfo& output,
222  const TensorInfo& mean,
223  const TensorInfo& var,
224  const TensorInfo& beta,
225  const TensorInfo& gamma,
226  const BatchNormalizationDescriptor& descriptor,
227  Optional<std::string&> reasonIfUnsupported) const
228 {
230  reasonIfUnsupported,
231  input,
232  output,
233  mean,
234  var,
235  beta,
236  gamma,
237  descriptor,
238  nullptr);
239 }
240 
242  const TensorInfo& output,
243  const BatchToSpaceNdDescriptor& descriptor,
244  Optional<std::string&> reasonIfUnsupported) const
245 {
247  reasonIfUnsupported,
248  input,
249  output,
250  descriptor);
251 }
252 
254  const TensorInfo& input1,
255  const TensorInfo& output,
256  const ComparisonDescriptor& descriptor,
257  Optional<std::string&> reasonIfUnsupported) const
258 {
260  reasonIfUnsupported,
261  input0,
262  input1,
263  output,
264  descriptor);
265 }
266 
267 bool ClLayerSupport::IsConcatSupported(const std::vector<const TensorInfo*> inputs,
268  const TensorInfo& output,
269  const ConcatDescriptor& descriptor,
270  Optional<std::string&> reasonIfUnsupported) const
271 {
272  if (descriptor.GetNumDimensions() <= descriptor.GetConcatAxis())
273  {
274  SetValueChecked(reasonIfUnsupported, "Cl Concat: Concat axis > Number of dimensions.");
275  return false;
276  }
277 
278  unsigned int concatInnerAxis = (descriptor.GetNumDimensions() - descriptor.GetConcatAxis()) - 1;
279  if(concatInnerAxis < 3) // Width, height, or channels
280  {
282  reasonIfUnsupported,
283  inputs,
284  output,
285  descriptor);
286  }
287  else if (concatInnerAxis == 3)
288  {
289  // We rely on the sub-tensor optimization to handle the batch dimension for 4D tensors. If we can't use
290  // sub-tensors for this then we can't support it. Here is where we check that the sub-tensors will work.
291  for (auto& input : inputs)
292  {
293  if (input && !output.IsTypeSpaceMatch(*input)) // Cannot use sub-tensors if the types are not same space
294  {
295  SetValueChecked(reasonIfUnsupported, "Cl Concat: Types and quantization parameters must match.");
296  return false;
297  }
298  }
299  return true; // Sub-tensors support concat along batch
300  }
301  else // > 4 dimensions not supported.
302  {
303  SetValueChecked(reasonIfUnsupported, "Cl Concat: Maximum of 4 dimensions supported.");
304  return false;
305  }
306 }
307 
309  Optional<std::string&> reasonIfUnsupported) const
310 {
312  reasonIfUnsupported,
313  output);
314 }
315 
317  const TensorInfo& output,
318  Optional<std::string&> reasonIfUnsupported) const
319 {
321  reasonIfUnsupported,
322  input,
323  output);
324 }
325 
327  const TensorInfo& output,
328  Optional<std::string&> reasonIfUnsupported) const
329 {
331  reasonIfUnsupported,
332  input,
333  output);
334 }
335 
337  const TensorInfo& output,
338  const Convolution2dDescriptor& descriptor,
339  const TensorInfo& weights,
340  const Optional<TensorInfo>& biases,
341  Optional<std::string&> reasonIfUnsupported) const
342 {
343  bool isFastMathEnabled = false;
344 #if defined(ARMCOMPUTECL_ENABLED)
345  if (m_ModelContextPtr)
346  {
347  if (m_ModelContextPtr.get() != nullptr)
348  {
349  auto modelOptions = dynamic_cast<ClBackendModelContext*>(m_ModelContextPtr.get());
350  if (modelOptions)
351  {
352  isFastMathEnabled = modelOptions->IsFastMathEnabled();
353  }
354  }
355  }
356 #endif
357 
359  reasonIfUnsupported,
360  input,
361  output,
362  descriptor,
363  weights,
364  biases,
365  isFastMathEnabled,
366  nullptr);
367 }
368 
370  const TensorInfo& output,
371  Optional<std::string&> reasonIfUnsupported) const
372 {
374  reasonIfUnsupported,
375  input,
376  output);
377 }
378 
380  const TensorInfo& output,
381  const DepthToSpaceDescriptor& descriptor,
382  Optional<std::string&> reasonIfUnsupported) const
383 {
385  reasonIfUnsupported,
386  input,
387  output,
388  descriptor);
389 }
390 
392  const TensorInfo& output,
393  const DepthwiseConvolution2dDescriptor& descriptor,
394  const TensorInfo& weights,
395  const Optional<TensorInfo>& biases,
396  Optional<std::string&> reasonIfUnsupported) const
397 {
399  reasonIfUnsupported,
400  input,
401  output,
402  descriptor,
403  weights,
404  biases,
405  nullptr);
406 }
407 
409  const TensorInfo& output,
410  const DepthwiseConvolution2dDescriptor& descriptor,
411  const TensorInfo& weights,
412  const Optional<TensorInfo>& biases,
413  Optional<std::string&> reasonIfUnsupported) const
414 {
416  reasonIfUnsupported,
417  input,
418  output,
419  descriptor,
420  weights,
421  biases,
422  nullptr);
423 }
424 
425 
427  const TensorInfo& input1,
428  const TensorInfo& output,
429  Optional<std::string&> reasonIfUnsupported) const
430 {
432  reasonIfUnsupported,
433  input0,
434  input1,
435  output,
436  nullptr);
437 }
438 
440  const TensorInfo& output,
441  const ElementwiseUnaryDescriptor& descriptor,
442  Optional<std::string&> reasonIfUnsupported) const
443 {
444  switch(descriptor.m_Operation)
445  {
446  case UnaryOperation::Abs:
448  reasonIfUnsupported,
449  input,
450  output);
451  case UnaryOperation::Exp:
453  reasonIfUnsupported,
454  input,
455  output);
456  case UnaryOperation::Neg:
458  reasonIfUnsupported,
459  input,
460  output);
463  reasonIfUnsupported,
464  input,
465  output);
468  reasonIfUnsupported,
469  input,
470  output);
471  default:
472  return false;
473  }
474 }
475 
477  const TensorInfo& output,
478  const FillDescriptor& descriptor,
479  Optional<std::string&> reasonIfUnsupported) const
480 {
481  armnn::IgnoreUnused(input);
482  armnn::IgnoreUnused(output);
483  armnn::IgnoreUnused(descriptor);
484 
485  return IsClBackendSupported(reasonIfUnsupported);
486 }
487 
489  const TensorInfo& output,
490  Optional<std::string&> reasonIfUnsupported) const
491 {
493  reasonIfUnsupported,
494  input,
495  output);
496 }
497 
499  const TensorInfo& output,
500  const TensorInfo& weights,
501  const TensorInfo& biases,
502  const FullyConnectedDescriptor& descriptor,
503  Optional<std::string&> reasonIfUnsupported) const
504 {
506  reasonIfUnsupported,
507  input,
508  output,
509  weights,
510  biases,
511  descriptor,
512  nullptr);
513 }
514 
516  const TensorInfo& input1,
517  const TensorInfo& output,
518  const GatherDescriptor& descriptor,
519  Optional<std::string&> reasonIfUnsupported) const
520 {
522  reasonIfUnsupported,
523  input0,
524  input1,
525  output,
526  descriptor);
527 }
528 
530  const TensorInfo& input1,
531  const TensorInfo& output,
532  Optional<std::string&> reasonIfUnsupported) const
533 {
535  return IsComparisonSupported(input0, input1, output, descriptor, reasonIfUnsupported);
536 }
537 
539  Optional<std::string&> reasonIfUnsupported) const
540 {
541  return IsClBackendSupported(reasonIfUnsupported, input);
542 }
543 
545  const TensorInfo& output,
546  const InstanceNormalizationDescriptor& descriptor,
547  Optional<std::string&> reasonIfUnsupported) const
548 {
550  reasonIfUnsupported,
551  input,
552  output,
553  descriptor);
554 }
555 
557  const TensorInfo& output,
558  const L2NormalizationDescriptor& descriptor,
559  Optional<std::string&> reasonIfUnsupported) const
560 {
562  reasonIfUnsupported,
563  input,
564  output,
565  descriptor);
566 }
567 
569  const TensorInfo& input1,
570  const TensorInfo& output,
571  const LogicalBinaryDescriptor& descriptor,
572  Optional<std::string&> reasonIfUnsupported) const
573 {
574  IgnoreUnused(output);
575 
576  switch(descriptor.m_Operation)
577  {
580  reasonIfUnsupported,
581  input0,
582  input1,
583  output);
586  reasonIfUnsupported,
587  input0,
588  input1,
589  output);
590  default:
591  return false;
592  }
593 }
594 
595 
597  const TensorInfo& output,
598  const LogSoftmaxDescriptor& descriptor,
599  Optional<std::string&> reasonIfUnsupported) const
600 {
602  reasonIfUnsupported,
603  input,
604  output,
605  descriptor);
606 }
607 
609  const TensorInfo& outputStateIn,
610  const TensorInfo& cellStateIn,
611  const TensorInfo& scratchBuffer,
612  const TensorInfo& outputStateOut,
613  const TensorInfo& cellStateOut,
614  const TensorInfo& output,
615  const LstmDescriptor& descriptor,
616  const LstmInputParamsInfo& paramsInfo,
617  Optional<std::string&> reasonIfUnsupported) const
618 {
620  reasonIfUnsupported,
621  input,
622  outputStateIn,
623  cellStateIn,
624  scratchBuffer,
625  outputStateOut,
626  cellStateOut,
627  output,
628  descriptor,
629  paramsInfo);
630 }
631 
633  const TensorInfo& input1,
634  const TensorInfo& output,
635  Optional<std::string&> reasonIfUnsupported) const
636 {
638  reasonIfUnsupported,
639  input0,
640  input1,
641  output);
642 }
643 
645  const TensorInfo& output,
646  const MeanDescriptor& descriptor,
647  Optional<std::string&> reasonIfUnsupported) const
648 {
650  reasonIfUnsupported,
651  input,
652  output,
653  descriptor);
654 }
655 
// Merger support simply delegates to the Concat support check.
// NOTE(review): MergerDescriptor appears interchangeable with ConcatDescriptor
// here (it is passed straight through) — confirm against Descriptors.hpp.
bool ClLayerSupport::IsMergerSupported(const std::vector<const TensorInfo*> inputs,
                                       const TensorInfo& output,
                                       const MergerDescriptor& descriptor,
                                       Optional<std::string&> reasonIfUnsupported) const
{
    return IsConcatSupported(inputs, output, descriptor, reasonIfUnsupported);
}
663 
665  const TensorInfo& input1,
666  const TensorInfo& output,
667  Optional<std::string&> reasonIfUnsupported) const
668 {
670  reasonIfUnsupported,
671  input0,
672  input1,
673  output);
674 }
675 
677  const TensorInfo& input1,
678  const TensorInfo& output,
679  Optional<std::string&> reasonIfUnsupported) const
680 {
682  reasonIfUnsupported,
683  input0,
684  input1,
685  output,
686  nullptr);
687 }
688 
690  const TensorInfo& output,
691  const NormalizationDescriptor& descriptor,
692  Optional<std::string&> reasonIfUnsupported) const
693 {
694  FORWARD_WORKLOAD_VALIDATE_FUNC(ClNormalizationWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
695 }
696 
698  Optional<std::string&> reasonIfUnsupported) const
699 {
700  return IsClBackendSupported(reasonIfUnsupported, output);
701 }
702 
704  const TensorInfo& output,
705  const PadDescriptor& descriptor,
706  Optional<std::string&> reasonIfUnsupported) const
707 {
709  reasonIfUnsupported,
710  input,
711  output,
712  descriptor);
713 }
714 
716  const TensorInfo& output,
717  const PermuteDescriptor& descriptor,
718  Optional<std::string&> reasonIfUnsupported) const
719 {
720  FORWARD_WORKLOAD_VALIDATE_FUNC(ClPermuteWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
721 }
722 
724  const TensorInfo& output,
725  const Pooling2dDescriptor& descriptor,
726  Optional<std::string&> reasonIfUnsupported) const
727 {
728  FORWARD_WORKLOAD_VALIDATE_FUNC(ClPooling2dWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
729 }
730 
732  const armnn::TensorInfo &alpha,
733  const armnn::TensorInfo &output,
734  armnn::Optional<std::string &> reasonIfUnsupported) const
735 {
736  FORWARD_WORKLOAD_VALIDATE_FUNC(ClPreluWorkloadValidate, reasonIfUnsupported, input, alpha, output);
737 }
738 
740  const TensorInfo& previousOutputIn,
741  const TensorInfo& previousCellStateIn,
742  const TensorInfo& outputStateOut,
743  const TensorInfo& cellStateOut,
744  const TensorInfo& output,
745  const QLstmDescriptor& descriptor,
746  const LstmInputParamsInfo& paramsInfo,
747  Optional<std::string&> reasonIfUnsupported) const
748 {
749  if (input.GetDataType() == armnn::DataType::QAsymmS8 &&
750  previousOutputIn.GetDataType() == armnn::DataType::QAsymmS8 &&
751  previousCellStateIn.GetDataType() == armnn::DataType::QSymmS16 &&
752  outputStateOut.GetDataType() == armnn::DataType::QAsymmS8 &&
753  cellStateOut.GetDataType() == armnn::DataType::QSymmS16 &&
755  {
757  reasonIfUnsupported,
758  input,
759  previousCellStateIn,
760  previousOutputIn,
761  cellStateOut,
762  outputStateOut,
763  output,
764  descriptor,
765  paramsInfo);
766  }
767  else
768  {
769  return false;
770  }
771 }
772 
774  const TensorInfo& previousCellStateIn,
775  const TensorInfo& previousOutputIn,
776  const TensorInfo& cellStateOut,
777  const TensorInfo& output,
778  const QuantizedLstmInputParamsInfo& paramsInfo,
779  Optional<std::string&> reasonIfUnsupported) const
780 {
782  reasonIfUnsupported,
783  input,
784  previousCellStateIn,
785  previousOutputIn,
786  cellStateOut,
787  output,
788  paramsInfo);
789 }
790 
792  const TensorInfo& output,
793  Optional<std::string&> reasonIfUnsupported) const
794 {
796  reasonIfUnsupported,
797  input,
798  output);
799 }
800 
802  const TensorInfo& output,
803  const ReshapeDescriptor& descriptor,
804  Optional<std::string&> reasonIfUnsupported) const
805 {
806  IgnoreUnused(descriptor);
807  FORWARD_WORKLOAD_VALIDATE_FUNC(ClReshapeWorkloadValidate, reasonIfUnsupported, input, output);
808 }
809 
811  const TensorInfo& output,
812  const ResizeDescriptor& descriptor,
813  Optional<std::string&> reasonIfUnsupported) const
814 {
815  FORWARD_WORKLOAD_VALIDATE_FUNC(ClResizeWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
816 }
817 
819  const TensorInfo& output,
820  Optional<std::string&> reasonIfUnsupported) const
821 {
822  ResizeDescriptor descriptor;
823  descriptor.m_Method = ResizeMethod::Bilinear;
824  descriptor.m_DataLayout = DataLayout::NCHW;
825 
826  const TensorShape& outputShape = output.GetShape();
827  descriptor.m_TargetHeight = outputShape[2];
828  descriptor.m_TargetWidth = outputShape[3];
829 
830  return IsResizeSupported(input, output, descriptor, reasonIfUnsupported);
831 }
832 
834  const TensorInfo& output,
835  Optional<std::string&> reasonIfUnsupported) const
836 {
838  return IsElementwiseUnarySupported(input, output, descriptor, reasonIfUnsupported);
839 }
840 
842  const TensorInfo& output,
843  const SliceDescriptor& descriptor,
844  Optional<std::string&> reasonIfUnsupported) const
845 {
846  FORWARD_WORKLOAD_VALIDATE_FUNC(ClSliceWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
847 }
848 
850  const TensorInfo& output,
851  const SoftmaxDescriptor& descriptor,
852  Optional<std::string&> reasonIfUnsupported) const
853 {
854  FORWARD_WORKLOAD_VALIDATE_FUNC(ClSoftmaxWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
855 }
856 
858  const TensorInfo& output,
859  const SpaceToBatchNdDescriptor& descriptor,
860  Optional<std::string&> reasonIfUnsupported) const
861 {
863  reasonIfUnsupported,
864  input,
865  output,
866  descriptor);
867 }
868 
870  const TensorInfo& output,
871  const SpaceToDepthDescriptor& descriptor,
872  Optional<std::string&> reasonIfUnsupported) const
873 {
875  reasonIfUnsupported,
876  input,
877  output,
878  descriptor);
879 }
880 
882  const ViewsDescriptor& descriptor,
883  Optional<std::string&> reasonIfUnsupported) const
884 {
885  IgnoreUnused(descriptor);
886  return IsSupportedForDataTypeCl(reasonIfUnsupported,
887  input.GetDataType(),
888  &TrueFunc<>,
889  &TrueFunc<>);
890 }
891 
893  const std::vector<std::reference_wrapper<TensorInfo>>& outputs,
894  const ViewsDescriptor& descriptor,
895  Optional<std::string&> reasonIfUnsupported) const
896 {
897 #if defined(ARMCOMPUTECL_ENABLED)
898  // Split along the last dimension, cannot use sub-tensors
899  // as width and height of the sub-tensors do not match
900  // the width and height of the parent tensor
901  // in case of input with more than 2D.
902  std::set<unsigned int> splitAxis = ComputeSplitAxis(descriptor, input.GetShape());
903  if (descriptor.GetNumDimensions() > 2 && splitAxis.size() == 1 &&
904  *splitAxis.begin() == descriptor.GetNumDimensions() - 1 )
905  {
907  reasonIfUnsupported,
908  input,
909  outputs,
910  *splitAxis.begin());
911  }
912 #endif
913  IgnoreUnused(descriptor);
914  for (auto output : outputs)
915  {
916  if (!input.IsTypeSpaceMatch(output)) // Cannot use sub-tensors if the types are not same space
917  {
918  SetValueChecked(reasonIfUnsupported, "Cl Splitter: Types and quantization parameters must match.");
919  return false;
920  }
921  }
922  return true;
923 }
924 
925 bool ClLayerSupport::IsStackSupported(const std::vector<const TensorInfo*>& inputs,
926  const TensorInfo& output,
927  const StackDescriptor& descriptor,
928  Optional<std::string&> reasonIfUnsupported) const
929 {
931  reasonIfUnsupported,
932  inputs,
933  output,
934  descriptor);
935 }
936 
938  const TensorInfo& output,
939  const StridedSliceDescriptor& descriptor,
940  Optional<std::string&> reasonIfUnsupported) const
941 {
943  reasonIfUnsupported,
944  input,
945  output,
946  descriptor);
947 }
948 
950  const TensorInfo& input1,
951  const TensorInfo& output,
952  Optional<std::string&> reasonIfUnsupported) const
953 {
955  reasonIfUnsupported,
956  input0,
957  input1,
958  output,
959  nullptr);
960 }
961 
963  const TensorInfo& output,
964  const TransposeConvolution2dDescriptor& descriptor,
965  const TensorInfo& weights,
966  const Optional<TensorInfo>& biases,
967  Optional<std::string&> reasonIfUnsupported) const
968 {
970  reasonIfUnsupported,
971  input,
972  output,
973  descriptor,
974  weights,
975  biases);
976 }
977 
979  const TensorInfo& output,
980  const TransposeDescriptor& descriptor,
981  Optional<std::string&> reasonIfUnsupported) const
982 {
983  FORWARD_WORKLOAD_VALIDATE_FUNC(ClTransposeWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
984 }
985 
986 } // namespace armnn
bool IsMinimumSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
arm_compute::Status ClAdditionValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ActivationDescriptor *activationDescriptor)
arm_compute::Status ClSpaceToDepthWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const SpaceToDepthDescriptor &desc)
bool IsOutputSupported(const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsTransposeSupported(const TensorInfo &input, const TensorInfo &output, const TransposeDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
arm_compute::Status ClLogicalOrWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output)
arm_compute::Status ClDequantizeWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
bool IsFloorSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
arm_compute::Status ClComparisonWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ComparisonDescriptor &descriptor)
UnaryOperation m_Operation
Specifies the elementwiseUnary operation to execute.
arm_compute::Status ClFullyConnectedWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const TensorInfo &weights, const TensorInfo &biases, const FullyConnectedDescriptor &descriptor, const ActivationDescriptor *activationDescriptor)
A ViewsDescriptor for the SplitterLayer.
arm_compute::Status ClL2NormalizationWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const L2NormalizationDescriptor &descriptor)
arm_compute::Status ClConvertFp32ToFp16WorkloadValidate(const TensorInfo &input, const TensorInfo &output)
bool IsTypeSpaceMatch(const TensorInfo &other) const
Check that the types are the same and, if quantize, that the quantization parameters are the same...
Definition: Tensor.cpp:423
arm_compute::Status ClDivisionWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ActivationDescriptor *activationDescriptor)
bool IsArgMinMaxSupported(const TensorInfo &input, const TensorInfo &output, const ArgMinMaxDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
A TransposeConvolution2dDescriptor for the TransposeConvolution2dLayer.
const TensorShape & GetShape() const
Definition: Tensor.hpp:187
arm_compute::Status ClInstanceNormalizationWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const InstanceNormalizationDescriptor &descriptor)
arm_compute::Status ClLogSoftmaxWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const LogSoftmaxDescriptor &descriptor)
A ReshapeDescriptor for the ReshapeLayer.
arm_compute::Status ClBatchToSpaceNdWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const BatchToSpaceNdDescriptor &desc)
bool IsSubtractionSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsPreluSupported(const TensorInfo &input, const TensorInfo &alpha, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
arm_compute::Status ClSplitterWorkloadValidate(const TensorInfo &input, const std::vector< std::reference_wrapper< TensorInfo >> &outputs, unsigned int splitAxis)
uint32_t GetNumDimensions() const
Get the number of dimensions.
A ComparisonDescriptor for the ComparisonLayer.
Definition: Descriptors.hpp:73
bool IsSliceSupported(const TensorInfo &input, const TensorInfo &output, const SliceDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
arm_compute::Status ClSliceWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const SliceDescriptor &descriptor)
ISubgraphViewConverter supported
A Convolution2dDescriptor for the Convolution2dLayer.
bool IsQuantizeSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
arm_compute::Status ClSubtractionValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ActivationDescriptor *activationDescriptor)
ResizeMethod m_Method
The Interpolation method to use (Bilinear, NearestNeighbor).
arm_compute::Status ClLstmFloatWorkloadValidate(const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &scratchBuffer, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const LstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo)
bool IsPadSupported(const TensorInfo &input, const TensorInfo &output, const PadDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
arm_compute::Status ClPermuteWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const PermuteDescriptor &descriptor)
bool IsGreaterSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported) const override
bool IsLstmSupported(const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &scratchBuffer, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const LstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
arm_compute::Status ClTransposeWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const TransposeDescriptor &descriptor)
arm_compute::Status ClReshapeWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
bool IsStridedSliceSupported(const TensorInfo &input, const TensorInfo &output, const StridedSliceDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsMergerSupported(const std::vector< const TensorInfo *> inputs, const TensorInfo &output, const MergerDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
A LogicalBinaryDescriptor for the LogicalBinaryLayer.
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported,...)
bool IsLogicalBinarySupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const LogicalBinaryDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported) const override
bool IsLogSoftmaxSupported(const TensorInfo &input, const TensorInfo &output, const LogSoftmaxDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
Copyright (c) 2020 ARM Limited.
arm_compute::Status ClConvolution2dWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const Convolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, bool isFastMathEnabled, const ActivationDescriptor *activationDescriptor)
void IgnoreUnused(Ts &&...)
std::set< unsigned int > ComputeSplitAxis(const armnn::SplitterDescriptor &desc, const TensorShape &input)
bool IsPermuteSupported(const TensorInfo &input, const TensorInfo &output, const PermuteDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsComparisonSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ComparisonDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
A SpaceToDepthDescriptor for the SpaceToDepthLayer.
bool IsNormalizationSupported(const TensorInfo &input, const TensorInfo &output, const NormalizationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
LogicalBinaryOperation m_Operation
Specifies the logical operation to execute.
A BatchToSpaceNdDescriptor for the BatchToSpaceNdLayer.
bool IsAbsSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsConvolution2dSupported(const TensorInfo &input, const TensorInfo &output, const Convolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
A ResizeDescriptor for the ResizeLayer.
arm_compute::Status ClSoftmaxWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const SoftmaxDescriptor &descriptor)
A StackDescriptor for the StackLayer.
bool IsQuantizedLstmSupported(const TensorInfo &input, const TensorInfo &previousCellStateIn, const TensorInfo &previousOutputIn, const TensorInfo &cellStateOut, const TensorInfo &output, const QuantizedLstmInputParamsInfo &paramsInfo, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
arm_compute::Status ClNormalizationWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const NormalizationDescriptor &descriptor)
arm_compute::Status ClPooling2dWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const Pooling2dDescriptor &descriptor)
A PadDescriptor for the PadLayer.
bool IsElementwiseUnarySupported(const TensorInfo &input, const TensorInfo &output, const ElementwiseUnaryDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
arm_compute::Status ClLogicalNotWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
bool IsGatherSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const GatherDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported) const override
DataType
Definition: Types.hpp:32
bool IsBatchNormalizationSupported(const TensorInfo &input, const TensorInfo &output, const TensorInfo &mean, const TensorInfo &var, const TensorInfo &beta, const TensorInfo &gamma, const BatchNormalizationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
An LstmDescriptor for the LstmLayer.
bool IsConvertFp16ToFp32Supported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsDepthToSpaceSupported(const TensorInfo &input, const TensorInfo &output, const DepthToSpaceDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
arm_compute::Status ClStackWorkloadValidate(const std::vector< const TensorInfo *> &inputs, const TensorInfo &output, const StackDescriptor &descriptor)
arm_compute::Status ClConstantWorkloadValidate(const TensorInfo &output)
bool IsSpaceToBatchNdSupported(const TensorInfo &input, const TensorInfo &output, const SpaceToBatchNdDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsResizeSupported(const TensorInfo &input, const TensorInfo &output, const ResizeDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
std::shared_ptr< IBackendModelContext > IBackendSpecificModelContextPtr
arm_compute::Status ClPadValidate(const TensorInfo &input, const TensorInfo &output, const PadDescriptor &descriptor)
A L2NormalizationDescriptor for the L2NormalizationLayer.
arm_compute::Status ClSpaceToBatchNdWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const SpaceToBatchNdDescriptor &descriptor)
An ArgMinMaxDescriptor for ArgMinMaxLayer.
Definition: Descriptors.hpp:51
DataType GetDataType() const
Definition: Tensor.hpp:194
An OriginsDescriptor for the ConcatLayer.
A FullyConnectedDescriptor for the FullyConnectedLayer.
arm_compute::Status ClStridedSliceWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const StridedSliceDescriptor &descriptor)
arm_compute::Status ClMultiplicationWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ActivationDescriptor *activationDescriptor)
uint32_t m_TargetWidth
Target width value.
A GatherDescriptor for the GatherLayer.
arm_compute::Status ClConvertFp16ToFp32WorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Status
enumeration
Definition: Types.hpp:26
arm_compute::Status ClGatherWorkloadValidate(const TensorInfo &input, const TensorInfo &indices, const TensorInfo &output, const GatherDescriptor &descriptor)
bool IsFillSupported(const TensorInfo &input, const TensorInfo &output, const FillDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsStackSupported(const std::vector< const TensorInfo *> &inputs, const TensorInfo &output, const StackDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsReshapeSupported(const TensorInfo &input, const TensorInfo &output, const ReshapeDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsDilatedDepthwiseConvolutionSupported(const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string &> reason=EmptyOptional()) const override
A QLstmDescriptor for the QLstmLayer.
An ActivationDescriptor for the ActivationLayer.
Definition: Descriptors.hpp:20
bool IsQLstmSupported(const TensorInfo &input, const TensorInfo &previousOutputIn, const TensorInfo &previousCellStateIn, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const QLstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
uint32_t m_TargetHeight
Target height value.
A SliceDescriptor for the SliceLayer.
bool IsDivisionSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsMultiplicationSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
arm_compute::Status ClResizeWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const ResizeDescriptor &descriptor)
bool IsFullyConnectedSupported(const TensorInfo &input, const TensorInfo &output, const TensorInfo &weights, const TensorInfo &biases, const FullyConnectedDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
void SetValueChecked(Optional< T &> optionalRef, V &&val)
bool IsDequantizeSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
A SpaceToBatchNdDescriptor for the SpaceToBatchNdLayer.
arm_compute::Status ClMeanValidate(const TensorInfo &input, const TensorInfo &output, const MeanDescriptor &desc)
bool IsResizeBilinearSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
A ElementwiseUnaryDescriptor for the ElementwiseUnaryLayer.
Definition: Descriptors.hpp:93
bool IsAdditionSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsMaximumSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
arm_compute::Status ClPreluWorkloadValidate(const TensorInfo &input, const TensorInfo &alpha, const TensorInfo &output)
arm_compute::Status ClAbsWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
bool IsMeanSupported(const TensorInfo &input, const TensorInfo &output, const MeanDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
arm_compute::Status ClMinimumWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output)
uint32_t GetNumDimensions() const
Get the number of dimensions.
A MeanDescriptor for the MeanLayer.
arm_compute::Status ClNegWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
bool IsInputSupported(const TensorInfo &input, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
arm_compute::Status ClBatchNormalizationValidate(const TensorInfo &input, const TensorInfo &output, const TensorInfo &mean, const TensorInfo &var, const TensorInfo &beta, const TensorInfo &gamma, const BatchNormalizationDescriptor &desc, const ActivationDescriptor *activationDescriptor)
arm_compute::Status ClArgMinMaxWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const ArgMinMaxDescriptor &descriptor)
A TransposeDescriptor for the TransposeLayer.
A StridedSliceDescriptor for the StridedSliceLayer.
arm_compute::Status ClConcatWorkloadValidate(const std::vector< const TensorInfo *> &inputs, const TensorInfo &output, const OriginsDescriptor &descriptor)
bool IsSoftmaxSupported(const TensorInfo &input, const TensorInfo &output, const SoftmaxDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
arm_compute::Status ClExpWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
arm_compute::Status ClQLstmWorkloadValidate(const TensorInfo &input, const TensorInfo &cellStateIn, const TensorInfo &outputStateIn, const TensorInfo &cellStateOut, const TensorInfo &outputStateOut, const TensorInfo &output, const QLstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo)
bool IsConcatSupported(const std::vector< const TensorInfo *> inputs, const TensorInfo &output, const ConcatDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
arm_compute::Status ClTransposeConvolution2dWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const TransposeConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases)
bool IsL2NormalizationSupported(const TensorInfo &input, const TensorInfo &output, const L2NormalizationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
arm_compute::Status ClDepthToSpaceWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const DepthToSpaceDescriptor &desc)
arm_compute::Status ClActivationWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const ActivationDescriptor &descriptor)
bool IsDepthwiseConvolutionSupported(const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
A Pooling2dDescriptor for the Pooling2dLayer.
A NormalizationDescriptor for the NormalizationLayer.
bool IsSplitterSupported(const TensorInfo &input, const ViewsDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
The ClBackendModelContext is used to pass in CL specific backend ModelOptions.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
An InstanceNormalizationDescriptor for InstanceNormalizationLayer.
bool IsConvertFp32ToFp16Supported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
unsigned int GetConcatAxis() const
Get the concatenation axis value.
bool IsInstanceNormalizationSupported(const TensorInfo &input, const TensorInfo &output, const InstanceNormalizationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsPooling2dSupported(const TensorInfo &input, const TensorInfo &output, const Pooling2dDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsRsqrtSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
arm_compute::Status ClMaximumWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output)
bool IsActivationSupported(const TensorInfo &input, const TensorInfo &output, const ActivationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsSupportedForDataTypeGeneric(Optional< std::string &> reasonIfUnsupported, DataType dataType, Float16Func float16FuncPtr, Float32Func float32FuncPtr, Uint8Func uint8FuncPtr, Int32Func int32FuncPtr, BooleanFunc booleanFuncPtr, Params &&... params)
A SoftmaxDescriptor for the SoftmaxLayer.
arm_compute::Status ClDepthwiseConvolutionWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, const ActivationDescriptor *activationDescriptor)
arm_compute::Status ClQuantizedLstmWorkloadValidate(const TensorInfo &input, const TensorInfo &previousCellStateIn, const TensorInfo &previousOutputIn, const TensorInfo &cellStateOut, const TensorInfo &output, const QuantizedLstmInputParamsInfo &paramsInfo)
arm_compute::Status ClFloorWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
arm_compute::Status ClRsqrtWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
bool IsSpaceToDepthSupported(const TensorInfo &input, const TensorInfo &output, const SpaceToDepthDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
A DepthwiseConvolution2dDescriptor for the DepthwiseConvolution2dLayer.
A FillDescriptor for the FillLayer.
A BatchNormalizationDescriptor for the BatchNormalizationLayer.
arm_compute::Status ClLogicalAndWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output)
arm_compute::Status ClQuantizeWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
A PermuteDescriptor for the PermuteLayer.
bool IsBatchToSpaceNdSupported(const TensorInfo &input, const TensorInfo &output, const BatchToSpaceNdDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsConstantSupported(const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsTransposeConvolution2dSupported(const TensorInfo &input, const TensorInfo &output, const TransposeConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override