ArmNN
 21.08
ClLayerSupport.cpp
Go to the documentation of this file.
1 //
2 // Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
6 #include "ClLayerSupport.hpp"
7 #include "ClBackendId.hpp"
9 
10 #include <armnn/Descriptors.hpp>
12 
13 #include <InternalTypes.hpp>
14 #include <LayerSupportCommon.hpp>
15 
18 
19 #if defined(ARMCOMPUTECL_ENABLED)
80 #endif
81 
82 
83 namespace armnn
84 {
85 
86 namespace
87 {
88 
89 template<unsigned int FilterSize>
90 bool IsMatchingSize2d(const TensorInfo& weightInfo)
91 {
92  // Width & Height must match.
93  return (weightInfo.GetShape()[3] == FilterSize) && (weightInfo.GetShape()[2] == FilterSize);
94 }
95 
template<uint32_t ValidStride>
bool IsMatchingStride(uint32_t actualStride)
{
    // Base case: compare against a single candidate stride.
    return actualStride == ValidStride;
}

template<uint32_t FirstStride, uint32_t SecondStride, uint32_t... ValidStrides>
bool IsMatchingStride(uint32_t actualStride)
{
    // Recursive case: the stride is accepted if any candidate in the pack matches.
    if (IsMatchingStride<FirstStride>(actualStride))
    {
        return true;
    }
    return IsMatchingStride<SecondStride, ValidStrides...>(actualStride);
}
107 
// Reports whether the CL backend is available at all; the layer arguments are
// never inspected. With ARMCOMPUTECL_ENABLED this is unconditionally true,
// otherwise it returns false and fills in the reason when one was requested.
template<typename ... Args>
bool IsClBackendSupported(Optional<std::string&> reasonIfUnsupported, Args... args)
{
    IgnoreUnused(reasonIfUnsupported, (args)...);
#if defined(ARMCOMPUTECL_ENABLED)
    return true;
#else
    if (reasonIfUnsupported)
    {
        reasonIfUnsupported.value() = "The armnn library has been built without CL support";
    }
    return false;
#endif
}
122 
123 #if defined(ARMCOMPUTECL_ENABLED)
124 #define FORWARD_CL_LAYER_SUPPORT_FUNC(expr) (expr)
125 #else
126 #define FORWARD_CL_LAYER_SUPPORT_FUNC(expr) IsClBackendSupported(reasonIfUnsupported)
127 #endif
128 
129 #if defined(ARMCOMPUTECL_ENABLED)
130 template<class FuncType, class... Args>
131 inline bool IsWorkloadSupported(FuncType&& func, Optional<std::string&> reasonIfUnsupported, Args&&... args)
132 {
133  arm_compute::Status aclStatus = func(std::forward<Args>(args)...);
134  const bool supported = (aclStatus.error_code() == arm_compute::ErrorCode::OK);
135  if (!supported && reasonIfUnsupported)
136  {
137  reasonIfUnsupported.value() = aclStatus.error_description();
138  }
139  return supported;
140 }
141 
142 #define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported, ...) \
143  return IsWorkloadSupported(func, reasonIfUnsupported, __VA_ARGS__);
144 #else
145 #define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported, ...) \
146  return IsClBackendSupported(reasonIfUnsupported, __VA_ARGS__);
147 #endif
148 
149 template<typename FloatFunc, typename Uint8Func, typename ... Params>
150 bool IsSupportedForDataTypeCl(Optional<std::string&> reasonIfUnsupported,
151  DataType dataType,
152  FloatFunc floatFuncPtr,
153  Uint8Func uint8FuncPtr,
154  Params&&... params)
155 {
156  return IsClBackendSupported(reasonIfUnsupported) &&
157  IsSupportedForDataTypeGeneric(reasonIfUnsupported,
158  dataType,
159  floatFuncPtr,
160  floatFuncPtr,
161  uint8FuncPtr,
162  &FalseFunc<>,
163  &FalseFunc<>,
164  std::forward<Params>(params)...);
165 }
166 } // anonymous namespace
167 
169  : m_ModelContextPtr(modelContextPtr)
170 {
171 }
172 
174  : m_ModelContextPtr(nullptr)
175 {
176 }
177 
179  const TensorInfo& output,
180  Optional<std::string&> reasonIfUnsupported) const
181 {
183  return IsElementwiseUnarySupported(input, output, descriptor, reasonIfUnsupported);
184 }
185 
187  const TensorInfo& output,
188  const ActivationDescriptor& descriptor,
189  Optional<std::string&> reasonIfUnsupported) const
190 {
192  reasonIfUnsupported,
193  input,
194  output,
195  descriptor);
196 }
197 
199  const TensorInfo& input1,
200  const TensorInfo& output,
201  Optional<std::string&> reasonIfUnsupported) const
202 {
204  reasonIfUnsupported,
205  input0,
206  input1,
207  output,
208  nullptr);
209 }
210 
212  const TensorInfo& output,
213  const ArgMinMaxDescriptor& descriptor,
214  Optional<std::string&> reasonIfUnsupported) const
215 {
216 
218  reasonIfUnsupported,
219  input,
220  output,
221  descriptor);
222 }
223 
225  const TensorInfo& output,
226  const TensorInfo& mean,
227  const TensorInfo& var,
228  const TensorInfo& beta,
229  const TensorInfo& gamma,
230  const BatchNormalizationDescriptor& descriptor,
231  Optional<std::string&> reasonIfUnsupported) const
232 {
234  reasonIfUnsupported,
235  input,
236  output,
237  mean,
238  var,
239  beta,
240  gamma,
241  descriptor,
242  nullptr);
243 }
244 
246  const TensorInfo& output,
247  Optional<std::string&> reasonIfUnsupported) const
248 {
250  reasonIfUnsupported,
251  input,
252  output);
253 }
254 
256  const TensorInfo& output,
257  const BatchToSpaceNdDescriptor& descriptor,
258  Optional<std::string&> reasonIfUnsupported) const
259 {
261  reasonIfUnsupported,
262  input,
263  output,
264  descriptor);
265 }
266 
268  const TensorInfo& input1,
269  const TensorInfo& output,
270  const ComparisonDescriptor& descriptor,
271  Optional<std::string&> reasonIfUnsupported) const
272 {
274  reasonIfUnsupported,
275  input0,
276  input1,
277  output,
278  descriptor);
279 }
280 
281 bool ClLayerSupport::IsConcatSupported(const std::vector<const TensorInfo*> inputs,
282  const TensorInfo& output,
283  const ConcatDescriptor& descriptor,
284  Optional<std::string&> reasonIfUnsupported) const
285 {
286  if (descriptor.GetNumDimensions() <= descriptor.GetConcatAxis())
287  {
288  SetValueChecked(reasonIfUnsupported, "Cl Concat: Concat axis > Number of dimensions.");
289  return false;
290  }
291 
292  unsigned int concatInnerAxis = (descriptor.GetNumDimensions() - descriptor.GetConcatAxis()) - 1;
293  if(concatInnerAxis < 3) // Width, height, or channels
294  {
296  reasonIfUnsupported,
297  inputs,
298  output,
299  descriptor);
300  }
301  else if (concatInnerAxis == 3)
302  {
303  // We rely on the sub-tensor optimization to handle the batch dimension for 4D tensors. If we can't use
304  // sub-tensors for this then we can't support it. Here is where we check that the sub-tensors will work.
305  for (auto& input : inputs)
306  {
307  if (input && !output.IsTypeSpaceMatch(*input)) // Cannot use sub-tensors if the types are not same space
308  {
309  SetValueChecked(reasonIfUnsupported, "Cl Concat: Types and quantization parameters must match.");
310  return false;
311  }
312  }
313  return true; // Sub-tensors support concat along batch
314  }
315  else // > 4 dimensions not supported.
316  {
317  SetValueChecked(reasonIfUnsupported, "Cl Concat: Maximum of 4 dimensions supported.");
318  return false;
319  }
320 }
321 
323  Optional<std::string&> reasonIfUnsupported) const
324 {
326  reasonIfUnsupported,
327  output);
328 }
329 
331  const TensorInfo& output,
332  Optional<std::string&> reasonIfUnsupported) const
333 {
335  reasonIfUnsupported,
336  input,
337  output);
338 }
339 
341  const TensorInfo& output,
342  Optional<std::string&> reasonIfUnsupported) const
343 {
345  reasonIfUnsupported,
346  input,
347  output);
348 }
349 
351  const TensorInfo& output,
352  const Convolution2dDescriptor& descriptor,
353  const TensorInfo& weights,
354  const Optional<TensorInfo>& biases,
355  Optional<std::string&> reasonIfUnsupported) const
356 {
357  bool isFastMathEnabled = false;
358 #if defined(ARMCOMPUTECL_ENABLED)
359  if (m_ModelContextPtr)
360  {
361  if (m_ModelContextPtr.get() != nullptr)
362  {
363  auto modelOptions = dynamic_cast<ClBackendModelContext*>(m_ModelContextPtr.get());
364  if (modelOptions)
365  {
366  isFastMathEnabled = modelOptions->IsFastMathEnabled();
367  }
368  }
369  }
370 #endif
371 
373  reasonIfUnsupported,
374  input,
375  output,
376  descriptor,
377  weights,
378  biases,
379  isFastMathEnabled,
380  nullptr);
381 }
382 
384  const TensorInfo& output,
385  Optional<std::string&> reasonIfUnsupported) const
386 {
388  reasonIfUnsupported,
389  input,
390  output);
391 }
392 
394  const TensorInfo& output,
395  const DepthToSpaceDescriptor& descriptor,
396  Optional<std::string&> reasonIfUnsupported) const
397 {
399  reasonIfUnsupported,
400  input,
401  output,
402  descriptor);
403 }
404 
406  const TensorInfo& output,
407  const DepthwiseConvolution2dDescriptor& descriptor,
408  const TensorInfo& weights,
409  const Optional<TensorInfo>& biases,
410  Optional<std::string&> reasonIfUnsupported) const
411 {
413  reasonIfUnsupported,
414  input,
415  output,
416  descriptor,
417  weights,
418  biases,
419  nullptr);
420 }
421 
423  const TensorInfo& output,
424  const DepthwiseConvolution2dDescriptor& descriptor,
425  const TensorInfo& weights,
426  const Optional<TensorInfo>& biases,
427  Optional<std::string&> reasonIfUnsupported) const
428 {
430  reasonIfUnsupported,
431  input,
432  output,
433  descriptor,
434  weights,
435  biases,
436  nullptr);
437 }
438 
439 
441  const TensorInfo& input1,
442  const TensorInfo& output,
443  Optional<std::string&> reasonIfUnsupported) const
444 {
446  reasonIfUnsupported,
447  input0,
448  input1,
449  output,
450  nullptr);
451 }
452 
454  const TensorInfo& output,
455  const ElementwiseUnaryDescriptor& descriptor,
456  Optional<std::string&> reasonIfUnsupported) const
457 {
458  switch(descriptor.m_Operation)
459  {
460  case UnaryOperation::Abs:
462  reasonIfUnsupported,
463  input,
464  output);
465  case UnaryOperation::Exp:
467  reasonIfUnsupported,
468  input,
469  output);
470  case UnaryOperation::Log:
472  reasonIfUnsupported,
473  input,
474  output);
477  reasonIfUnsupported,
478  input,
479  output);
480  case UnaryOperation::Neg:
482  reasonIfUnsupported,
483  input,
484  output);
487  reasonIfUnsupported,
488  input,
489  output);
490  case UnaryOperation::Sin:
492  reasonIfUnsupported,
493  input,
494  output);
495  default:
496  return false;
497  }
498 }
499 
501  const TensorInfo& output,
502  const FillDescriptor& descriptor,
503  Optional<std::string&> reasonIfUnsupported) const
504 {
505  armnn::IgnoreUnused(input);
506  armnn::IgnoreUnused(output);
507  armnn::IgnoreUnused(descriptor);
508 
509  return IsClBackendSupported(reasonIfUnsupported);
510 }
511 
513  const TensorInfo& output,
514  Optional<std::string&> reasonIfUnsupported) const
515 {
517  reasonIfUnsupported,
518  input,
519  output);
520 }
521 
523  const TensorInfo& output,
524  const TensorInfo& weights,
525  const TensorInfo& biases,
526  const FullyConnectedDescriptor& descriptor,
527  Optional<std::string&> reasonIfUnsupported) const
528 {
530  reasonIfUnsupported,
531  input,
532  output,
533  weights,
534  biases,
535  descriptor,
536  nullptr);
537 }
538 
540  const TensorInfo& input1,
541  const TensorInfo& output,
542  const GatherDescriptor& descriptor,
543  Optional<std::string&> reasonIfUnsupported) const
544 {
546  reasonIfUnsupported,
547  input0,
548  input1,
549  output,
550  descriptor);
551 }
552 
554  const TensorInfo& input1,
555  const TensorInfo& output,
556  Optional<std::string&> reasonIfUnsupported) const
557 {
559  return IsComparisonSupported(input0, input1, output, descriptor, reasonIfUnsupported);
560 }
561 
563  Optional<std::string&> reasonIfUnsupported) const
564 {
565  return IsClBackendSupported(reasonIfUnsupported, input);
566 }
567 
569  const TensorInfo& output,
570  const InstanceNormalizationDescriptor& descriptor,
571  Optional<std::string&> reasonIfUnsupported) const
572 {
574  reasonIfUnsupported,
575  input,
576  output,
577  descriptor);
578 }
579 
581  const TensorInfo& output,
582  const L2NormalizationDescriptor& descriptor,
583  Optional<std::string&> reasonIfUnsupported) const
584 {
586  reasonIfUnsupported,
587  input,
588  output,
589  descriptor);
590 }
591 
593  const TensorInfo& input1,
594  const TensorInfo& output,
595  const LogicalBinaryDescriptor& descriptor,
596  Optional<std::string&> reasonIfUnsupported) const
597 {
598  IgnoreUnused(output);
599 
600  switch(descriptor.m_Operation)
601  {
604  reasonIfUnsupported,
605  input0,
606  input1,
607  output);
610  reasonIfUnsupported,
611  input0,
612  input1,
613  output);
614  default:
615  return false;
616  }
617 }
618 
619 
621  const TensorInfo& output,
622  const LogSoftmaxDescriptor& descriptor,
623  Optional<std::string&> reasonIfUnsupported) const
624 {
626  reasonIfUnsupported,
627  input,
628  output,
629  descriptor);
630 }
631 
633  const TensorInfo& outputStateIn,
634  const TensorInfo& cellStateIn,
635  const TensorInfo& scratchBuffer,
636  const TensorInfo& outputStateOut,
637  const TensorInfo& cellStateOut,
638  const TensorInfo& output,
639  const LstmDescriptor& descriptor,
640  const LstmInputParamsInfo& paramsInfo,
641  Optional<std::string&> reasonIfUnsupported) const
642 {
644  reasonIfUnsupported,
645  input,
646  outputStateIn,
647  cellStateIn,
648  scratchBuffer,
649  outputStateOut,
650  cellStateOut,
651  output,
652  descriptor,
653  paramsInfo);
654 }
655 
657  const TensorInfo& input1,
658  const TensorInfo& output,
659  Optional<std::string&> reasonIfUnsupported) const
660 {
662  reasonIfUnsupported,
663  input0,
664  input1,
665  output);
666 }
667 
669  const TensorInfo& output,
670  const MeanDescriptor& descriptor,
671  Optional<std::string&> reasonIfUnsupported) const
672 {
674  reasonIfUnsupported,
675  input,
676  output,
677  descriptor);
678 }
679 
// Merger is the legacy name for Concat (same descriptor underneath), so the
// support rules are identical: defer directly to IsConcatSupported.
bool ClLayerSupport::IsMergerSupported(const std::vector<const TensorInfo*> inputs,
                                       const TensorInfo& output,
                                       const MergerDescriptor& descriptor,
                                       Optional<std::string&> reasonIfUnsupported) const
{
    return IsConcatSupported(inputs, output, descriptor, reasonIfUnsupported);
}
687 
689  const TensorInfo& input1,
690  const TensorInfo& output,
691  Optional<std::string&> reasonIfUnsupported) const
692 {
694  reasonIfUnsupported,
695  input0,
696  input1,
697  output);
698 }
699 
701  const TensorInfo& input1,
702  const TensorInfo& output,
703  Optional<std::string&> reasonIfUnsupported) const
704 {
706  reasonIfUnsupported,
707  input0,
708  input1,
709  output,
710  nullptr);
711 }
712 
714  const TensorInfo& output,
715  const NormalizationDescriptor& descriptor,
716  Optional<std::string&> reasonIfUnsupported) const
717 {
718  FORWARD_WORKLOAD_VALIDATE_FUNC(ClNormalizationWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
719 }
720 
722  Optional<std::string&> reasonIfUnsupported) const
723 {
724  return IsClBackendSupported(reasonIfUnsupported, output);
725 }
726 
728  const TensorInfo& output,
729  const PadDescriptor& descriptor,
730  Optional<std::string&> reasonIfUnsupported) const
731 {
733  reasonIfUnsupported,
734  input,
735  output,
736  descriptor);
737 }
738 
740  const TensorInfo& output,
741  const PermuteDescriptor& descriptor,
742  Optional<std::string&> reasonIfUnsupported) const
743 {
744  FORWARD_WORKLOAD_VALIDATE_FUNC(ClPermuteWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
745 }
746 
748  const TensorInfo& output,
749  const Pooling2dDescriptor& descriptor,
750  Optional<std::string&> reasonIfUnsupported) const
751 {
752  FORWARD_WORKLOAD_VALIDATE_FUNC(ClPooling2dWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
753 }
754 
756  const armnn::TensorInfo &alpha,
757  const armnn::TensorInfo &output,
758  armnn::Optional<std::string &> reasonIfUnsupported) const
759 {
760  FORWARD_WORKLOAD_VALIDATE_FUNC(ClPreluWorkloadValidate, reasonIfUnsupported, input, alpha, output);
761 }
762 
764  const TensorInfo& previousOutputIn,
765  const TensorInfo& previousCellStateIn,
766  const TensorInfo& outputStateOut,
767  const TensorInfo& cellStateOut,
768  const TensorInfo& output,
769  const QLstmDescriptor& descriptor,
770  const LstmInputParamsInfo& paramsInfo,
771  Optional<std::string&> reasonIfUnsupported) const
772 {
773  if (input.GetDataType() == armnn::DataType::QAsymmS8 &&
774  previousOutputIn.GetDataType() == armnn::DataType::QAsymmS8 &&
775  previousCellStateIn.GetDataType() == armnn::DataType::QSymmS16 &&
776  outputStateOut.GetDataType() == armnn::DataType::QAsymmS8 &&
777  cellStateOut.GetDataType() == armnn::DataType::QSymmS16 &&
779  {
781  reasonIfUnsupported,
782  input,
783  previousCellStateIn,
784  previousOutputIn,
785  cellStateOut,
786  outputStateOut,
787  output,
788  descriptor,
789  paramsInfo);
790  }
791  else
792  {
793  return false;
794  }
795 }
796 
798  const TensorInfo& previousCellStateIn,
799  const TensorInfo& previousOutputIn,
800  const TensorInfo& cellStateOut,
801  const TensorInfo& output,
802  const QuantizedLstmInputParamsInfo& paramsInfo,
803  Optional<std::string&> reasonIfUnsupported) const
804 {
806  reasonIfUnsupported,
807  input,
808  previousCellStateIn,
809  previousOutputIn,
810  cellStateOut,
811  output,
812  paramsInfo);
813 }
814 
816  const TensorInfo& output,
817  Optional<std::string&> reasonIfUnsupported) const
818 {
820  reasonIfUnsupported,
821  input,
822  output);
823 }
824 
826  const TensorInfo& output,
827  const ReduceDescriptor& descriptor,
828  Optional<std::string&> reasonIfUnsupported) const
829 {
831  reasonIfUnsupported,
832  input,
833  output,
834  descriptor);
835 }
836 
838  const TensorInfo& output,
839  const ReshapeDescriptor& descriptor,
840  Optional<std::string&> reasonIfUnsupported) const
841 {
842  IgnoreUnused(descriptor);
843  FORWARD_WORKLOAD_VALIDATE_FUNC(ClReshapeWorkloadValidate, reasonIfUnsupported, input, output);
844 }
845 
847  const TensorInfo& output,
848  const ResizeDescriptor& descriptor,
849  Optional<std::string&> reasonIfUnsupported) const
850 {
851  FORWARD_WORKLOAD_VALIDATE_FUNC(ClResizeWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
852 }
853 
855  const TensorInfo& output,
856  Optional<std::string&> reasonIfUnsupported) const
857 {
858  ResizeDescriptor descriptor;
859  descriptor.m_Method = ResizeMethod::Bilinear;
860  descriptor.m_DataLayout = DataLayout::NCHW;
861 
862  const TensorShape& outputShape = output.GetShape();
863  descriptor.m_TargetHeight = outputShape[2];
864  descriptor.m_TargetWidth = outputShape[3];
865 
866  return IsResizeSupported(input, output, descriptor, reasonIfUnsupported);
867 }
868 
870  const TensorInfo& output,
871  Optional<std::string&> reasonIfUnsupported) const
872 {
874  return IsElementwiseUnarySupported(input, output, descriptor, reasonIfUnsupported);
875 }
876 
878  const TensorInfo& output,
879  const SliceDescriptor& descriptor,
880  Optional<std::string&> reasonIfUnsupported) const
881 {
882  FORWARD_WORKLOAD_VALIDATE_FUNC(ClSliceWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
883 }
884 
886  const TensorInfo& output,
887  const SoftmaxDescriptor& descriptor,
888  Optional<std::string&> reasonIfUnsupported) const
889 {
890  FORWARD_WORKLOAD_VALIDATE_FUNC(ClSoftmaxWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
891 }
892 
894  const TensorInfo& output,
895  const SpaceToBatchNdDescriptor& descriptor,
896  Optional<std::string&> reasonIfUnsupported) const
897 {
899  reasonIfUnsupported,
900  input,
901  output,
902  descriptor);
903 }
904 
906  const TensorInfo& output,
907  const SpaceToDepthDescriptor& descriptor,
908  Optional<std::string&> reasonIfUnsupported) const
909 {
911  reasonIfUnsupported,
912  input,
913  output,
914  descriptor);
915 }
916 
918  const ViewsDescriptor& descriptor,
919  Optional<std::string&> reasonIfUnsupported) const
920 {
921  IgnoreUnused(descriptor);
922  return IsSupportedForDataTypeCl(reasonIfUnsupported,
923  input.GetDataType(),
924  &TrueFunc<>,
925  &TrueFunc<>);
926 }
927 
929  const std::vector<std::reference_wrapper<TensorInfo>>& outputs,
930  const ViewsDescriptor& descriptor,
931  Optional<std::string&> reasonIfUnsupported) const
932 {
933 #if defined(ARMCOMPUTECL_ENABLED)
934  // Split along the last dimension, cannot use sub-tensors
935  // as width and height of the sub-tensors do not match
936  // the width and height of the parent tensor
937  // in case of input with more than 2D.
938  std::set<unsigned int> splitAxis = ComputeSplitAxis(descriptor, input.GetShape());
939  if (descriptor.GetNumDimensions() > 2 && splitAxis.size() == 1 &&
940  *splitAxis.begin() == descriptor.GetNumDimensions() - 1 )
941  {
943  reasonIfUnsupported,
944  input,
945  outputs,
946  *splitAxis.begin());
947  }
948 #endif
949  IgnoreUnused(descriptor);
950  for (auto output : outputs)
951  {
952  if (!input.IsTypeSpaceMatch(output)) // Cannot use sub-tensors if the types are not same space
953  {
954  SetValueChecked(reasonIfUnsupported, "Cl Splitter: Types and quantization parameters must match.");
955  return false;
956  }
957  }
958  return true;
959 }
960 
961 bool ClLayerSupport::IsStackSupported(const std::vector<const TensorInfo*>& inputs,
962  const TensorInfo& output,
963  const StackDescriptor& descriptor,
964  Optional<std::string&> reasonIfUnsupported) const
965 {
967  reasonIfUnsupported,
968  inputs,
969  output,
970  descriptor);
971 }
972 
974  const TensorInfo& output,
975  const StridedSliceDescriptor& descriptor,
976  Optional<std::string&> reasonIfUnsupported) const
977 {
979  reasonIfUnsupported,
980  input,
981  output,
982  descriptor);
983 }
984 
986  const TensorInfo& input1,
987  const TensorInfo& output,
988  Optional<std::string&> reasonIfUnsupported) const
989 {
991  reasonIfUnsupported,
992  input0,
993  input1,
994  output,
995  nullptr);
996 }
997 
999  const TensorInfo& output,
1000  const TransposeConvolution2dDescriptor& descriptor,
1001  const TensorInfo& weights,
1002  const Optional<TensorInfo>& biases,
1003  Optional<std::string&> reasonIfUnsupported) const
1004 {
1006  reasonIfUnsupported,
1007  input,
1008  output,
1009  descriptor,
1010  weights,
1011  biases);
1012 }
1013 
1015  const TensorInfo& output,
1016  const TransposeDescriptor& descriptor,
1017  Optional<std::string&> reasonIfUnsupported) const
1018 {
1019  FORWARD_WORKLOAD_VALIDATE_FUNC(ClTransposeWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
1020 }
1021 
1022 } // namespace armnn
bool IsMinimumSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
arm_compute::Status ClAdditionValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ActivationDescriptor *activationDescriptor)
bool IsOutputSupported(const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsTransposeSupported(const TensorInfo &input, const TensorInfo &output, const TransposeDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
arm_compute::Status ClBatchToSpaceNdWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const BatchToSpaceNdDescriptor &descriptor)
arm_compute::Status ClLogicalOrWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output)
arm_compute::Status ClDequantizeWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
bool IsFloorSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
arm_compute::Status ClComparisonWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ComparisonDescriptor &descriptor)
UnaryOperation m_Operation
Specifies the elementwiseUnary operation to execute.
arm_compute::Status ClFullyConnectedWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const TensorInfo &weights, const TensorInfo &biases, const FullyConnectedDescriptor &descriptor, const ActivationDescriptor *activationDescriptor)
A ViewsDescriptor for the SplitterLayer.
arm_compute::Status ClL2NormalizationWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const L2NormalizationDescriptor &descriptor)
arm_compute::Status ClConvertFp32ToFp16WorkloadValidate(const TensorInfo &input, const TensorInfo &output)
bool IsTypeSpaceMatch(const TensorInfo &other) const
Check that the types are the same and, if quantize, that the quantization parameters are the same...
Definition: Tensor.cpp:434
arm_compute::Status ClDivisionWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ActivationDescriptor *activationDescriptor)
bool IsArgMinMaxSupported(const TensorInfo &input, const TensorInfo &output, const ArgMinMaxDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsReduceSupported(const TensorInfo &input, const TensorInfo &output, const ReduceDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
A TransposeConvolution2dDescriptor for the TransposeConvolution2dLayer.
const TensorShape & GetShape() const
Definition: Tensor.hpp:191
arm_compute::Status ClInstanceNormalizationWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const InstanceNormalizationDescriptor &descriptor)
arm_compute::Status ClLogSoftmaxWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const LogSoftmaxDescriptor &descriptor)
A ReshapeDescriptor for the ReshapeLayer.
bool IsSubtractionSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsPreluSupported(const TensorInfo &input, const TensorInfo &alpha, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
arm_compute::Status ClSplitterWorkloadValidate(const TensorInfo &input, const std::vector< std::reference_wrapper< TensorInfo >> &outputs, unsigned int splitAxis)
uint32_t GetNumDimensions() const
Get the number of dimensions.
A ComparisonDescriptor for the ComparisonLayer.
Definition: Descriptors.hpp:78
bool IsSliceSupported(const TensorInfo &input, const TensorInfo &output, const SliceDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
arm_compute::Status ClSliceWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const SliceDescriptor &descriptor)
ISubgraphViewConverter supported
A Convolution2dDescriptor for the Convolution2dLayer.
bool IsQuantizeSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
arm_compute::Status ClSubtractionValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ActivationDescriptor *activationDescriptor)
ResizeMethod m_Method
The Interpolation method to use (Bilinear, NearestNeighbor).
arm_compute::Status ClLstmFloatWorkloadValidate(const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &scratchBuffer, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const LstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo)
bool IsPadSupported(const TensorInfo &input, const TensorInfo &output, const PadDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
arm_compute::Status ClPermuteWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const PermuteDescriptor &descriptor)
bool IsGreaterSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported) const override
bool IsLstmSupported(const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &scratchBuffer, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const LstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
arm_compute::Status ClTransposeWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const TransposeDescriptor &descriptor)
arm_compute::Status ClReshapeWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
bool IsStridedSliceSupported(const TensorInfo &input, const TensorInfo &output, const StridedSliceDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsMergerSupported(const std::vector< const TensorInfo *> inputs, const TensorInfo &output, const MergerDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
A LogicalBinaryDescriptor for the LogicalBinaryLayer.
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported,...)
bool IsLogicalBinarySupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const LogicalBinaryDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported) const override
bool IsLogSoftmaxSupported(const TensorInfo &input, const TensorInfo &output, const LogSoftmaxDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
Copyright (c) 2021 ARM Limited and Contributors.
arm_compute::Status ClConvolution2dWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const Convolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, bool isFastMathEnabled, const ActivationDescriptor *activationDescriptor)
void IgnoreUnused(Ts &&...)
std::set< unsigned int > ComputeSplitAxis(const armnn::SplitterDescriptor &desc, const TensorShape &input)
bool IsPermuteSupported(const TensorInfo &input, const TensorInfo &output, const PermuteDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsComparisonSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ComparisonDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
A SpaceToDepthDescriptor for the SpaceToDepthLayer.
bool IsNormalizationSupported(const TensorInfo &input, const TensorInfo &output, const NormalizationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
LogicalBinaryOperation m_Operation
Specifies the logical operation to execute.
A BatchToSpaceNdDescriptor for the BatchToSpaceNdLayer.
bool IsAbsSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsConvolution2dSupported(const TensorInfo &input, const TensorInfo &output, const Convolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
arm_compute::Status ClMeanValidate(const TensorInfo &input, const TensorInfo &output, const MeanDescriptor &descriptor)
arm_compute::Status ClSinWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
A ResizeDescriptor for the ResizeLayer.
arm_compute::Status ClSoftmaxWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const SoftmaxDescriptor &descriptor)
A StackDescriptor for the StackLayer.
bool IsQuantizedLstmSupported(const TensorInfo &input, const TensorInfo &previousCellStateIn, const TensorInfo &previousOutputIn, const TensorInfo &cellStateOut, const TensorInfo &output, const QuantizedLstmInputParamsInfo &paramsInfo, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
arm_compute::Status ClNormalizationWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const NormalizationDescriptor &descriptor)
arm_compute::Status ClPooling2dWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const Pooling2dDescriptor &descriptor)
A PadDescriptor for the PadLayer.
bool IsElementwiseUnarySupported(const TensorInfo &input, const TensorInfo &output, const ElementwiseUnaryDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
arm_compute::Status ClLogicalNotWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
bool IsGatherSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const GatherDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported) const override
arm_compute::Status ClDepthToSpaceWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const DepthToSpaceDescriptor &descriptor)
DataType
Definition: Types.hpp:35
bool IsBatchNormalizationSupported(const TensorInfo &input, const TensorInfo &output, const TensorInfo &mean, const TensorInfo &var, const TensorInfo &beta, const TensorInfo &gamma, const BatchNormalizationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
An LstmDescriptor for the LstmLayer.
bool IsConvertFp16ToFp32Supported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsDepthToSpaceSupported(const TensorInfo &input, const TensorInfo &output, const DepthToSpaceDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
arm_compute::Status ClStackWorkloadValidate(const std::vector< const TensorInfo *> &inputs, const TensorInfo &output, const StackDescriptor &descriptor)
arm_compute::Status ClConstantWorkloadValidate(const TensorInfo &output)
bool IsSpaceToBatchNdSupported(const TensorInfo &input, const TensorInfo &output, const SpaceToBatchNdDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsResizeSupported(const TensorInfo &input, const TensorInfo &output, const ResizeDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
std::shared_ptr< IBackendModelContext > IBackendSpecificModelContextPtr
arm_compute::Status ClPadValidate(const TensorInfo &input, const TensorInfo &output, const PadDescriptor &descriptor)
A L2NormalizationDescriptor for the L2NormalizationLayer.
arm_compute::Status ClSpaceToBatchNdWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const SpaceToBatchNdDescriptor &descriptor)
An ArgMinMaxDescriptor for ArgMinMaxLayer.
Definition: Descriptors.hpp:56
DataType GetDataType() const
Definition: Tensor.hpp:198
An OriginsDescriptor for the ConcatLayer.
A ReduceDescriptor for the REDUCE operators.
A FullyConnectedDescriptor for the FullyConnectedLayer.
arm_compute::Status ClStridedSliceWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const StridedSliceDescriptor &descriptor)
arm_compute::Status ClMultiplicationWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ActivationDescriptor *activationDescriptor)
uint32_t m_TargetWidth
Target width value.
A GatherDescriptor for the GatherLayer.
arm_compute::Status ClConvertFp16ToFp32WorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Status
enumeration
Definition: Types.hpp:29
arm_compute::Status ClGatherWorkloadValidate(const TensorInfo &input, const TensorInfo &indices, const TensorInfo &output, const GatherDescriptor &descriptor)
bool IsFillSupported(const TensorInfo &input, const TensorInfo &output, const FillDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsStackSupported(const std::vector< const TensorInfo *> &inputs, const TensorInfo &output, const StackDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsReshapeSupported(const TensorInfo &input, const TensorInfo &output, const ReshapeDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsDilatedDepthwiseConvolutionSupported(const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string &> reason=EmptyOptional()) const override
A QLstmDescriptor for the QLstmLayer.
An ActivationDescriptor for the ActivationLayer.
Definition: Descriptors.hpp:25
arm_compute::Status ClSpaceToDepthWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const SpaceToDepthDescriptor &descriptor)
bool IsQLstmSupported(const TensorInfo &input, const TensorInfo &previousOutputIn, const TensorInfo &previousCellStateIn, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const QLstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
arm_compute::Status ClBatchNormalizationValidate(const TensorInfo &input, const TensorInfo &output, const TensorInfo &mean, const TensorInfo &var, const TensorInfo &beta, const TensorInfo &gamma, const BatchNormalizationDescriptor &descriptor, const ActivationDescriptor *activationDescriptor)
uint32_t m_TargetHeight
Target height value.
A SliceDescriptor for the SliceLayer.
bool IsDivisionSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsMultiplicationSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
arm_compute::Status ClResizeWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const ResizeDescriptor &descriptor)
bool IsFullyConnectedSupported(const TensorInfo &input, const TensorInfo &output, const TensorInfo &weights, const TensorInfo &biases, const FullyConnectedDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
void SetValueChecked(Optional< T &> optionalRef, V &&val)
bool IsDequantizeSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
A SpaceToBatchNdDescriptor for the SpaceToBatchNdLayer.
bool IsCastSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsResizeBilinearSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
A ElementwiseUnaryDescriptor for the ElementwiseUnaryLayer.
Definition: Descriptors.hpp:98
bool IsAdditionSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsMaximumSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
arm_compute::Status ClPreluWorkloadValidate(const TensorInfo &input, const TensorInfo &alpha, const TensorInfo &output)
arm_compute::Status ClCastValidate(const TensorInfo &input, const TensorInfo &output)
arm_compute::Status ClAbsWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
bool IsMeanSupported(const TensorInfo &input, const TensorInfo &output, const MeanDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
arm_compute::Status ClMinimumWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output)
uint32_t GetNumDimensions() const
Get the number of dimensions.
A MeanDescriptor for the MeanLayer.
arm_compute::Status ClNegWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
bool IsInputSupported(const TensorInfo &input, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
arm_compute::Status ClArgMinMaxWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const ArgMinMaxDescriptor &descriptor)
A TransposeDescriptor for the TransposeLayer.
A StridedSliceDescriptor for the StridedSliceLayer.
arm_compute::Status ClConcatWorkloadValidate(const std::vector< const TensorInfo *> &inputs, const TensorInfo &output, const OriginsDescriptor &descriptor)
bool IsSoftmaxSupported(const TensorInfo &input, const TensorInfo &output, const SoftmaxDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
arm_compute::Status ClExpWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
arm_compute::Status ClQLstmWorkloadValidate(const TensorInfo &input, const TensorInfo &cellStateIn, const TensorInfo &outputStateIn, const TensorInfo &cellStateOut, const TensorInfo &outputStateOut, const TensorInfo &output, const QLstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo)
bool IsConcatSupported(const std::vector< const TensorInfo *> inputs, const TensorInfo &output, const ConcatDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
arm_compute::Status ClTransposeConvolution2dWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const TransposeConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases)
bool IsL2NormalizationSupported(const TensorInfo &input, const TensorInfo &output, const L2NormalizationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
arm_compute::Status ClActivationWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const ActivationDescriptor &descriptor)
bool IsDepthwiseConvolutionSupported(const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
A Pooling2dDescriptor for the Pooling2dLayer.
A NormalizationDescriptor for the NormalizationLayer.
arm_compute::Status ClLogWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
bool IsSplitterSupported(const TensorInfo &input, const ViewsDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
The ClBackendModelContext is used to pass in CL specific backend ModelOptions.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
An InstanceNormalizationDescriptor for InstanceNormalizationLayer.
bool IsConvertFp32ToFp16Supported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
unsigned int GetConcatAxis() const
Get the concatenation axis value.
bool IsInstanceNormalizationSupported(const TensorInfo &input, const TensorInfo &output, const InstanceNormalizationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsPooling2dSupported(const TensorInfo &input, const TensorInfo &output, const Pooling2dDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsRsqrtSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
arm_compute::Status ClMaximumWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output)
bool IsActivationSupported(const TensorInfo &input, const TensorInfo &output, const ActivationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsSupportedForDataTypeGeneric(Optional< std::string &> reasonIfUnsupported, DataType dataType, Float16Func float16FuncPtr, Float32Func float32FuncPtr, Uint8Func uint8FuncPtr, Int32Func int32FuncPtr, BooleanFunc booleanFuncPtr, Params &&... params)
A SoftmaxDescriptor for the SoftmaxLayer.
arm_compute::Status ClDepthwiseConvolutionWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, const ActivationDescriptor *activationDescriptor)
arm_compute::Status ClQuantizedLstmWorkloadValidate(const TensorInfo &input, const TensorInfo &previousCellStateIn, const TensorInfo &previousOutputIn, const TensorInfo &cellStateOut, const TensorInfo &output, const QuantizedLstmInputParamsInfo &paramsInfo)
arm_compute::Status ClFloorWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
arm_compute::Status ClRsqrtWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
bool IsSpaceToDepthSupported(const TensorInfo &input, const TensorInfo &output, const SpaceToDepthDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
A DepthwiseConvolution2dDescriptor for the DepthwiseConvolution2dLayer.
A FillDescriptor for the FillLayer.
A BatchNormalizationDescriptor for the BatchNormalizationLayer.
arm_compute::Status ClLogicalAndWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output)
arm_compute::Status ClQuantizeWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
A PermuteDescriptor for the PermuteLayer.
bool IsBatchToSpaceNdSupported(const TensorInfo &input, const TensorInfo &output, const BatchToSpaceNdDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsConstantSupported(const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsTransposeConvolution2dSupported(const TensorInfo &input, const TensorInfo &output, const TransposeConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
arm_compute::Status ClReduceWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const ReduceDescriptor &descriptor)