ClLayerSupport.cpp
//
// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "ClLayerSupport.hpp"
#include "ClBackendId.hpp"
#include "ClBackendModelContext.hpp"

#include <armnn/Descriptors.hpp>
#include <armnn/utility/IgnoreUnused.hpp>

#include <InternalTypes.hpp>
#include <LayerSupportCommon.hpp>

#if defined(ARMCOMPUTECL_ENABLED)
// The Cl*Workload.hpp headers declaring the *WorkloadValidate functions used below
// are included here (not reproduced in this listing).
#endif


namespace armnn
{

namespace
{

template<unsigned int FilterSize>
bool IsMatchingSize2d(const TensorInfo& weightInfo)
{
    // Width & Height must match.
    return (weightInfo.GetShape()[3] == FilterSize) && (weightInfo.GetShape()[2] == FilterSize);
}

template<uint32_t ValidStride>
bool IsMatchingStride(uint32_t actualStride)
{
    return ValidStride == actualStride;
}

template<uint32_t FirstStride, uint32_t SecondStride, uint32_t... ValidStrides>
bool IsMatchingStride(uint32_t actualStride)
{
    return IsMatchingStride<FirstStride>(actualStride) || IsMatchingStride<SecondStride, ValidStrides...>(actualStride);
}
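
// Illustrative usage (not part of the original file): the variadic overload checks the actual stride
// against each candidate in turn, e.g. IsMatchingStride<1, 2, 3>(2) evaluates
// IsMatchingStride<1>(2) || IsMatchingStride<2, 3>(2) and returns true.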

template<typename ... Args>
bool IsClBackendSupported(Optional<std::string&> reasonIfUnsupported, Args... args)
{
    IgnoreUnused(reasonIfUnsupported, (args)...);
#if defined(ARMCOMPUTECL_ENABLED)
    return true;
#else
    if (reasonIfUnsupported)
    {
        reasonIfUnsupported.value() = "The armnn library has been built without CL support";
    }
    return false;
#endif
}

#if defined(ARMCOMPUTECL_ENABLED)
#define FORWARD_CL_LAYER_SUPPORT_FUNC(expr) (expr)
#else
#define FORWARD_CL_LAYER_SUPPORT_FUNC(expr) IsClBackendSupported(reasonIfUnsupported)
#endif

#if defined(ARMCOMPUTECL_ENABLED)
template<class FuncType, class... Args>
inline bool IsWorkloadSupported(FuncType&& func, Optional<std::string&> reasonIfUnsupported, Args&&... args)
{
    arm_compute::Status aclStatus = func(std::forward<Args>(args)...);
    const bool supported = (aclStatus.error_code() == arm_compute::ErrorCode::OK);
    if (!supported && reasonIfUnsupported)
    {
        reasonIfUnsupported.value() = aclStatus.error_description();
    }
    return supported;
}

#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported, ...) \
    return IsWorkloadSupported(func, reasonIfUnsupported, __VA_ARGS__);
#else
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported, ...) \
    return IsClBackendSupported(reasonIfUnsupported, __VA_ARGS__);
#endif
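
// Sketch (not part of the original file): inside a query such as IsActivationSupported,
// FORWARD_WORKLOAD_VALIDATE_FUNC(ClActivationWorkloadValidate, reasonIfUnsupported, input, output, descriptor)
// expands on a CL-enabled build to
//     return IsWorkloadSupported(ClActivationWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
// so the arm_compute::Status returned by the validate function is translated into a bool plus an optional
// reason string. Without CL support it collapses to IsClBackendSupported(reasonIfUnsupported, ...).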

template<typename FloatFunc, typename Uint8Func, typename ... Params>
bool IsSupportedForDataTypeCl(Optional<std::string&> reasonIfUnsupported,
                              DataType dataType,
                              FloatFunc floatFuncPtr,
                              Uint8Func uint8FuncPtr,
                              Params&&... params)
{
    return IsClBackendSupported(reasonIfUnsupported) &&
           IsSupportedForDataTypeGeneric(reasonIfUnsupported,
                                         dataType,
                                         floatFuncPtr,
                                         floatFuncPtr,
                                         uint8FuncPtr,
                                         &FalseFunc<>,
                                         &FalseFunc<>,
                                         std::forward<Params>(params)...);
}
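
// Note (not part of the original file): IsSupportedForDataTypeGeneric receives floatFuncPtr for both
// Float16 and Float32, uint8FuncPtr for QAsymmU8, and FalseFunc<> for the remaining data types, so e.g.
// a Signed32 or Boolean tensor always reports "unsupported" through this helper.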
} // anonymous namespace

ClLayerSupport::ClLayerSupport(const IBackendInternal::IBackendSpecificModelContextPtr& modelContextPtr)
    : m_ModelContextPtr(modelContextPtr)
{
}

ClLayerSupport::ClLayerSupport()
    : m_ModelContextPtr(nullptr)
{
}

bool ClLayerSupport::IsAbsSupported(const TensorInfo& input,
                                    const TensorInfo& output,
                                    Optional<std::string&> reasonIfUnsupported) const
{
    ElementwiseUnaryDescriptor descriptor(UnaryOperation::Abs);
    return IsElementwiseUnarySupported(input, output, descriptor, reasonIfUnsupported);
}

bool ClLayerSupport::IsActivationSupported(const TensorInfo& input,
                                           const TensorInfo& output,
                                           const ActivationDescriptor& descriptor,
                                           Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClActivationWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}

bool ClLayerSupport::IsAdditionSupported(const TensorInfo& input0,
                                         const TensorInfo& input1,
                                         const TensorInfo& output,
                                         Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClAdditionValidate,
                                   reasonIfUnsupported,
                                   input0,
                                   input1,
                                   output,
                                   nullptr);
}

bool ClLayerSupport::IsArgMinMaxSupported(const TensorInfo& input,
                                          const TensorInfo& output,
                                          const ArgMinMaxDescriptor& descriptor,
                                          Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClArgMinMaxWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}

bool ClLayerSupport::IsBatchNormalizationSupported(const TensorInfo& input,
                                                   const TensorInfo& output,
                                                   const TensorInfo& mean,
                                                   const TensorInfo& var,
                                                   const TensorInfo& beta,
                                                   const TensorInfo& gamma,
                                                   const BatchNormalizationDescriptor& descriptor,
                                                   Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClBatchNormalizationValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   mean,
                                   var,
                                   beta,
                                   gamma,
                                   descriptor,
                                   nullptr);
}

bool ClLayerSupport::IsCastSupported(const TensorInfo& input,
                                     const TensorInfo& output,
                                     Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClCastValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output);
}

bool ClLayerSupport::IsBatchToSpaceNdSupported(const TensorInfo& input,
                                               const TensorInfo& output,
                                               const BatchToSpaceNdDescriptor& descriptor,
                                               Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClBatchToSpaceNdWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}

bool ClLayerSupport::IsComparisonSupported(const TensorInfo& input0,
                                           const TensorInfo& input1,
                                           const TensorInfo& output,
                                           const ComparisonDescriptor& descriptor,
                                           Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClComparisonWorkloadValidate,
                                   reasonIfUnsupported,
                                   input0,
                                   input1,
                                   output,
                                   descriptor);
}

bool ClLayerSupport::IsConcatSupported(const std::vector<const TensorInfo*> inputs,
                                       const TensorInfo& output,
                                       const ConcatDescriptor& descriptor,
                                       Optional<std::string&> reasonIfUnsupported) const
{
    if (descriptor.GetNumDimensions() <= descriptor.GetConcatAxis())
    {
        SetValueChecked(reasonIfUnsupported, "Cl Concat: Concat axis > Number of dimensions.");
        return false;
    }

    unsigned int concatInnerAxis = (descriptor.GetNumDimensions() - descriptor.GetConcatAxis()) - 1;
    if(concatInnerAxis < 3) // Width, height, or channels
    {
        FORWARD_WORKLOAD_VALIDATE_FUNC(ClConcatWorkloadValidate,
                                       reasonIfUnsupported,
                                       inputs,
                                       output,
                                       descriptor);
    }
    else if (concatInnerAxis == 3)
    {
        // We rely on the sub-tensor optimization to handle the batch dimension for 4D tensors. If we can't use
        // sub-tensors for this then we can't support it. Here is where we check that the sub-tensors will work.
        for (auto& input : inputs)
        {
            if (input && !output.IsTypeSpaceMatch(*input)) // Cannot use sub-tensors if the types are not same space
            {
                SetValueChecked(reasonIfUnsupported, "Cl Concat: Types and quantization parameters must match.");
                return false;
            }
        }
        return true; // Sub-tensors support concat along batch
    }
    else // > 4 dimensions not supported.
    {
        SetValueChecked(reasonIfUnsupported, "Cl Concat: Maximum of 4 dimensions supported.");
        return false;
    }
}
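
// Worked example (not part of the original file): for a 4D concat with GetNumDimensions() == 4 and
// GetConcatAxis() == 0 (the batch axis), concatInnerAxis = (4 - 0) - 1 = 3, so the sub-tensor path
// above is taken; concatenating along channels (axis 1) gives concatInnerAxis = 2 and is forwarded
// to ClConcatWorkloadValidate instead.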

bool ClLayerSupport::IsConstantSupported(const TensorInfo& output,
                                         Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClConstantWorkloadValidate,
                                   reasonIfUnsupported,
                                   output);
}

bool ClLayerSupport::IsConvertFp16ToFp32Supported(const TensorInfo& input,
                                                  const TensorInfo& output,
                                                  Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClConvertFp16ToFp32WorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output);
}

bool ClLayerSupport::IsConvertFp32ToFp16Supported(const TensorInfo& input,
                                                  const TensorInfo& output,
                                                  Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClConvertFp32ToFp16WorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output);
}

bool ClLayerSupport::IsConvolution2dSupported(const TensorInfo& input,
                                              const TensorInfo& output,
                                              const Convolution2dDescriptor& descriptor,
                                              const TensorInfo& weights,
                                              const Optional<TensorInfo>& biases,
                                              Optional<std::string&> reasonIfUnsupported) const
{
    bool isFastMathEnabled = false;
#if defined(ARMCOMPUTECL_ENABLED)
    if (m_ModelContextPtr)
    {
        if (m_ModelContextPtr.get() != nullptr)
        {
            auto modelOptions = dynamic_cast<ClBackendModelContext*>(m_ModelContextPtr.get());
            if (modelOptions)
            {
                isFastMathEnabled = modelOptions->IsFastMathEnabled();
            }
        }
    }
#endif

    FORWARD_WORKLOAD_VALIDATE_FUNC(ClConvolution2dWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor,
                                   weights,
                                   biases,
                                   isFastMathEnabled,
                                   nullptr);
}
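
// Sketch (not part of the original file): the fast-math flag consulted above is typically supplied as a
// backend ModelOption when the network is optimized, roughly along these lines:
//
//     armnn::BackendOptions clOptions("GpuAcc", {{"FastMathEnabled", true}});
//     armnn::OptimizerOptions optimizerOptions;
//     optimizerOptions.m_ModelOptions.push_back(clOptions);
//     // pass optimizerOptions to armnn::Optimize(...)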

bool ClLayerSupport::IsDequantizeSupported(const TensorInfo& input,
                                           const TensorInfo& output,
                                           Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClDequantizeWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output);
}

bool ClLayerSupport::IsDepthToSpaceSupported(const TensorInfo& input,
                                             const TensorInfo& output,
                                             const DepthToSpaceDescriptor& descriptor,
                                             Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClDepthToSpaceWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}

bool ClLayerSupport::IsDepthwiseConvolutionSupported(const TensorInfo& input,
                                                     const TensorInfo& output,
                                                     const DepthwiseConvolution2dDescriptor& descriptor,
                                                     const TensorInfo& weights,
                                                     const Optional<TensorInfo>& biases,
                                                     Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClDepthwiseConvolutionWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor,
                                   weights,
                                   biases,
                                   nullptr);
}

bool ClLayerSupport::IsDilatedDepthwiseConvolutionSupported(const TensorInfo& input,
                                                            const TensorInfo& output,
                                                            const DepthwiseConvolution2dDescriptor& descriptor,
                                                            const TensorInfo& weights,
                                                            const Optional<TensorInfo>& biases,
                                                            Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClDepthwiseConvolutionWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor,
                                   weights,
                                   biases,
                                   nullptr);
}


bool ClLayerSupport::IsDivisionSupported(const TensorInfo& input0,
                                         const TensorInfo& input1,
                                         const TensorInfo& output,
                                         Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClDivisionWorkloadValidate,
                                   reasonIfUnsupported,
                                   input0,
                                   input1,
                                   output,
                                   nullptr);
}

bool ClLayerSupport::IsElementwiseUnarySupported(const TensorInfo& input,
                                                 const TensorInfo& output,
                                                 const ElementwiseUnaryDescriptor& descriptor,
                                                 Optional<std::string&> reasonIfUnsupported) const
{
    switch(descriptor.m_Operation)
    {
        case UnaryOperation::Abs:
            FORWARD_WORKLOAD_VALIDATE_FUNC(ClAbsWorkloadValidate,
                                           reasonIfUnsupported,
                                           input,
                                           output);
        case UnaryOperation::Exp:
            FORWARD_WORKLOAD_VALIDATE_FUNC(ClExpWorkloadValidate,
                                           reasonIfUnsupported,
                                           input,
                                           output);
        case UnaryOperation::Neg:
            FORWARD_WORKLOAD_VALIDATE_FUNC(ClNegWorkloadValidate,
                                           reasonIfUnsupported,
                                           input,
                                           output);
        case UnaryOperation::Rsqrt:
            FORWARD_WORKLOAD_VALIDATE_FUNC(ClRsqrtWorkloadValidate,
                                           reasonIfUnsupported,
                                           input,
                                           output);
        case UnaryOperation::LogicalNot:
            FORWARD_WORKLOAD_VALIDATE_FUNC(ClLogicalNotWorkloadValidate,
                                           reasonIfUnsupported,
                                           input,
                                           output);
        default:
            return false;
    }
}

bool ClLayerSupport::IsFillSupported(const TensorInfo& input,
                                     const TensorInfo& output,
                                     const FillDescriptor& descriptor,
                                     Optional<std::string&> reasonIfUnsupported) const
{
    armnn::IgnoreUnused(input);
    armnn::IgnoreUnused(output);
    armnn::IgnoreUnused(descriptor);

    return IsClBackendSupported(reasonIfUnsupported);
}

bool ClLayerSupport::IsFloorSupported(const TensorInfo& input,
                                      const TensorInfo& output,
                                      Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClFloorWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output);
}

bool ClLayerSupport::IsFullyConnectedSupported(const TensorInfo& input,
                                               const TensorInfo& output,
                                               const TensorInfo& weights,
                                               const TensorInfo& biases,
                                               const FullyConnectedDescriptor& descriptor,
                                               Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClFullyConnectedWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   weights,
                                   biases,
                                   descriptor,
                                   nullptr);
}

bool ClLayerSupport::IsGatherSupported(const TensorInfo& input0,
                                       const TensorInfo& input1,
                                       const TensorInfo& output,
                                       const GatherDescriptor& descriptor,
                                       Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClGatherWorkloadValidate,
                                   reasonIfUnsupported,
                                   input0,
                                   input1,
                                   output,
                                   descriptor);
}

bool ClLayerSupport::IsGreaterSupported(const TensorInfo& input0,
                                        const TensorInfo& input1,
                                        const TensorInfo& output,
                                        Optional<std::string&> reasonIfUnsupported) const
{
    ComparisonDescriptor descriptor(ComparisonOperation::Greater);
    return IsComparisonSupported(input0, input1, output, descriptor, reasonIfUnsupported);
}

bool ClLayerSupport::IsInputSupported(const TensorInfo& input,
                                      Optional<std::string&> reasonIfUnsupported) const
{
    return IsClBackendSupported(reasonIfUnsupported, input);
}

bool ClLayerSupport::IsInstanceNormalizationSupported(const TensorInfo& input,
                                                      const TensorInfo& output,
                                                      const InstanceNormalizationDescriptor& descriptor,
                                                      Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClInstanceNormalizationWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}

bool ClLayerSupport::IsL2NormalizationSupported(const TensorInfo& input,
                                                const TensorInfo& output,
                                                const L2NormalizationDescriptor& descriptor,
                                                Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClL2NormalizationWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}

bool ClLayerSupport::IsLogicalBinarySupported(const TensorInfo& input0,
                                              const TensorInfo& input1,
                                              const TensorInfo& output,
                                              const LogicalBinaryDescriptor& descriptor,
                                              Optional<std::string&> reasonIfUnsupported) const
{
    IgnoreUnused(output);

    switch(descriptor.m_Operation)
    {
        case LogicalBinaryOperation::LogicalAnd:
            FORWARD_WORKLOAD_VALIDATE_FUNC(ClLogicalAndWorkloadValidate,
                                           reasonIfUnsupported,
                                           input0,
                                           input1,
                                           output);
        case LogicalBinaryOperation::LogicalOr:
            FORWARD_WORKLOAD_VALIDATE_FUNC(ClLogicalOrWorkloadValidate,
                                           reasonIfUnsupported,
                                           input0,
                                           input1,
                                           output);
        default:
            return false;
    }
}


bool ClLayerSupport::IsLogSoftmaxSupported(const TensorInfo& input,
                                           const TensorInfo& output,
                                           const LogSoftmaxDescriptor& descriptor,
                                           Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClLogSoftmaxWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}

bool ClLayerSupport::IsLstmSupported(const TensorInfo& input,
                                     const TensorInfo& outputStateIn,
                                     const TensorInfo& cellStateIn,
                                     const TensorInfo& scratchBuffer,
                                     const TensorInfo& outputStateOut,
                                     const TensorInfo& cellStateOut,
                                     const TensorInfo& output,
                                     const LstmDescriptor& descriptor,
                                     const LstmInputParamsInfo& paramsInfo,
                                     Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClLstmFloatWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   outputStateIn,
                                   cellStateIn,
                                   scratchBuffer,
                                   outputStateOut,
                                   cellStateOut,
                                   output,
                                   descriptor,
                                   paramsInfo);
}

bool ClLayerSupport::IsMaximumSupported(const TensorInfo& input0,
                                        const TensorInfo& input1,
                                        const TensorInfo& output,
                                        Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClMaximumWorkloadValidate,
                                   reasonIfUnsupported,
                                   input0,
                                   input1,
                                   output);
}

bool ClLayerSupport::IsMeanSupported(const TensorInfo& input,
                                     const TensorInfo& output,
                                     const MeanDescriptor& descriptor,
                                     Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClMeanValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}

bool ClLayerSupport::IsMergerSupported(const std::vector<const TensorInfo*> inputs,
                                       const TensorInfo& output,
                                       const MergerDescriptor& descriptor,
                                       Optional<std::string&> reasonIfUnsupported) const
{
    return IsConcatSupported(inputs, output, descriptor, reasonIfUnsupported);
}

bool ClLayerSupport::IsMinimumSupported(const TensorInfo& input0,
                                        const TensorInfo& input1,
                                        const TensorInfo& output,
                                        Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClMinimumWorkloadValidate,
                                   reasonIfUnsupported,
                                   input0,
                                   input1,
                                   output);
}

bool ClLayerSupport::IsMultiplicationSupported(const TensorInfo& input0,
                                               const TensorInfo& input1,
                                               const TensorInfo& output,
                                               Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClMultiplicationWorkloadValidate,
                                   reasonIfUnsupported,
                                   input0,
                                   input1,
                                   output,
                                   nullptr);
}

bool ClLayerSupport::IsNormalizationSupported(const TensorInfo& input,
                                              const TensorInfo& output,
                                              const NormalizationDescriptor& descriptor,
                                              Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClNormalizationWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
}

bool ClLayerSupport::IsOutputSupported(const TensorInfo& output,
                                       Optional<std::string&> reasonIfUnsupported) const
{
    return IsClBackendSupported(reasonIfUnsupported, output);
}

bool ClLayerSupport::IsPadSupported(const TensorInfo& input,
                                    const TensorInfo& output,
                                    const PadDescriptor& descriptor,
                                    Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClPadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}

bool ClLayerSupport::IsPermuteSupported(const TensorInfo& input,
                                        const TensorInfo& output,
                                        const PermuteDescriptor& descriptor,
                                        Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClPermuteWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
}

bool ClLayerSupport::IsPooling2dSupported(const TensorInfo& input,
                                          const TensorInfo& output,
                                          const Pooling2dDescriptor& descriptor,
                                          Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClPooling2dWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
}

bool ClLayerSupport::IsPreluSupported(const armnn::TensorInfo &input,
                                      const armnn::TensorInfo &alpha,
                                      const armnn::TensorInfo &output,
                                      armnn::Optional<std::string &> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClPreluWorkloadValidate, reasonIfUnsupported, input, alpha, output);
}

bool ClLayerSupport::IsQLstmSupported(const TensorInfo& input,
                                      const TensorInfo& previousOutputIn,
                                      const TensorInfo& previousCellStateIn,
                                      const TensorInfo& outputStateOut,
                                      const TensorInfo& cellStateOut,
                                      const TensorInfo& output,
                                      const QLstmDescriptor& descriptor,
                                      const LstmInputParamsInfo& paramsInfo,
                                      Optional<std::string&> reasonIfUnsupported) const
{
    if (input.GetDataType() == armnn::DataType::QAsymmS8 &&
        previousOutputIn.GetDataType() == armnn::DataType::QAsymmS8 &&
        previousCellStateIn.GetDataType() == armnn::DataType::QSymmS16 &&
        outputStateOut.GetDataType() == armnn::DataType::QAsymmS8 &&
        cellStateOut.GetDataType() == armnn::DataType::QSymmS16 &&
        output.GetDataType() == armnn::DataType::QAsymmS8)
    {
        FORWARD_WORKLOAD_VALIDATE_FUNC(ClQLstmWorkloadValidate,
                                       reasonIfUnsupported,
                                       input,
                                       previousCellStateIn,
                                       previousOutputIn,
                                       cellStateOut,
                                       outputStateOut,
                                       output,
                                       descriptor,
                                       paramsInfo);
    }
    else
    {
        return false;
    }
}

bool ClLayerSupport::IsQuantizedLstmSupported(const TensorInfo& input,
                                              const TensorInfo& previousCellStateIn,
                                              const TensorInfo& previousOutputIn,
                                              const TensorInfo& cellStateOut,
                                              const TensorInfo& output,
                                              const QuantizedLstmInputParamsInfo& paramsInfo,
                                              Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClQuantizedLstmWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   previousCellStateIn,
                                   previousOutputIn,
                                   cellStateOut,
                                   output,
                                   paramsInfo);
}

bool ClLayerSupport::IsQuantizeSupported(const TensorInfo& input,
                                         const TensorInfo& output,
                                         Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClQuantizeWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output);
}

bool ClLayerSupport::IsReduceSupported(const TensorInfo& input,
                                       const TensorInfo& output,
                                       const ReduceDescriptor& descriptor,
                                       Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClReduceWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}

bool ClLayerSupport::IsReshapeSupported(const TensorInfo& input,
                                        const TensorInfo& output,
                                        const ReshapeDescriptor& descriptor,
                                        Optional<std::string&> reasonIfUnsupported) const
{
    IgnoreUnused(descriptor);
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClReshapeWorkloadValidate, reasonIfUnsupported, input, output);
}

bool ClLayerSupport::IsResizeSupported(const TensorInfo& input,
                                       const TensorInfo& output,
                                       const ResizeDescriptor& descriptor,
                                       Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClResizeWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
}

bool ClLayerSupport::IsResizeBilinearSupported(const TensorInfo& input,
                                               const TensorInfo& output,
                                               Optional<std::string&> reasonIfUnsupported) const
{
    ResizeDescriptor descriptor;
    descriptor.m_Method     = ResizeMethod::Bilinear;
    descriptor.m_DataLayout = DataLayout::NCHW;

    const TensorShape& outputShape = output.GetShape();
    descriptor.m_TargetHeight = outputShape[2];
    descriptor.m_TargetWidth  = outputShape[3];

    return IsResizeSupported(input, output, descriptor, reasonIfUnsupported);
}
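
// Illustrative (not part of the original file): because the descriptor is built for NCHW data,
// an output TensorInfo of shape [1, 3, 224, 224] yields m_TargetHeight = 224 (dimension 2) and
// m_TargetWidth = 224 (dimension 3).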

bool ClLayerSupport::IsRsqrtSupported(const TensorInfo& input,
                                      const TensorInfo& output,
                                      Optional<std::string&> reasonIfUnsupported) const
{
    ElementwiseUnaryDescriptor descriptor(UnaryOperation::Rsqrt);
    return IsElementwiseUnarySupported(input, output, descriptor, reasonIfUnsupported);
}

bool ClLayerSupport::IsSliceSupported(const TensorInfo& input,
                                      const TensorInfo& output,
                                      const SliceDescriptor& descriptor,
                                      Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClSliceWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
}

bool ClLayerSupport::IsSoftmaxSupported(const TensorInfo& input,
                                        const TensorInfo& output,
                                        const SoftmaxDescriptor& descriptor,
                                        Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClSoftmaxWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
}

bool ClLayerSupport::IsSpaceToBatchNdSupported(const TensorInfo& input,
                                               const TensorInfo& output,
                                               const SpaceToBatchNdDescriptor& descriptor,
                                               Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClSpaceToBatchNdWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}

bool ClLayerSupport::IsSpaceToDepthSupported(const TensorInfo& input,
                                             const TensorInfo& output,
                                             const SpaceToDepthDescriptor& descriptor,
                                             Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClSpaceToDepthWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}

bool ClLayerSupport::IsSplitterSupported(const TensorInfo& input,
                                         const ViewsDescriptor& descriptor,
                                         Optional<std::string&> reasonIfUnsupported) const
{
    IgnoreUnused(descriptor);
    return IsSupportedForDataTypeCl(reasonIfUnsupported,
                                    input.GetDataType(),
                                    &TrueFunc<>,
                                    &TrueFunc<>);
}

bool ClLayerSupport::IsSplitterSupported(const TensorInfo& input,
                                         const std::vector<std::reference_wrapper<TensorInfo>>& outputs,
                                         const ViewsDescriptor& descriptor,
                                         Optional<std::string&> reasonIfUnsupported) const
{
#if defined(ARMCOMPUTECL_ENABLED)
    // Split along the last dimension, cannot use sub-tensors
    // as width and height of the sub-tensors do not match
    // the width and height of the parent tensor
    // in case of input with more than 2D.
    std::set<unsigned int> splitAxis = ComputeSplitAxis(descriptor, input.GetShape());
    if (descriptor.GetNumDimensions() > 2 && splitAxis.size() == 1 &&
        *splitAxis.begin() == descriptor.GetNumDimensions() - 1 )
    {
        FORWARD_WORKLOAD_VALIDATE_FUNC(ClSplitterWorkloadValidate,
                                       reasonIfUnsupported,
                                       input,
                                       outputs,
                                       *splitAxis.begin());
    }
#endif
    IgnoreUnused(descriptor);
    for (auto output : outputs)
    {
        if (!input.IsTypeSpaceMatch(output)) // Cannot use sub-tensors if the types are not same space
        {
            SetValueChecked(reasonIfUnsupported, "Cl Splitter: Types and quantization parameters must match.");
            return false;
        }
    }
    return true;
}
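
// Illustrative (not part of the original file): splitting a 3D input of shape [2, 2, 4] into two
// [2, 2, 2] views gives ComputeSplitAxis(...) == {2}, i.e. the last dimension, so the
// ClSplitterWorkloadValidate path above is taken; any other split falls through to the
// sub-tensor type/quantization check below it.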

bool ClLayerSupport::IsStackSupported(const std::vector<const TensorInfo*>& inputs,
                                      const TensorInfo& output,
                                      const StackDescriptor& descriptor,
                                      Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClStackWorkloadValidate,
                                   reasonIfUnsupported,
                                   inputs,
                                   output,
                                   descriptor);
}

bool ClLayerSupport::IsStridedSliceSupported(const TensorInfo& input,
                                             const TensorInfo& output,
                                             const StridedSliceDescriptor& descriptor,
                                             Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClStridedSliceWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}

bool ClLayerSupport::IsSubtractionSupported(const TensorInfo& input0,
                                            const TensorInfo& input1,
                                            const TensorInfo& output,
                                            Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClSubtractionValidate,
                                   reasonIfUnsupported,
                                   input0,
                                   input1,
                                   output,
                                   nullptr);
}

bool ClLayerSupport::IsTransposeConvolution2dSupported(const TensorInfo& input,
                                                       const TensorInfo& output,
                                                       const TransposeConvolution2dDescriptor& descriptor,
                                                       const TensorInfo& weights,
                                                       const Optional<TensorInfo>& biases,
                                                       Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClTransposeConvolution2dWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor,
                                   weights,
                                   biases);
}

bool ClLayerSupport::IsTransposeSupported(const TensorInfo& input,
                                          const TensorInfo& output,
                                          const TransposeDescriptor& descriptor,
                                          Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClTransposeWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
}

} // namespace armnn
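
As added context, the following minimal sketch (not part of ClLayerSupport.cpp) shows how the support queries defined above can be exercised directly. It assumes a CL-enabled Arm NN build with the backend-internal ClLayerSupport.hpp header on the include path; the small main() program and its tensor shapes are illustrative only, while TensorInfo, TensorShape, ActivationDescriptor and Optional come from the public Arm NN API used throughout the file.

// Minimal usage sketch (assumptions as noted above).
#include <armnn/Descriptors.hpp>
#include <armnn/Optional.hpp>
#include <armnn/Tensor.hpp>
#include <armnn/Types.hpp>

#include "ClLayerSupport.hpp"

#include <iostream>
#include <string>

int main()
{
    using namespace armnn;

    ClLayerSupport layerSupport; // default constructor: no backend-specific model options

    // NCHW Float32 tensors for a simple element-wise activation.
    const TensorInfo input(TensorShape({1, 16, 32, 32}), DataType::Float32);
    const TensorInfo output(TensorShape({1, 16, 32, 32}), DataType::Float32);

    ActivationDescriptor descriptor;
    descriptor.m_Function = ActivationFunction::ReLu;

    std::string reason;
    const bool supported =
        layerSupport.IsActivationSupported(input, output, descriptor, Optional<std::string&>(reason));

    std::cout << "ReLU supported on the CL backend: " << std::boolalpha << supported;
    if (!supported)
    {
        std::cout << " (" << reason << ")";
    }
    std::cout << std::endl;
    return 0;
}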