ArmNN
 21.11
ClLayerSupport.cpp
Go to the documentation of this file.
1 //
2 // Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
6 #include "ClLayerSupport.hpp"
7 #include "ClBackendId.hpp"
9 
10 #include <armnn/Descriptors.hpp>
12 
13 #include <InternalTypes.hpp>
14 #include <LayerSupportCommon.hpp>
15 
18 
19 #if defined(ARMCOMPUTECL_ENABLED)
82 #endif
83 
84 
85 namespace armnn
86 {
87 
88 namespace
89 {
90 
91 template<unsigned int FilterSize>
92 bool IsMatchingSize2d(const TensorInfo& weightInfo)
93 {
94  // Width & Height must match.
95  return (weightInfo.GetShape()[3] == FilterSize) && (weightInfo.GetShape()[2] == FilterSize);
96 }
97 
template<uint32_t ValidStride>
bool IsMatchingStride(uint32_t actualStride)
{
    // Base case of the variadic overload below: a single candidate stride.
    return actualStride == ValidStride;
}
103 
template<uint32_t FirstStride, uint32_t SecondStride, uint32_t... ValidStrides>
bool IsMatchingStride(uint32_t actualStride)
{
    // Recursive case: the stride is valid if it equals the head of the pack,
    // otherwise keep checking against the remaining candidates.
    if (IsMatchingStride<FirstStride>(actualStride))
    {
        return true;
    }
    return IsMatchingStride<SecondStride, ValidStrides...>(actualStride);
}
109 
110 template<typename ... Args>
111 bool IsClBackendSupported(Optional<std::string&> reasonIfUnsupported, Args... args)
112 {
113  IgnoreUnused(reasonIfUnsupported, (args)...);
114 #if defined(ARMCOMPUTECL_ENABLED)
115  return true;
116 #else
117  if (reasonIfUnsupported)
118  {
119  reasonIfUnsupported.value() = "The armnn library has been built without CL support";
120  }
121  return false;
122 #endif
123 }
124 
#if defined(ARMCOMPUTECL_ENABLED)
// ACL present: evaluate the support expression as written.
#define FORWARD_CL_LAYER_SUPPORT_FUNC(expr) (expr)
#else
// ACL absent: every query collapses to the "built without CL" failure,
// relying on a 'reasonIfUnsupported' variable in the expansion context.
#define FORWARD_CL_LAYER_SUPPORT_FUNC(expr) IsClBackendSupported(reasonIfUnsupported)
#endif
130 
#if defined(ARMCOMPUTECL_ENABLED)
/// Runs an Arm Compute Library validate function and folds its Status into the
/// bool + optional reason-string shape the layer-support interface expects.
template<class FuncType, class... Args>
inline bool IsWorkloadSupported(FuncType&& func, Optional<std::string&> reasonIfUnsupported, Args&&... args)
{
    const arm_compute::Status aclStatus = func(std::forward<Args>(args)...);
    if (aclStatus.error_code() == arm_compute::ErrorCode::OK)
    {
        return true;
    }
    if (reasonIfUnsupported)
    {
        // Surface ACL's own description of why validation failed.
        reasonIfUnsupported.value() = aclStatus.error_description();
    }
    return false;
}

// NOTE: this macro expands to a 'return' statement, so it must be the final
// statement of the function that uses it.
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported, ...) \
    return IsWorkloadSupported(func, reasonIfUnsupported, __VA_ARGS__);
#else
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported, ...) \
    return IsClBackendSupported(reasonIfUnsupported, __VA_ARGS__);
#endif
150 
151 template<typename FloatFunc, typename Uint8Func, typename ... Params>
152 bool IsSupportedForDataTypeCl(Optional<std::string&> reasonIfUnsupported,
153  DataType dataType,
154  FloatFunc floatFuncPtr,
155  Uint8Func uint8FuncPtr,
156  Params&&... params)
157 {
158  return IsClBackendSupported(reasonIfUnsupported) &&
159  IsSupportedForDataTypeGeneric(reasonIfUnsupported,
160  dataType,
161  floatFuncPtr,
162  floatFuncPtr,
163  uint8FuncPtr,
164  &FalseFunc<>,
165  &FalseFunc<>,
166  std::forward<Params>(params)...);
167 }
168 } // anonymous namespace
169 
171  : m_ModelContextPtr(modelContextPtr)
172 {
173 }
174 
176  : m_ModelContextPtr(nullptr)
177 {
178 }
179 
181  const TensorInfo& output,
182  const ActivationDescriptor& descriptor,
183  Optional<std::string&> reasonIfUnsupported) const
184 {
186  reasonIfUnsupported,
187  input,
188  output,
189  descriptor);
190 }
191 
193  const TensorInfo& input1,
194  const TensorInfo& output,
195  Optional<std::string&> reasonIfUnsupported) const
196 {
198  reasonIfUnsupported,
199  input0,
200  input1,
201  output,
202  nullptr);
203 }
204 
206  const TensorInfo& output,
207  const ArgMinMaxDescriptor& descriptor,
208  Optional<std::string&> reasonIfUnsupported) const
209 {
210 
212  reasonIfUnsupported,
213  input,
214  output,
215  descriptor);
216 }
217 
219  const TensorInfo& output,
220  const TensorInfo& mean,
221  const TensorInfo& var,
222  const TensorInfo& beta,
223  const TensorInfo& gamma,
224  const BatchNormalizationDescriptor& descriptor,
225  Optional<std::string&> reasonIfUnsupported) const
226 {
228  reasonIfUnsupported,
229  input,
230  output,
231  mean,
232  var,
233  beta,
234  gamma,
235  descriptor,
236  nullptr);
237 }
238 
240  const TensorInfo& output,
241  const BatchToSpaceNdDescriptor& descriptor,
242  Optional<std::string&> reasonIfUnsupported) const
243 {
245  reasonIfUnsupported,
246  input,
247  output,
248  descriptor);
249 }
250 
252  const TensorInfo& output,
253  Optional<std::string&> reasonIfUnsupported) const
254 {
256  reasonIfUnsupported,
257  input,
258  output);
259 }
260 
262  const TensorInfo& output,
263  const ChannelShuffleDescriptor& descriptor,
264  Optional<std::string&> reasonIfUnsupported) const
265 {
267  reasonIfUnsupported,
268  input,
269  output,
270  descriptor);
271 }
272 
274  const TensorInfo& input1,
275  const TensorInfo& output,
276  const ComparisonDescriptor& descriptor,
277  Optional<std::string&> reasonIfUnsupported) const
278 {
280  reasonIfUnsupported,
281  input0,
282  input1,
283  output,
284  descriptor);
285 }
286 
287 bool ClLayerSupport::IsConcatSupported(const std::vector<const TensorInfo*> inputs,
288  const TensorInfo& output,
289  const ConcatDescriptor& descriptor,
290  Optional<std::string&> reasonIfUnsupported) const
291 {
292  if (descriptor.GetNumDimensions() <= descriptor.GetConcatAxis())
293  {
294  SetValueChecked(reasonIfUnsupported, "Cl Concat: Concat axis > Number of dimensions.");
295  return false;
296  }
297 
298  unsigned int concatInnerAxis = (descriptor.GetNumDimensions() - descriptor.GetConcatAxis()) - 1;
299  if(concatInnerAxis < 3) // Width, height, or channels
300  {
302  reasonIfUnsupported,
303  inputs,
304  output,
305  descriptor);
306  }
307  else if (concatInnerAxis == 3)
308  {
309  // We rely on the sub-tensor optimization to handle the batch dimension for 4D tensors. If we can't use
310  // sub-tensors for this then we can't support it. Here is where we check that the sub-tensors will work.
311  for (auto& input : inputs)
312  {
313  if (input && !output.IsTypeSpaceMatch(*input)) // Cannot use sub-tensors if the types are not same space
314  {
315  SetValueChecked(reasonIfUnsupported, "Cl Concat: Types and quantization parameters must match.");
316  return false;
317  }
318  }
319  return true; // Sub-tensors support concat along batch
320  }
321  else // > 4 dimensions not supported.
322  {
323  SetValueChecked(reasonIfUnsupported, "Cl Concat: Maximum of 4 dimensions supported.");
324  return false;
325  }
326 }
327 
329  Optional<std::string&> reasonIfUnsupported) const
330 {
332  reasonIfUnsupported,
333  output);
334 }
335 
337  const TensorInfo& output,
338  Optional<std::string&> reasonIfUnsupported) const
339 {
341  reasonIfUnsupported,
342  input,
343  output);
344 }
345 
347  const TensorInfo& output,
348  Optional<std::string&> reasonIfUnsupported) const
349 {
351  reasonIfUnsupported,
352  input,
353  output);
354 }
355 
357  const TensorInfo& output,
358  const Convolution2dDescriptor& descriptor,
359  const TensorInfo& weights,
360  const Optional<TensorInfo>& biases,
361  Optional<std::string&> reasonIfUnsupported) const
362 {
363  bool isFastMathEnabled = false;
364 #if defined(ARMCOMPUTECL_ENABLED)
365  if (m_ModelContextPtr)
366  {
367  if (m_ModelContextPtr.get() != nullptr)
368  {
369  auto modelOptions = dynamic_cast<ClBackendModelContext*>(m_ModelContextPtr.get());
370  if (modelOptions)
371  {
372  isFastMathEnabled = modelOptions->IsFastMathEnabled();
373  }
374  }
375  }
376 #endif
377 
379  reasonIfUnsupported,
380  input,
381  output,
382  descriptor,
383  weights,
384  biases,
385  isFastMathEnabled,
386  nullptr);
387 }
388 
390  const TensorInfo& output,
391  const Convolution3dDescriptor& descriptor,
392  const TensorInfo& weights,
393  const Optional<TensorInfo>& biases,
394  Optional<std::string&> reasonIfUnsupported) const
395 {
396  bool isFastMathEnabled = false;
397 #if defined(ARMCOMPUTECL_ENABLED)
398  if (m_ModelContextPtr)
399 {
400  if (m_ModelContextPtr.get() != nullptr)
401  {
402  auto modelOptions = dynamic_cast<ClBackendModelContext*>(m_ModelContextPtr.get());
403  if (modelOptions)
404  {
405  isFastMathEnabled = modelOptions->IsFastMathEnabled();
406  }
407  }
408 }
409 #endif
410 
412  reasonIfUnsupported,
413  input,
414  output,
415  descriptor,
416  weights,
417  biases,
418  isFastMathEnabled,
419  nullptr);
420 }
421 
423  const TensorInfo& output,
424  Optional<std::string&> reasonIfUnsupported) const
425 {
427  reasonIfUnsupported,
428  input,
429  output);
430 }
431 
433  const TensorInfo& output,
434  const DepthToSpaceDescriptor& descriptor,
435  Optional<std::string&> reasonIfUnsupported) const
436 {
438  reasonIfUnsupported,
439  input,
440  output,
441  descriptor);
442 }
443 
445  const TensorInfo& output,
446  const DepthwiseConvolution2dDescriptor& descriptor,
447  const TensorInfo& weights,
448  const Optional<TensorInfo>& biases,
449  Optional<std::string&> reasonIfUnsupported) const
450 {
452  reasonIfUnsupported,
453  input,
454  output,
455  descriptor,
456  weights,
457  biases,
458  nullptr);
459 }
460 
462  const TensorInfo& output,
463  const DepthwiseConvolution2dDescriptor& descriptor,
464  const TensorInfo& weights,
465  const Optional<TensorInfo>& biases,
466  Optional<std::string&> reasonIfUnsupported) const
467 {
469  reasonIfUnsupported,
470  input,
471  output,
472  descriptor,
473  weights,
474  biases,
475  nullptr);
476 }
477 
478 
480  const TensorInfo& input1,
481  const TensorInfo& output,
482  Optional<std::string&> reasonIfUnsupported) const
483 {
485  reasonIfUnsupported,
486  input0,
487  input1,
488  output,
489  nullptr);
490 }
491 
493  const TensorInfo& output,
494  const ElementwiseUnaryDescriptor& descriptor,
495  Optional<std::string&> reasonIfUnsupported) const
496 {
497  switch(descriptor.m_Operation)
498  {
499  case UnaryOperation::Abs:
501  reasonIfUnsupported,
502  input,
503  output);
504  case UnaryOperation::Exp:
506  reasonIfUnsupported,
507  input,
508  output);
509  case UnaryOperation::Log:
511  reasonIfUnsupported,
512  input,
513  output);
516  reasonIfUnsupported,
517  input,
518  output);
519  case UnaryOperation::Neg:
521  reasonIfUnsupported,
522  input,
523  output);
526  reasonIfUnsupported,
527  input,
528  output);
529  case UnaryOperation::Sin:
531  reasonIfUnsupported,
532  input,
533  output);
534  default:
535  return false;
536  }
537 }
538 
540  const TensorInfo& output,
541  const FillDescriptor& descriptor,
542  Optional<std::string&> reasonIfUnsupported) const
543 {
544  armnn::IgnoreUnused(input);
545  armnn::IgnoreUnused(output);
546  armnn::IgnoreUnused(descriptor);
547 
548  return IsClBackendSupported(reasonIfUnsupported);
549 }
550 
552  const TensorInfo& output,
553  Optional<std::string&> reasonIfUnsupported) const
554 {
556  reasonIfUnsupported,
557  input,
558  output);
559 }
560 
562  const TensorInfo& output,
563  const TensorInfo& weights,
564  const TensorInfo& biases,
565  const FullyConnectedDescriptor& descriptor,
566  Optional<std::string&> reasonIfUnsupported) const
567 {
569  reasonIfUnsupported,
570  input,
571  output,
572  weights,
573  biases,
574  descriptor,
575  nullptr);
576 }
577 
579  const TensorInfo& input1,
580  const TensorInfo& output,
581  const GatherDescriptor& descriptor,
582  Optional<std::string&> reasonIfUnsupported) const
583 {
585  reasonIfUnsupported,
586  input0,
587  input1,
588  output,
589  descriptor);
590 }
591 
593  Optional<std::string&> reasonIfUnsupported) const
594 {
595  return IsClBackendSupported(reasonIfUnsupported, input);
596 }
597 
599  const TensorInfo& output,
600  const InstanceNormalizationDescriptor& descriptor,
601  Optional<std::string&> reasonIfUnsupported) const
602 {
604  reasonIfUnsupported,
605  input,
606  output,
607  descriptor);
608 }
609 
611  const TensorInfo& output,
612  const L2NormalizationDescriptor& descriptor,
613  Optional<std::string&> reasonIfUnsupported) const
614 {
616  reasonIfUnsupported,
617  input,
618  output,
619  descriptor);
620 }
621 
623  const TensorInfo& input1,
624  const TensorInfo& output,
625  const LogicalBinaryDescriptor& descriptor,
626  Optional<std::string&> reasonIfUnsupported) const
627 {
628  IgnoreUnused(output);
629 
630  switch(descriptor.m_Operation)
631  {
634  reasonIfUnsupported,
635  input0,
636  input1,
637  output);
640  reasonIfUnsupported,
641  input0,
642  input1,
643  output);
644  default:
645  return false;
646  }
647 }
648 
649 
651  const TensorInfo& output,
652  const LogSoftmaxDescriptor& descriptor,
653  Optional<std::string&> reasonIfUnsupported) const
654 {
656  reasonIfUnsupported,
657  input,
658  output,
659  descriptor);
660 }
661 
663  const TensorInfo& outputStateIn,
664  const TensorInfo& cellStateIn,
665  const TensorInfo& scratchBuffer,
666  const TensorInfo& outputStateOut,
667  const TensorInfo& cellStateOut,
668  const TensorInfo& output,
669  const LstmDescriptor& descriptor,
670  const LstmInputParamsInfo& paramsInfo,
671  Optional<std::string&> reasonIfUnsupported) const
672 {
674  reasonIfUnsupported,
675  input,
676  outputStateIn,
677  cellStateIn,
678  scratchBuffer,
679  outputStateOut,
680  cellStateOut,
681  output,
682  descriptor,
683  paramsInfo);
684 }
685 
687  const TensorInfo& input1,
688  const TensorInfo& output,
689  Optional<std::string&> reasonIfUnsupported) const
690 {
692  reasonIfUnsupported,
693  input0,
694  input1,
695  output);
696 }
697 
699  const TensorInfo& output,
700  const MeanDescriptor& descriptor,
701  Optional<std::string&> reasonIfUnsupported) const
702 {
704  reasonIfUnsupported,
705  input,
706  output,
707  descriptor);
708 }
709 
711  const TensorInfo& input1,
712  const TensorInfo& output,
713  Optional<std::string&> reasonIfUnsupported) const
714 {
716  reasonIfUnsupported,
717  input0,
718  input1,
719  output);
720 }
721 
723  const TensorInfo& input1,
724  const TensorInfo& output,
725  Optional<std::string&> reasonIfUnsupported) const
726 {
728  reasonIfUnsupported,
729  input0,
730  input1,
731  output,
732  nullptr);
733 }
734 
736  const TensorInfo& output,
737  const NormalizationDescriptor& descriptor,
738  Optional<std::string&> reasonIfUnsupported) const
739 {
740  FORWARD_WORKLOAD_VALIDATE_FUNC(ClNormalizationWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
741 }
742 
744  Optional<std::string&> reasonIfUnsupported) const
745 {
746  return IsClBackendSupported(reasonIfUnsupported, output);
747 }
748 
750  const TensorInfo& output,
751  const PadDescriptor& descriptor,
752  Optional<std::string&> reasonIfUnsupported) const
753 {
755  reasonIfUnsupported,
756  input,
757  output,
758  descriptor);
759 }
760 
762  const TensorInfo& output,
763  const PermuteDescriptor& descriptor,
764  Optional<std::string&> reasonIfUnsupported) const
765 {
766  FORWARD_WORKLOAD_VALIDATE_FUNC(ClPermuteWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
767 }
768 
770  const TensorInfo& output,
771  const Pooling2dDescriptor& descriptor,
772  Optional<std::string&> reasonIfUnsupported) const
773 {
774  FORWARD_WORKLOAD_VALIDATE_FUNC(ClPooling2dWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
775 }
776 
778  const armnn::TensorInfo &alpha,
779  const armnn::TensorInfo &output,
780  armnn::Optional<std::string &> reasonIfUnsupported) const
781 {
782  FORWARD_WORKLOAD_VALIDATE_FUNC(ClPreluWorkloadValidate, reasonIfUnsupported, input, alpha, output);
783 }
784 
786  const TensorInfo& previousOutputIn,
787  const TensorInfo& previousCellStateIn,
788  const TensorInfo& outputStateOut,
789  const TensorInfo& cellStateOut,
790  const TensorInfo& output,
791  const QLstmDescriptor& descriptor,
792  const LstmInputParamsInfo& paramsInfo,
793  Optional<std::string&> reasonIfUnsupported) const
794 {
795  if (input.GetDataType() == armnn::DataType::QAsymmS8 &&
796  previousOutputIn.GetDataType() == armnn::DataType::QAsymmS8 &&
797  previousCellStateIn.GetDataType() == armnn::DataType::QSymmS16 &&
798  outputStateOut.GetDataType() == armnn::DataType::QAsymmS8 &&
799  cellStateOut.GetDataType() == armnn::DataType::QSymmS16 &&
801  {
803  reasonIfUnsupported,
804  input,
805  previousCellStateIn,
806  previousOutputIn,
807  cellStateOut,
808  outputStateOut,
809  output,
810  descriptor,
811  paramsInfo);
812  }
813  else
814  {
815  return false;
816  }
817 }
818 
820  const TensorInfo& previousCellStateIn,
821  const TensorInfo& previousOutputIn,
822  const TensorInfo& cellStateOut,
823  const TensorInfo& output,
824  const QuantizedLstmInputParamsInfo& paramsInfo,
825  Optional<std::string&> reasonIfUnsupported) const
826 {
828  reasonIfUnsupported,
829  input,
830  previousCellStateIn,
831  previousOutputIn,
832  cellStateOut,
833  output,
834  paramsInfo);
835 }
836 
838  const TensorInfo& output,
839  Optional<std::string&> reasonIfUnsupported) const
840 {
842  reasonIfUnsupported,
843  input,
844  output);
845 }
846 
848  const TensorInfo& output,
849  const ReduceDescriptor& descriptor,
850  Optional<std::string&> reasonIfUnsupported) const
851 {
853  reasonIfUnsupported,
854  input,
855  output,
856  descriptor);
857 }
858 
860  const TensorInfo& output,
861  const ReshapeDescriptor& descriptor,
862  Optional<std::string&> reasonIfUnsupported) const
863 {
864  IgnoreUnused(descriptor);
865  FORWARD_WORKLOAD_VALIDATE_FUNC(ClReshapeWorkloadValidate, reasonIfUnsupported, input, output);
866 }
867 
869  const TensorInfo& output,
870  const ResizeDescriptor& descriptor,
871  Optional<std::string&> reasonIfUnsupported) const
872 {
873  FORWARD_WORKLOAD_VALIDATE_FUNC(ClResizeWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
874 }
875 
877  const TensorInfo& output,
878  const SliceDescriptor& descriptor,
879  Optional<std::string&> reasonIfUnsupported) const
880 {
881  FORWARD_WORKLOAD_VALIDATE_FUNC(ClSliceWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
882 }
883 
885  const TensorInfo& output,
886  const SoftmaxDescriptor& descriptor,
887  Optional<std::string&> reasonIfUnsupported) const
888 {
889  FORWARD_WORKLOAD_VALIDATE_FUNC(ClSoftmaxWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
890 }
891 
893  const TensorInfo& output,
894  const SpaceToBatchNdDescriptor& descriptor,
895  Optional<std::string&> reasonIfUnsupported) const
896 {
898  reasonIfUnsupported,
899  input,
900  output,
901  descriptor);
902 }
903 
905  const TensorInfo& output,
906  const SpaceToDepthDescriptor& descriptor,
907  Optional<std::string&> reasonIfUnsupported) const
908 {
910  reasonIfUnsupported,
911  input,
912  output,
913  descriptor);
914 }
915 
917  const std::vector<std::reference_wrapper<TensorInfo>>& outputs,
918  const ViewsDescriptor& descriptor,
919  Optional<std::string&> reasonIfUnsupported) const
920 {
921 #if defined(ARMCOMPUTECL_ENABLED)
922  // Split along the last dimension, cannot use sub-tensors
923  // as width and height of the sub-tensors do not match
924  // the width and height of the parent tensor
925  // in case of input with more than 2D.
926  std::set<unsigned int> splitAxis = ComputeSplitAxis(descriptor, input.GetShape());
927  if (descriptor.GetNumDimensions() > 2 && splitAxis.size() == 1 &&
928  *splitAxis.begin() == descriptor.GetNumDimensions() - 1 )
929  {
931  reasonIfUnsupported,
932  input,
933  outputs,
934  *splitAxis.begin());
935  }
936 #endif
937  IgnoreUnused(descriptor);
938  for (auto output : outputs)
939  {
940  if (!input.IsTypeSpaceMatch(output)) // Cannot use sub-tensors if the types are not same space
941  {
942  SetValueChecked(reasonIfUnsupported, "Cl Splitter: Types and quantization parameters must match.");
943  return false;
944  }
945  }
946  return true;
947 }
948 
949 bool ClLayerSupport::IsStackSupported(const std::vector<const TensorInfo*>& inputs,
950  const TensorInfo& output,
951  const StackDescriptor& descriptor,
952  Optional<std::string&> reasonIfUnsupported) const
953 {
955  reasonIfUnsupported,
956  inputs,
957  output,
958  descriptor);
959 }
960 
962  const TensorInfo& output,
963  const StridedSliceDescriptor& descriptor,
964  Optional<std::string&> reasonIfUnsupported) const
965 {
967  reasonIfUnsupported,
968  input,
969  output,
970  descriptor);
971 }
972 
974  const TensorInfo& input1,
975  const TensorInfo& output,
976  Optional<std::string&> reasonIfUnsupported) const
977 {
979  reasonIfUnsupported,
980  input0,
981  input1,
982  output,
983  nullptr);
984 }
985 
987  const TensorInfo& output,
988  const TransposeConvolution2dDescriptor& descriptor,
989  const TensorInfo& weights,
990  const Optional<TensorInfo>& biases,
991  Optional<std::string&> reasonIfUnsupported) const
992 {
994  reasonIfUnsupported,
995  input,
996  output,
997  descriptor,
998  weights,
999  biases);
1000 }
1001 
1003  const TensorInfo& output,
1004  const TransposeDescriptor& descriptor,
1005  Optional<std::string&> reasonIfUnsupported) const
1006 {
1007  FORWARD_WORKLOAD_VALIDATE_FUNC(ClTransposeWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
1008 }
1009 
1010 } // namespace armnn
bool IsMinimumSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
arm_compute::Status ClAdditionValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ActivationDescriptor *activationDescriptor)
bool IsOutputSupported(const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsTransposeSupported(const TensorInfo &input, const TensorInfo &output, const TransposeDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
arm_compute::Status ClBatchToSpaceNdWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const BatchToSpaceNdDescriptor &descriptor)
arm_compute::Status ClLogicalOrWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output)
arm_compute::Status ClDequantizeWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
bool IsFloorSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
arm_compute::Status ClComparisonWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ComparisonDescriptor &descriptor)
UnaryOperation m_Operation
Specifies the elementwiseUnary operation to execute.
arm_compute::Status ClFullyConnectedWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const TensorInfo &weights, const TensorInfo &biases, const FullyConnectedDescriptor &descriptor, const ActivationDescriptor *activationDescriptor)
A ViewsDescriptor for the SplitterLayer.
arm_compute::Status ClL2NormalizationWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const L2NormalizationDescriptor &descriptor)
arm_compute::Status ClConvertFp32ToFp16WorkloadValidate(const TensorInfo &input, const TensorInfo &output)
bool IsTypeSpaceMatch(const TensorInfo &other) const
Check that the types are the same and, if quantize, that the quantization parameters are the same...
Definition: Tensor.cpp:434
arm_compute::Status ClDivisionWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ActivationDescriptor *activationDescriptor)
bool IsArgMinMaxSupported(const TensorInfo &input, const TensorInfo &output, const ArgMinMaxDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsReduceSupported(const TensorInfo &input, const TensorInfo &output, const ReduceDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
A TransposeConvolution2dDescriptor for the TransposeConvolution2dLayer.
arm_compute::Status ClConvolution3dWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const Convolution3dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, bool isFastMathEnabled, const ActivationDescriptor *activationDescriptor)
const TensorShape & GetShape() const
Definition: Tensor.hpp:191
arm_compute::Status ClInstanceNormalizationWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const InstanceNormalizationDescriptor &descriptor)
arm_compute::Status ClLogSoftmaxWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const LogSoftmaxDescriptor &descriptor)
A ReshapeDescriptor for the ReshapeLayer.
bool IsSubtractionSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsPreluSupported(const TensorInfo &input, const TensorInfo &alpha, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
arm_compute::Status ClSplitterWorkloadValidate(const TensorInfo &input, const std::vector< std::reference_wrapper< TensorInfo >> &outputs, unsigned int splitAxis)
uint32_t GetNumDimensions() const
Get the number of dimensions.
A ComparisonDescriptor for the ComparisonLayer.
Definition: Descriptors.hpp:78
bool IsSliceSupported(const TensorInfo &input, const TensorInfo &output, const SliceDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
arm_compute::Status ClSliceWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const SliceDescriptor &descriptor)
A Convolution2dDescriptor for the Convolution2dLayer.
bool IsQuantizeSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
arm_compute::Status ClSubtractionValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ActivationDescriptor *activationDescriptor)
arm_compute::Status ClLstmFloatWorkloadValidate(const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &scratchBuffer, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const LstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo)
bool IsPadSupported(const TensorInfo &input, const TensorInfo &output, const PadDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
arm_compute::Status ClPermuteWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const PermuteDescriptor &descriptor)
bool IsLstmSupported(const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &scratchBuffer, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const LstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
arm_compute::Status ClTransposeWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const TransposeDescriptor &descriptor)
arm_compute::Status ClReshapeWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
bool IsStridedSliceSupported(const TensorInfo &input, const TensorInfo &output, const StridedSliceDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
A LogicalBinaryDescriptor for the LogicalBinaryLayer.
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported,...)
bool IsLogicalBinarySupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const LogicalBinaryDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported) const override
bool IsLogSoftmaxSupported(const TensorInfo &input, const TensorInfo &output, const LogSoftmaxDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
Copyright (c) 2021 ARM Limited and Contributors.
arm_compute::Status ClConvolution2dWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const Convolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, bool isFastMathEnabled, const ActivationDescriptor *activationDescriptor)
void IgnoreUnused(Ts &&...)
std::set< unsigned int > ComputeSplitAxis(const armnn::SplitterDescriptor &desc, const TensorShape &input)
bool IsPermuteSupported(const TensorInfo &input, const TensorInfo &output, const PermuteDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsComparisonSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ComparisonDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
A SpaceToDepthDescriptor for the SpaceToDepthLayer.
bool IsNormalizationSupported(const TensorInfo &input, const TensorInfo &output, const NormalizationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
LogicalBinaryOperation m_Operation
Specifies the logical operation to execute.
A BatchToSpaceNdDescriptor for the BatchToSpaceNdLayer.
bool IsConvolution2dSupported(const TensorInfo &input, const TensorInfo &output, const Convolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
arm_compute::Status ClMeanValidate(const TensorInfo &input, const TensorInfo &output, const MeanDescriptor &descriptor)
arm_compute::Status ClSinWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
arm_compute::Status ClChannelShuffleValidate(const TensorInfo &input, const TensorInfo &output, const ChannelShuffleDescriptor &descriptor)
bool IsConvolution3dSupported(const TensorInfo &input, const TensorInfo &output, const Convolution3dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
A ResizeBilinearDescriptor for the ResizeBilinearLayer.
arm_compute::Status ClSoftmaxWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const SoftmaxDescriptor &descriptor)
A StackDescriptor for the StackLayer.
bool IsQuantizedLstmSupported(const TensorInfo &input, const TensorInfo &previousCellStateIn, const TensorInfo &previousOutputIn, const TensorInfo &cellStateOut, const TensorInfo &output, const QuantizedLstmInputParamsInfo &paramsInfo, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
arm_compute::Status ClNormalizationWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const NormalizationDescriptor &descriptor)
arm_compute::Status ClPooling2dWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const Pooling2dDescriptor &descriptor)
A PadDescriptor for the PadLayer.
bool IsElementwiseUnarySupported(const TensorInfo &input, const TensorInfo &output, const ElementwiseUnaryDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
arm_compute::Status ClLogicalNotWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
bool IsGatherSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const GatherDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported) const override
arm_compute::Status ClDepthToSpaceWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const DepthToSpaceDescriptor &descriptor)
DataType
Definition: Types.hpp:35
bool IsBatchNormalizationSupported(const TensorInfo &input, const TensorInfo &output, const TensorInfo &mean, const TensorInfo &var, const TensorInfo &beta, const TensorInfo &gamma, const BatchNormalizationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
An LstmDescriptor for the LstmLayer.
bool IsConvertFp16ToFp32Supported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsDepthToSpaceSupported(const TensorInfo &input, const TensorInfo &output, const DepthToSpaceDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
arm_compute::Status ClStackWorkloadValidate(const std::vector< const TensorInfo *> &inputs, const TensorInfo &output, const StackDescriptor &descriptor)
arm_compute::Status ClConstantWorkloadValidate(const TensorInfo &output)
bool IsSpaceToBatchNdSupported(const TensorInfo &input, const TensorInfo &output, const SpaceToBatchNdDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsResizeSupported(const TensorInfo &input, const TensorInfo &output, const ResizeDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
std::shared_ptr< IBackendModelContext > IBackendSpecificModelContextPtr
arm_compute::Status ClPadValidate(const TensorInfo &input, const TensorInfo &output, const PadDescriptor &descriptor)
A L2NormalizationDescriptor for the L2NormalizationLayer.
arm_compute::Status ClSpaceToBatchNdWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const SpaceToBatchNdDescriptor &descriptor)
An ArgMinMaxDescriptor for ArgMinMaxLayer.
Definition: Descriptors.hpp:56
DataType GetDataType() const
Definition: Tensor.hpp:198
An OriginsDescriptor for the ConcatLayer.
A ReduceDescriptor for the REDUCE operators.
A FullyConnectedDescriptor for the FullyConnectedLayer.
arm_compute::Status ClStridedSliceWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const StridedSliceDescriptor &descriptor)
arm_compute::Status ClMultiplicationWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ActivationDescriptor *activationDescriptor)
A GatherDescriptor for the GatherLayer.
arm_compute::Status ClConvertFp16ToFp32WorkloadValidate(const TensorInfo &input, const TensorInfo &output)
bool IsSplitterSupported(const TensorInfo &input, const std::vector< std::reference_wrapper< TensorInfo >> &outputs, const ViewsDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
Status
enumeration
Definition: Types.hpp:29
arm_compute::Status ClGatherWorkloadValidate(const TensorInfo &input, const TensorInfo &indices, const TensorInfo &output, const GatherDescriptor &descriptor)
bool IsFillSupported(const TensorInfo &input, const TensorInfo &output, const FillDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsStackSupported(const std::vector< const TensorInfo *> &inputs, const TensorInfo &output, const StackDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsReshapeSupported(const TensorInfo &input, const TensorInfo &output, const ReshapeDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsDilatedDepthwiseConvolutionSupported(const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string &> reason=EmptyOptional()) const override
A QLstmDescriptor for the QLstmLayer.
An ActivationDescriptor for the ActivationLayer.
Definition: Descriptors.hpp:25
arm_compute::Status ClSpaceToDepthWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const SpaceToDepthDescriptor &descriptor)
bool IsChannelShuffleSupported(const TensorInfo &input, const TensorInfo &output, const ChannelShuffleDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsQLstmSupported(const TensorInfo &input, const TensorInfo &previousOutputIn, const TensorInfo &previousCellStateIn, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const QLstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
arm_compute::Status ClBatchNormalizationValidate(const TensorInfo &input, const TensorInfo &output, const TensorInfo &mean, const TensorInfo &var, const TensorInfo &beta, const TensorInfo &gamma, const BatchNormalizationDescriptor &descriptor, const ActivationDescriptor *activationDescriptor)
A SliceDescriptor for the SliceLayer.
A Convolution3dDescriptor for the Convolution3dLayer.
bool IsDivisionSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsMultiplicationSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
arm_compute::Status ClResizeWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const ResizeDescriptor &descriptor)
bool IsFullyConnectedSupported(const TensorInfo &input, const TensorInfo &output, const TensorInfo &weights, const TensorInfo &biases, const FullyConnectedDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
void SetValueChecked(Optional< T &> optionalRef, V &&val)
bool IsDequantizeSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
A SpaceToBatchNdDescriptor for the SpaceToBatchNdLayer.
bool IsCastSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
A ElementwiseUnaryDescriptor for the ElementwiseUnaryLayer.
Definition: Descriptors.hpp:98
bool IsAdditionSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsMaximumSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
arm_compute::Status ClPreluWorkloadValidate(const TensorInfo &input, const TensorInfo &alpha, const TensorInfo &output)
arm_compute::Status ClCastValidate(const TensorInfo &input, const TensorInfo &output)
arm_compute::Status ClAbsWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
bool IsMeanSupported(const TensorInfo &input, const TensorInfo &output, const MeanDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
arm_compute::Status ClMinimumWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output)
uint32_t GetNumDimensions() const
Get the number of dimensions.
A MeanDescriptor for the MeanLayer.
arm_compute::Status ClNegWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
bool IsInputSupported(const TensorInfo &input, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
arm_compute::Status ClArgMinMaxWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const ArgMinMaxDescriptor &descriptor)
A TransposeDescriptor for the TransposeLayer.
A StridedSliceDescriptor for the StridedSliceLayer.
arm_compute::Status ClConcatWorkloadValidate(const std::vector< const TensorInfo *> &inputs, const TensorInfo &output, const OriginsDescriptor &descriptor)
bool IsSoftmaxSupported(const TensorInfo &input, const TensorInfo &output, const SoftmaxDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
arm_compute::Status ClExpWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
arm_compute::Status ClQLstmWorkloadValidate(const TensorInfo &input, const TensorInfo &cellStateIn, const TensorInfo &outputStateIn, const TensorInfo &cellStateOut, const TensorInfo &outputStateOut, const TensorInfo &output, const QLstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo)
bool IsConcatSupported(const std::vector< const TensorInfo *> inputs, const TensorInfo &output, const ConcatDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
arm_compute::Status ClTransposeConvolution2dWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const TransposeConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases)
bool IsL2NormalizationSupported(const TensorInfo &input, const TensorInfo &output, const L2NormalizationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
arm_compute::Status ClActivationWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const ActivationDescriptor &descriptor)
bool IsDepthwiseConvolutionSupported(const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
A Pooling2dDescriptor for the Pooling2dLayer.
A NormalizationDescriptor for the NormalizationLayer.
arm_compute::Status ClLogWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
The ClBackendModelContext is used to pass in CL specific backend ModelOptions.
An InstanceNormalizationDescriptor for InstanceNormalizationLayer.
bool IsConvertFp32ToFp16Supported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
unsigned int GetConcatAxis() const
Get the concatenation axis value.
bool IsInstanceNormalizationSupported(const TensorInfo &input, const TensorInfo &output, const InstanceNormalizationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
A ChannelShuffleDescriptor for the ChannelShuffle operator.
bool IsPooling2dSupported(const TensorInfo &input, const TensorInfo &output, const Pooling2dDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
arm_compute::Status ClMaximumWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output)
bool IsActivationSupported(const TensorInfo &input, const TensorInfo &output, const ActivationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsSupportedForDataTypeGeneric(Optional< std::string &> reasonIfUnsupported, DataType dataType, Float16Func float16FuncPtr, Float32Func float32FuncPtr, Uint8Func uint8FuncPtr, Int32Func int32FuncPtr, BooleanFunc booleanFuncPtr, Params &&... params)
A SoftmaxDescriptor for the SoftmaxLayer.
arm_compute::Status ClDepthwiseConvolutionWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, const ActivationDescriptor *activationDescriptor)
arm_compute::Status ClQuantizedLstmWorkloadValidate(const TensorInfo &input, const TensorInfo &previousCellStateIn, const TensorInfo &previousOutputIn, const TensorInfo &cellStateOut, const TensorInfo &output, const QuantizedLstmInputParamsInfo &paramsInfo)
arm_compute::Status ClFloorWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
arm_compute::Status ClRsqrtWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
bool IsSpaceToDepthSupported(const TensorInfo &input, const TensorInfo &output, const SpaceToDepthDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
A DepthwiseConvolution2dDescriptor for the DepthwiseConvolution2dLayer.
A FillDescriptor for the FillLayer.
A BatchNormalizationDescriptor for the BatchNormalizationLayer.
arm_compute::Status ClLogicalAndWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output)
arm_compute::Status ClQuantizeWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
A PermuteDescriptor for the PermuteLayer.
bool IsBatchToSpaceNdSupported(const TensorInfo &input, const TensorInfo &output, const BatchToSpaceNdDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsConstantSupported(const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsTransposeConvolution2dSupported(const TensorInfo &input, const TensorInfo &output, const TransposeConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
arm_compute::Status ClReduceWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const ReduceDescriptor &descriptor)