ArmNN 20.08
ClLayerSupport.cpp
1 //
2 // Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
6 #include "ClLayerSupport.hpp"
7 #include "ClBackendId.hpp"
8 
10 #include <armnn/Descriptors.hpp>
12 
13 #include <InternalTypes.hpp>
14 #include <LayerSupportCommon.hpp>
15 
16 #if defined(ARMCOMPUTECL_ENABLED)
70 #endif
71 
72 
73 namespace armnn
74 {
75 
76 namespace
77 {
78 
79 template<unsigned int FilterSize>
80 bool IsMatchingSize2d(const TensorInfo& weightInfo)
81 {
82  // Width & Height must match.
83  return (weightInfo.GetShape()[3] == FilterSize) && (weightInfo.GetShape()[2] == FilterSize);
84 }
85 
86 template<uint32_t ValidStride>
87 bool IsMatchingStride(uint32_t actualStride)
88 {
89  return ValidStride == actualStride;
90 }
91 
92 template<uint32_t FirstStride, uint32_t SecondStride, uint32_t... ValidStrides>
93 bool IsMatchingStride(uint32_t actualStride)
94 {
95  return IsMatchingStride<FirstStride>(actualStride) || IsMatchingStride<SecondStride, ValidStrides...>(actualStride);
96 }
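// Illustrative sketch (not part of the original file): a caller could test a
// stride against a compile-time set of valid values like this, where the set
// {1, 2, 3} is purely hypothetical:
//     bool ok = IsMatchingStride<1, 2, 3>(stride);
//     // expands to: (1 == stride) || (2 == stride) || (3 == stride)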
97 
98 template<typename ... Args>
99 bool IsClBackendSupported(Optional<std::string&> reasonIfUnsupported, Args... args)
100 {
101  IgnoreUnused(reasonIfUnsupported, (args)...);
102 #if defined(ARMCOMPUTECL_ENABLED)
103  return true;
104 #else
105  if (reasonIfUnsupported)
106  {
107  reasonIfUnsupported.value() = "The armnn library has been built without CL support";
108  }
109  return false;
110 #endif
111 }
112 
113 #if defined(ARMCOMPUTECL_ENABLED)
114 #define FORWARD_CL_LAYER_SUPPORT_FUNC(expr) (expr)
115 #else
116 #define FORWARD_CL_LAYER_SUPPORT_FUNC(expr) IsClBackendSupported(reasonIfUnsupported)
117 #endif
118 
119 #if defined(ARMCOMPUTECL_ENABLED)
120 template<class FuncType, class... Args>
121 inline bool IsWorkloadSupported(FuncType&& func, Optional<std::string&> reasonIfUnsupported, Args&&... args)
122 {
123  arm_compute::Status aclStatus = func(std::forward<Args>(args)...);
124  const bool supported = (aclStatus.error_code() == arm_compute::ErrorCode::OK);
125  if (!supported && reasonIfUnsupported)
126  {
127  reasonIfUnsupported.value() = aclStatus.error_description();
128  }
129  return supported;
130 }
131 
132 #define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported, ...) \
133  return IsWorkloadSupported(func, reasonIfUnsupported, __VA_ARGS__);
134 #else
135 #define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported, ...) \
136  return IsClBackendSupported(reasonIfUnsupported, __VA_ARGS__);
137 #endif
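// Illustrative sketch (not part of the original file): when ARMCOMPUTECL_ENABLED
// is defined, a call such as
//     FORWARD_WORKLOAD_VALIDATE_FUNC(ClSoftmaxWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
// expands to
//     return IsWorkloadSupported(ClSoftmaxWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
// so each Is*Supported method returns whether the corresponding arm_compute
// validate call succeeded, copying any error description into reasonIfUnsupported.
// Without CL support it falls back to IsClBackendSupported, which always fails
// with a fixed reason.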
138 
139 template<typename FloatFunc, typename Uint8Func, typename ... Params>
140 bool IsSupportedForDataTypeCl(Optional<std::string&> reasonIfUnsupported,
141  DataType dataType,
142  FloatFunc floatFuncPtr,
143  Uint8Func uint8FuncPtr,
144  Params&&... params)
145 {
146  return IsClBackendSupported(reasonIfUnsupported) &&
147  IsSupportedForDataTypeGeneric(reasonIfUnsupported,
148  dataType,
149  floatFuncPtr,
150  floatFuncPtr,
151  uint8FuncPtr,
152  &FalseFunc<>,
153  &FalseFunc<>,
154  std::forward<Params>(params)...);
155 }
156 } // anonymous namespace
157 
158 bool ClLayerSupport::IsAbsSupported(const TensorInfo& input,
159  const TensorInfo& output,
160  Optional<std::string&> reasonIfUnsupported) const
161 {
162  ElementwiseUnaryDescriptor descriptor(UnaryOperation::Abs);
163  return IsElementwiseUnarySupported(input, output, descriptor, reasonIfUnsupported);
164 }
165 
166 bool ClLayerSupport::IsActivationSupported(const TensorInfo& input,
167  const TensorInfo& output,
168  const ActivationDescriptor& descriptor,
169  Optional<std::string&> reasonIfUnsupported) const
170 {
171  FORWARD_WORKLOAD_VALIDATE_FUNC(ClActivationWorkloadValidate,
172  reasonIfUnsupported,
173  input,
174  output,
175  descriptor);
176 }
177 
178 bool ClLayerSupport::IsAdditionSupported(const TensorInfo& input0,
179  const TensorInfo& input1,
180  const TensorInfo& output,
181  Optional<std::string&> reasonIfUnsupported) const
182 {
183  FORWARD_WORKLOAD_VALIDATE_FUNC(ClAdditionValidate,
184  reasonIfUnsupported,
185  input0,
186  input1,
187  output);
188 }
189 
190 bool ClLayerSupport::IsArgMinMaxSupported(const TensorInfo& input,
191  const TensorInfo& output,
192  const ArgMinMaxDescriptor& descriptor,
193  Optional<std::string&> reasonIfUnsupported) const
194 {
195 
196  FORWARD_WORKLOAD_VALIDATE_FUNC(ClArgMinMaxWorkloadValidate,
197  reasonIfUnsupported,
198  input,
199  output,
200  descriptor);
201 }
202 
203 bool ClLayerSupport::IsBatchNormalizationSupported(const TensorInfo& input,
204  const TensorInfo& output,
205  const TensorInfo& mean,
206  const TensorInfo& var,
207  const TensorInfo& beta,
208  const TensorInfo& gamma,
209  const BatchNormalizationDescriptor& descriptor,
210  Optional<std::string&> reasonIfUnsupported) const
211 {
212  FORWARD_WORKLOAD_VALIDATE_FUNC(ClBatchNormalizationValidate,
213  reasonIfUnsupported,
214  input,
215  output,
216  mean,
217  var,
218  beta,
219  gamma,
220  descriptor);
221 }
222 
223 bool ClLayerSupport::IsBatchToSpaceNdSupported(const TensorInfo& input,
224  const TensorInfo& output,
225  const BatchToSpaceNdDescriptor& descriptor,
226  Optional<std::string&> reasonIfUnsupported) const
227 {
228  FORWARD_WORKLOAD_VALIDATE_FUNC(ClBatchToSpaceNdWorkloadValidate,
229  reasonIfUnsupported,
230  input,
231  output,
232  descriptor);
233 }
234 
235 bool ClLayerSupport::IsComparisonSupported(const TensorInfo& input0,
236  const TensorInfo& input1,
237  const TensorInfo& output,
238  const ComparisonDescriptor& descriptor,
239  Optional<std::string&> reasonIfUnsupported) const
240 {
241  FORWARD_WORKLOAD_VALIDATE_FUNC(ClComparisonWorkloadValidate,
242  reasonIfUnsupported,
243  input0,
244  input1,
245  output,
246  descriptor);
247 }
248 
249 bool ClLayerSupport::IsConcatSupported(const std::vector<const TensorInfo*> inputs,
250  const TensorInfo& output,
251  const ConcatDescriptor& descriptor,
252  Optional<std::string&> reasonIfUnsupported) const
253 {
254  if (descriptor.GetNumDimensions() <= descriptor.GetConcatAxis())
255  {
256  SetValueChecked(reasonIfUnsupported, "Cl Concat: Concat axis > Number of dimensions.");
257  return false;
258  }
259 
260  unsigned int concatInnerAxis = (descriptor.GetNumDimensions() - descriptor.GetConcatAxis()) - 1;
261  if(concatInnerAxis < 3) // Width, height, or channels
262  {
263  FORWARD_WORKLOAD_VALIDATE_FUNC(ClConcatWorkloadValidate,
264  reasonIfUnsupported,
265  inputs,
266  output,
267  descriptor);
268  }
269  else if (concatInnerAxis == 3)
270  {
271  // We rely on the sub-tensor optimization to handle the batch dimension for 4D tensors. If we can't use
272  // sub-tensors for this then we can't support it. Here is where we check that the sub-tensors will work.
273  for (auto& input : inputs)
274  {
275  if (input && !output.IsTypeSpaceMatch(*input)) // Cannot use sub-tensors if the types are not same space
276  {
277  SetValueChecked(reasonIfUnsupported, "Cl Concat: Types and quantization parameters must match.");
278  return false;
279  }
280  }
281  return true; // Sub-tensors support concat along batch
282  }
283  else // > 4 dimensions not supported.
284  {
285  SetValueChecked(reasonIfUnsupported, "Cl Concat: Maximum of 4 dimensions supported.");
286  return false;
287  }
288 }
289 
290 bool ClLayerSupport::IsConstantSupported(const TensorInfo& output,
291  Optional<std::string&> reasonIfUnsupported) const
292 {
293  FORWARD_WORKLOAD_VALIDATE_FUNC(ClConstantWorkloadValidate,
294  reasonIfUnsupported,
295  output);
296 }
297 
298 bool ClLayerSupport::IsConvertFp16ToFp32Supported(const TensorInfo& input,
299  const TensorInfo& output,
300  Optional<std::string&> reasonIfUnsupported) const
301 {
302  FORWARD_WORKLOAD_VALIDATE_FUNC(ClConvertFp16ToFp32WorkloadValidate,
303  reasonIfUnsupported,
304  input,
305  output);
306 }
307 
308 bool ClLayerSupport::IsConvertFp32ToFp16Supported(const TensorInfo& input,
309  const TensorInfo& output,
310  Optional<std::string&> reasonIfUnsupported) const
311 {
312  FORWARD_WORKLOAD_VALIDATE_FUNC(ClConvertFp32ToFp16WorkloadValidate,
313  reasonIfUnsupported,
314  input,
315  output);
316 }
317 
318 bool ClLayerSupport::IsConvolution2dSupported(const TensorInfo& input,
319  const TensorInfo& output,
320  const Convolution2dDescriptor& descriptor,
321  const TensorInfo& weights,
322  const Optional<TensorInfo>& biases,
323  Optional<std::string&> reasonIfUnsupported) const
324 {
325  FORWARD_WORKLOAD_VALIDATE_FUNC(ClConvolution2dWorkloadValidate,
326  reasonIfUnsupported,
327  input,
328  output,
329  descriptor,
330  weights,
331  biases);
332 }
333 
334 bool ClLayerSupport::IsDequantizeSupported(const TensorInfo& input,
335  const TensorInfo& output,
336  Optional<std::string&> reasonIfUnsupported) const
337 {
338  FORWARD_WORKLOAD_VALIDATE_FUNC(ClDequantizeWorkloadValidate,
339  reasonIfUnsupported,
340  input,
341  output);
342 }
343 
344 bool ClLayerSupport::IsDepthToSpaceSupported(const TensorInfo& input,
345  const TensorInfo& output,
346  const DepthToSpaceDescriptor& descriptor,
347  Optional<std::string&> reasonIfUnsupported) const
348 {
349  FORWARD_WORKLOAD_VALIDATE_FUNC(ClDepthToSpaceWorkloadValidate,
350  reasonIfUnsupported,
351  input,
352  output,
353  descriptor);
354 }
355 
356 bool ClLayerSupport::IsDepthwiseConvolutionSupported(const TensorInfo& input,
357  const TensorInfo& output,
358  const DepthwiseConvolution2dDescriptor& descriptor,
359  const TensorInfo& weights,
360  const Optional<TensorInfo>& biases,
361  Optional<std::string&> reasonIfUnsupported) const
362 {
363  FORWARD_WORKLOAD_VALIDATE_FUNC(ClDepthwiseConvolutionWorkloadValidate,
364  reasonIfUnsupported,
365  input,
366  output,
367  descriptor,
368  weights,
369  biases);
370 }
371 
372 bool ClLayerSupport::IsDilatedDepthwiseConvolutionSupported(const TensorInfo& input,
373  const TensorInfo& output,
374  const DepthwiseConvolution2dDescriptor& descriptor,
375  const TensorInfo& weights,
376  const Optional<TensorInfo>& biases,
377  Optional<std::string&> reasonIfUnsupported) const
378 {
379  FORWARD_WORKLOAD_VALIDATE_FUNC(ClDepthwiseConvolutionWorkloadValidate,
380  reasonIfUnsupported,
381  input,
382  output,
383  descriptor,
384  weights,
385  biases);
386 }
387 
388 
389 bool ClLayerSupport::IsDivisionSupported(const TensorInfo& input0,
390  const TensorInfo& input1,
391  const TensorInfo& output,
392  Optional<std::string&> reasonIfUnsupported) const
393 {
394  FORWARD_WORKLOAD_VALIDATE_FUNC(ClDivisionWorkloadValidate,
395  reasonIfUnsupported,
396  input0,
397  input1,
398  output);
399 }
400 
401 bool ClLayerSupport::IsElementwiseUnarySupported(const TensorInfo& input,
402  const TensorInfo& output,
403  const ElementwiseUnaryDescriptor& descriptor,
404  Optional<std::string&> reasonIfUnsupported) const
405 {
406  switch(descriptor.m_Operation)
407  {
408  case UnaryOperation::Abs:
409  FORWARD_WORKLOAD_VALIDATE_FUNC(ClAbsWorkloadValidate,
410  reasonIfUnsupported,
411  input,
412  output);
413  case UnaryOperation::Exp:
414  FORWARD_WORKLOAD_VALIDATE_FUNC(ClExpWorkloadValidate,
415  reasonIfUnsupported,
416  input,
417  output);
418  case UnaryOperation::Neg:
419  FORWARD_WORKLOAD_VALIDATE_FUNC(ClNegWorkloadValidate,
420  reasonIfUnsupported,
421  input,
422  output);
423  case UnaryOperation::Rsqrt:
424  FORWARD_WORKLOAD_VALIDATE_FUNC(ClRsqrtWorkloadValidate,
425  reasonIfUnsupported,
426  input,
427  output);
428  default:
429  return false;
430  }
431 }
432 
433 bool ClLayerSupport::IsFillSupported(const TensorInfo& input,
434  const TensorInfo& output,
435  const FillDescriptor& descriptor,
436  Optional<std::string&> reasonIfUnsupported) const
437 {
438  armnn::IgnoreUnused(input);
439  armnn::IgnoreUnused(output);
440  armnn::IgnoreUnused(descriptor);
441 
442  return IsClBackendSupported(reasonIfUnsupported);
443 }
444 
445 bool ClLayerSupport::IsFloorSupported(const TensorInfo& input,
446  const TensorInfo& output,
447  Optional<std::string&> reasonIfUnsupported) const
448 {
449  FORWARD_WORKLOAD_VALIDATE_FUNC(ClFloorWorkloadValidate,
450  reasonIfUnsupported,
451  input,
452  output);
453 }
454 
455 bool ClLayerSupport::IsFullyConnectedSupported(const TensorInfo& input,
456  const TensorInfo& output,
457  const TensorInfo& weights,
458  const TensorInfo& biases,
459  const FullyConnectedDescriptor& descriptor,
460  Optional<std::string&> reasonIfUnsupported) const
461 {
462  FORWARD_WORKLOAD_VALIDATE_FUNC(ClFullyConnectedWorkloadValidate,
463  reasonIfUnsupported,
464  input,
465  output,
466  weights,
467  biases,
468  descriptor);
469 }
470 
471 bool ClLayerSupport::IsGatherSupported(const TensorInfo& input0,
472  const TensorInfo& input1,
473  const TensorInfo& output,
474  const GatherDescriptor& descriptor,
475  Optional<std::string&> reasonIfUnsupported) const
476 {
477  FORWARD_WORKLOAD_VALIDATE_FUNC(ClGatherWorkloadValidate,
478  reasonIfUnsupported,
479  input0,
480  input1,
481  output,
482  descriptor);
483 }
484 
485 bool ClLayerSupport::IsGreaterSupported(const TensorInfo& input0,
486  const TensorInfo& input1,
487  const TensorInfo& output,
488  Optional<std::string&> reasonIfUnsupported) const
489 {
490  ComparisonDescriptor descriptor(ComparisonOperation::Greater);
491  return IsComparisonSupported(input0, input1, output, descriptor, reasonIfUnsupported);
492 }
493 
494 bool ClLayerSupport::IsInputSupported(const TensorInfo& input,
495  Optional<std::string&> reasonIfUnsupported) const
496 {
497  return IsClBackendSupported(reasonIfUnsupported, input);
498 }
499 
500 bool ClLayerSupport::IsInstanceNormalizationSupported(const TensorInfo& input,
501  const TensorInfo& output,
502  const InstanceNormalizationDescriptor& descriptor,
503  Optional<std::string&> reasonIfUnsupported) const
504 {
505  FORWARD_WORKLOAD_VALIDATE_FUNC(ClInstanceNormalizationWorkloadValidate,
506  reasonIfUnsupported,
507  input,
508  output,
509  descriptor);
510 }
511 
512 bool ClLayerSupport::IsL2NormalizationSupported(const TensorInfo& input,
513  const TensorInfo& output,
514  const L2NormalizationDescriptor& descriptor,
515  Optional<std::string&> reasonIfUnsupported) const
516 {
517  FORWARD_WORKLOAD_VALIDATE_FUNC(ClL2NormalizationWorkloadValidate,
518  reasonIfUnsupported,
519  input,
520  output,
521  descriptor);
522 }
523 
524 bool ClLayerSupport::IsLogSoftmaxSupported(const TensorInfo& input,
525  const TensorInfo& output,
526  const LogSoftmaxDescriptor& descriptor,
527  Optional<std::string&> reasonIfUnsupported) const
528 {
529  FORWARD_WORKLOAD_VALIDATE_FUNC(ClLogSoftmaxWorkloadValidate,
530  reasonIfUnsupported,
531  input,
532  output,
533  descriptor);
534 }
535 
536 bool ClLayerSupport::IsLstmSupported(const TensorInfo& input,
537  const TensorInfo& outputStateIn,
538  const TensorInfo& cellStateIn,
539  const TensorInfo& scratchBuffer,
540  const TensorInfo& outputStateOut,
541  const TensorInfo& cellStateOut,
542  const TensorInfo& output,
543  const LstmDescriptor& descriptor,
544  const LstmInputParamsInfo& paramsInfo,
545  Optional<std::string&> reasonIfUnsupported) const
546 {
547  FORWARD_WORKLOAD_VALIDATE_FUNC(ClLstmFloatWorkloadValidate,
548  reasonIfUnsupported,
549  input,
550  outputStateIn,
551  cellStateIn,
552  scratchBuffer,
553  outputStateOut,
554  cellStateOut,
555  output,
556  descriptor,
557  paramsInfo);
558 }
559 
560 bool ClLayerSupport::IsMaximumSupported(const TensorInfo& input0,
561  const TensorInfo& input1,
562  const TensorInfo& output,
563  Optional<std::string&> reasonIfUnsupported) const
564 {
565  FORWARD_WORKLOAD_VALIDATE_FUNC(ClMaximumWorkloadValidate,
566  reasonIfUnsupported,
567  input0,
568  input1,
569  output);
570 }
571 
572 bool ClLayerSupport::IsMeanSupported(const TensorInfo& input,
573  const TensorInfo& output,
574  const MeanDescriptor& descriptor,
575  Optional<std::string&> reasonIfUnsupported) const
576 {
577  FORWARD_WORKLOAD_VALIDATE_FUNC(ClMeanValidate,
578  reasonIfUnsupported,
579  input,
580  output,
581  descriptor);
582 }
583 
584 bool ClLayerSupport::IsMergerSupported(const std::vector<const TensorInfo*> inputs,
585  const TensorInfo& output,
586  const MergerDescriptor& descriptor,
587  Optional<std::string&> reasonIfUnsupported) const
588 {
589  return IsConcatSupported(inputs, output, descriptor, reasonIfUnsupported);
590 }
591 
592 bool ClLayerSupport::IsMinimumSupported(const TensorInfo& input0,
593  const TensorInfo& input1,
594  const TensorInfo& output,
595  Optional<std::string&> reasonIfUnsupported) const
596 {
597  FORWARD_WORKLOAD_VALIDATE_FUNC(ClMinimumWorkloadValidate,
598  reasonIfUnsupported,
599  input0,
600  input1,
601  output);
602 }
603 
604 bool ClLayerSupport::IsMultiplicationSupported(const TensorInfo& input0,
605  const TensorInfo& input1,
606  const TensorInfo& output,
607  Optional<std::string&> reasonIfUnsupported) const
608 {
609  FORWARD_WORKLOAD_VALIDATE_FUNC(ClMultiplicationWorkloadValidate,
610  reasonIfUnsupported,
611  input0,
612  input1,
613  output);
614 }
615 
616 bool ClLayerSupport::IsNormalizationSupported(const TensorInfo& input,
617  const TensorInfo& output,
618  const NormalizationDescriptor& descriptor,
619  Optional<std::string&> reasonIfUnsupported) const
620 {
621  FORWARD_WORKLOAD_VALIDATE_FUNC(ClNormalizationWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
622 }
623 
624 bool ClLayerSupport::IsOutputSupported(const TensorInfo& output,
625  Optional<std::string&> reasonIfUnsupported) const
626 {
627  return IsClBackendSupported(reasonIfUnsupported, output);
628 }
629 
630 bool ClLayerSupport::IsPadSupported(const TensorInfo& input,
631  const TensorInfo& output,
632  const PadDescriptor& descriptor,
633  Optional<std::string&> reasonIfUnsupported) const
634 {
635  FORWARD_WORKLOAD_VALIDATE_FUNC(ClPadValidate,
636  reasonIfUnsupported,
637  input,
638  output,
639  descriptor);
640 }
641 
642 bool ClLayerSupport::IsPermuteSupported(const TensorInfo& input,
643  const TensorInfo& output,
644  const PermuteDescriptor& descriptor,
645  Optional<std::string&> reasonIfUnsupported) const
646 {
647  FORWARD_WORKLOAD_VALIDATE_FUNC(ClPermuteWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
648 }
649 
650 bool ClLayerSupport::IsPooling2dSupported(const TensorInfo& input,
651  const TensorInfo& output,
652  const Pooling2dDescriptor& descriptor,
653  Optional<std::string&> reasonIfUnsupported) const
654 {
655  FORWARD_WORKLOAD_VALIDATE_FUNC(ClPooling2dWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
656 }
657 
658 bool ClLayerSupport::IsPreluSupported(const armnn::TensorInfo &input,
659  const armnn::TensorInfo &alpha,
660  const armnn::TensorInfo &output,
661  armnn::Optional<std::string &> reasonIfUnsupported) const
662 {
663  FORWARD_WORKLOAD_VALIDATE_FUNC(ClPreluWorkloadValidate, reasonIfUnsupported, input, alpha, output);
664 }
665 
666 bool ClLayerSupport::IsQLstmSupported(const TensorInfo& input,
667  const TensorInfo& previousOutputIn,
668  const TensorInfo& previousCellStateIn,
669  const TensorInfo& outputStateOut,
670  const TensorInfo& cellStateOut,
671  const TensorInfo& output,
672  const QLstmDescriptor& descriptor,
673  const LstmInputParamsInfo& paramsInfo,
674  Optional<std::string&> reasonIfUnsupported) const
675 {
676  if (input.GetDataType() == armnn::DataType::QAsymmS8 &&
677  previousOutputIn.GetDataType() == armnn::DataType::QAsymmS8 &&
678  previousCellStateIn.GetDataType() == armnn::DataType::QSymmS16 &&
679  outputStateOut.GetDataType() == armnn::DataType::QAsymmS8 &&
680  cellStateOut.GetDataType() == armnn::DataType::QSymmS16 &&
681  output.GetDataType() == armnn::DataType::QAsymmS8)
682  {
683  FORWARD_WORKLOAD_VALIDATE_FUNC(ClQLstmWorkloadValidate,
684  reasonIfUnsupported,
685  input,
686  previousCellStateIn,
687  previousOutputIn,
688  cellStateOut,
689  outputStateOut,
690  output,
691  descriptor,
692  paramsInfo);
693  }
694  else
695  {
696  return false;
697  }
698 }
699 
700 bool ClLayerSupport::IsQuantizedLstmSupported(const TensorInfo& input,
701  const TensorInfo& previousCellStateIn,
702  const TensorInfo& previousOutputIn,
703  const TensorInfo& cellStateOut,
704  const TensorInfo& output,
705  const QuantizedLstmInputParamsInfo& paramsInfo,
706  Optional<std::string&> reasonIfUnsupported) const
707 {
708  FORWARD_WORKLOAD_VALIDATE_FUNC(ClQuantizedLstmWorkloadValidate,
709  reasonIfUnsupported,
710  input,
711  previousCellStateIn,
712  previousOutputIn,
713  cellStateOut,
714  output,
715  paramsInfo);
716 }
717 
718 bool ClLayerSupport::IsQuantizeSupported(const TensorInfo& input,
719  const TensorInfo& output,
720  Optional<std::string&> reasonIfUnsupported) const
721 {
722  FORWARD_WORKLOAD_VALIDATE_FUNC(ClQuantizeWorkloadValidate,
723  reasonIfUnsupported,
724  input,
725  output);
726 }
727 
728 bool ClLayerSupport::IsReshapeSupported(const TensorInfo& input,
729  const TensorInfo& output,
730  const ReshapeDescriptor& descriptor,
731  Optional<std::string&> reasonIfUnsupported) const
732 {
733  IgnoreUnused(descriptor);
734  FORWARD_WORKLOAD_VALIDATE_FUNC(ClReshapeWorkloadValidate, reasonIfUnsupported, input, output);
735 }
736 
737 bool ClLayerSupport::IsResizeSupported(const TensorInfo& input,
738  const TensorInfo& output,
739  const ResizeDescriptor& descriptor,
740  Optional<std::string&> reasonIfUnsupported) const
741 {
742  FORWARD_WORKLOAD_VALIDATE_FUNC(ClResizeWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
743 }
744 
745 bool ClLayerSupport::IsResizeBilinearSupported(const TensorInfo& input,
746  const TensorInfo& output,
747  Optional<std::string&> reasonIfUnsupported) const
748 {
749  ResizeDescriptor descriptor;
750  descriptor.m_Method = ResizeMethod::Bilinear;
751  descriptor.m_DataLayout = DataLayout::NCHW;
752 
753  const TensorShape& outputShape = output.GetShape();
754  descriptor.m_TargetHeight = outputShape[2];
755  descriptor.m_TargetWidth = outputShape[3];
756 
757  return IsResizeSupported(input, output, descriptor, reasonIfUnsupported);
758 }
759 
760 bool ClLayerSupport::IsRsqrtSupported(const TensorInfo& input,
761  const TensorInfo& output,
762  Optional<std::string&> reasonIfUnsupported) const
763 {
764  ElementwiseUnaryDescriptor descriptor(UnaryOperation::Rsqrt);
765  return IsElementwiseUnarySupported(input, output, descriptor, reasonIfUnsupported);
766 }
767 
768 bool ClLayerSupport::IsSliceSupported(const TensorInfo& input,
769  const TensorInfo& output,
770  const SliceDescriptor& descriptor,
771  Optional<std::string&> reasonIfUnsupported) const
772 {
773  FORWARD_WORKLOAD_VALIDATE_FUNC(ClSliceWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
774 }
775 
776 bool ClLayerSupport::IsSoftmaxSupported(const TensorInfo& input,
777  const TensorInfo& output,
778  const SoftmaxDescriptor& descriptor,
779  Optional<std::string&> reasonIfUnsupported) const
780 {
781  FORWARD_WORKLOAD_VALIDATE_FUNC(ClSoftmaxWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
782 }
783 
784 bool ClLayerSupport::IsSpaceToBatchNdSupported(const TensorInfo& input,
785  const TensorInfo& output,
786  const SpaceToBatchNdDescriptor& descriptor,
787  Optional<std::string&> reasonIfUnsupported) const
788 {
789  FORWARD_WORKLOAD_VALIDATE_FUNC(ClSpaceToBatchNdWorkloadValidate,
790  reasonIfUnsupported,
791  input,
792  output,
793  descriptor);
794 }
795 
796 bool ClLayerSupport::IsSpaceToDepthSupported(const TensorInfo& input,
797  const TensorInfo& output,
798  const SpaceToDepthDescriptor& descriptor,
799  Optional<std::string&> reasonIfUnsupported) const
800 {
801  FORWARD_WORKLOAD_VALIDATE_FUNC(ClSpaceToDepthWorkloadValidate,
802  reasonIfUnsupported,
803  input,
804  output,
805  descriptor);
806 }
807 
808 bool ClLayerSupport::IsSplitterSupported(const TensorInfo& input,
809  const ViewsDescriptor& descriptor,
810  Optional<std::string&> reasonIfUnsupported) const
811 {
812  IgnoreUnused(descriptor);
813  return IsSupportedForDataTypeCl(reasonIfUnsupported,
814  input.GetDataType(),
815  &TrueFunc<>,
816  &TrueFunc<>);
817 }
818 
819 bool ClLayerSupport::IsSplitterSupported(const TensorInfo& input,
820  const std::vector<std::reference_wrapper<TensorInfo>>& outputs,
821  const ViewsDescriptor& descriptor,
822  Optional<std::string&> reasonIfUnsupported) const
823 {
824 #if defined(ARMCOMPUTECL_ENABLED)
825  // Split along the last dimension, cannot use sub-tensors
826  // as width and height of the sub-tensors do not match
827  // the width and height of the parent tensor
828  // in case of input with more than 2D.
829  std::set<unsigned int> splitAxis = ComputeSplitAxis(descriptor, input.GetShape());
830  if (descriptor.GetNumDimensions() > 2 && splitAxis.size() == 1 &&
831  *splitAxis.begin() == descriptor.GetNumDimensions() - 1 )
832  {
833  FORWARD_WORKLOAD_VALIDATE_FUNC(ClSplitterWorkloadValidate,
834  reasonIfUnsupported,
835  input,
836  outputs,
837  *splitAxis.begin());
838  }
839 #endif
840  IgnoreUnused(descriptor);
841  for (auto output : outputs)
842  {
843  if (!input.IsTypeSpaceMatch(output)) // Cannot use sub-tensors if the types are not same space
844  {
845  SetValueChecked(reasonIfUnsupported, "Cl Splitter: Types and quantization parameters must match.");
846  return false;
847  }
848  }
849  return true;
850 }
851 
852 bool ClLayerSupport::IsStackSupported(const std::vector<const TensorInfo*>& inputs,
853  const TensorInfo& output,
854  const StackDescriptor& descriptor,
855  Optional<std::string&> reasonIfUnsupported) const
856 {
857  FORWARD_WORKLOAD_VALIDATE_FUNC(ClStackWorkloadValidate,
858  reasonIfUnsupported,
859  inputs,
860  output,
861  descriptor);
862 }
863 
864 bool ClLayerSupport::IsStridedSliceSupported(const TensorInfo& input,
865  const TensorInfo& output,
866  const StridedSliceDescriptor& descriptor,
867  Optional<std::string&> reasonIfUnsupported) const
868 {
869  FORWARD_WORKLOAD_VALIDATE_FUNC(ClStridedSliceWorkloadValidate,
870  reasonIfUnsupported,
871  input,
872  output,
873  descriptor);
874 }
875 
876 bool ClLayerSupport::IsSubtractionSupported(const TensorInfo& input0,
877  const TensorInfo& input1,
878  const TensorInfo& output,
879  Optional<std::string&> reasonIfUnsupported) const
880 {
881  FORWARD_WORKLOAD_VALIDATE_FUNC(ClSubtractionValidate,
882  reasonIfUnsupported,
883  input0,
884  input1,
885  output);
886 }
887 
888 bool ClLayerSupport::IsTransposeConvolution2dSupported(const TensorInfo& input,
889  const TensorInfo& output,
890  const TransposeConvolution2dDescriptor& descriptor,
891  const TensorInfo& weights,
892  const Optional<TensorInfo>& biases,
893  Optional<std::string&> reasonIfUnsupported) const
894 {
895  FORWARD_WORKLOAD_VALIDATE_FUNC(ClTransposeConvolution2dWorkloadValidate,
896  reasonIfUnsupported,
897  input,
898  output,
899  descriptor,
900  weights,
901  biases);
902 }
903 
904 bool ClLayerSupport::IsTransposeSupported(const TensorInfo& input,
905  const TensorInfo& output,
906  const TransposeDescriptor& descriptor,
907  Optional<std::string&> reasonIfUnsupported) const
908 {
909  FORWARD_WORKLOAD_VALIDATE_FUNC(ClTransposeWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
910 }
911 
912 } // namespace armnn
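
Below is a minimal sketch of how a caller might query this class, assuming a CL-enabled build and that the backend header ClLayerSupport.hpp is on the include path; the tensor shapes and variable names are illustrative only.

#include "ClLayerSupport.hpp"

#include <armnn/Descriptors.hpp>
#include <armnn/Optional.hpp>
#include <armnn/Tensor.hpp>
#include <armnn/Types.hpp>

#include <iostream>
#include <string>

int main()
{
    using namespace armnn;

    ClLayerSupport clLayerSupport;

    // Hypothetical 1x16 float tensors for a ReLU activation.
    TensorInfo input({1, 16}, DataType::Float32);
    TensorInfo output({1, 16}, DataType::Float32);

    ActivationDescriptor descriptor;
    descriptor.m_Function = ActivationFunction::ReLu;

    // Pass a string by reference so the backend can report why a layer is rejected.
    std::string reason;
    const bool supported =
        clLayerSupport.IsActivationSupported(input, output, descriptor, Optional<std::string&>(reason));

    if (!supported)
    {
        std::cout << "Activation not supported on GpuAcc: " << reason << std::endl;
    }
    return supported ? 0 : 1;
}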