ArmNN 20.02
ClLayerSupport.cpp
//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "ClLayerSupport.hpp"
#include "ClBackendId.hpp"

#include <armnn/Descriptors.hpp>

#include <InternalTypes.hpp>
#include <LayerSupportCommon.hpp>

#if defined(ARMCOMPUTECL_ENABLED)
// ... per-workload validate headers (e.g. "workloads/ClActivationWorkload.hpp") ...
#endif


namespace armnn
{

namespace
{

template<unsigned int FilterSize>
bool IsMatchingSize2d(const TensorInfo& weightInfo)
{
    // Width & Height must match.
    return (weightInfo.GetShape()[3] == FilterSize) && (weightInfo.GetShape()[2] == FilterSize);
}

template<uint32_t ValidStride>
bool IsMatchingStride(uint32_t actualStride)
{
    return ValidStride == actualStride;
}

template<uint32_t FirstStride, uint32_t SecondStride, uint32_t... ValidStrides>
bool IsMatchingStride(uint32_t actualStride)
{
    return IsMatchingStride<FirstStride>(actualStride) || IsMatchingStride<SecondStride, ValidStrides...>(actualStride);
}

template<typename ... Args>
bool IsClBackendSupported(Optional<std::string&> reasonIfUnsupported, Args... args)
{
    IgnoreUnused(reasonIfUnsupported, (args)...);
#if defined(ARMCOMPUTECL_ENABLED)
    return true;
#else
    if (reasonIfUnsupported)
    {
        reasonIfUnsupported.value() = "The armnn library has been built without CL support";
    }
    return false;
#endif
}

#if defined(ARMCOMPUTECL_ENABLED)
#define FORWARD_CL_LAYER_SUPPORT_FUNC(expr) (expr)
#else
#define FORWARD_CL_LAYER_SUPPORT_FUNC(expr) IsClBackendSupported(reasonIfUnsupported)
#endif

#if defined(ARMCOMPUTECL_ENABLED)
template<class FuncType, class... Args>
inline bool IsWorkloadSupported(FuncType&& func, Optional<std::string&> reasonIfUnsupported, Args&&... args)
{
    arm_compute::Status aclStatus = func(std::forward<Args>(args)...);
    const bool supported = (aclStatus.error_code() == arm_compute::ErrorCode::OK);
    if (!supported && reasonIfUnsupported)
    {
        reasonIfUnsupported.value() = aclStatus.error_description();
    }
    return supported;
}

#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported, ...) \
    return IsWorkloadSupported(func, reasonIfUnsupported, __VA_ARGS__);
#else
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported, ...) \
    return IsClBackendSupported(reasonIfUnsupported, __VA_ARGS__);
#endif
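
// For example, with ARMCOMPUTECL_ENABLED defined, a call such as
//
//     FORWARD_WORKLOAD_VALIDATE_FUNC(ClActivationWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
//
// expands to
//
//     return IsWorkloadSupported(ClActivationWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
//
// so the arm_compute::Status returned by the ACL validate function is converted to a bool and,
// on failure, its error description is copied into reasonIfUnsupported. Without CL support the
// macro instead collapses to "return IsClBackendSupported(reasonIfUnsupported, __VA_ARGS__);",
// which always reports that the library was built without CL.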

template<typename FloatFunc, typename Uint8Func, typename ... Params>
bool IsSupportedForDataTypeCl(Optional<std::string&> reasonIfUnsupported,
                              DataType dataType,
                              FloatFunc floatFuncPtr,
                              Uint8Func uint8FuncPtr,
                              Params&&... params)
{
    return IsClBackendSupported(reasonIfUnsupported) &&
        IsSupportedForDataTypeGeneric(reasonIfUnsupported,
                                      dataType,
                                      floatFuncPtr,
                                      floatFuncPtr,
                                      uint8FuncPtr,
                                      &FalseFunc<>,
                                      &FalseFunc<>,
                                      std::forward<Params>(params)...);
}
} // anonymous namespace

bool ClLayerSupport::IsAbsSupported(const TensorInfo& input,
                                    const TensorInfo& output,
                                    Optional<std::string&> reasonIfUnsupported) const
{
    ElementwiseUnaryDescriptor descriptor(UnaryOperation::Abs);
    return IsElementwiseUnarySupported(input, output, descriptor, reasonIfUnsupported);
}

bool ClLayerSupport::IsActivationSupported(const TensorInfo& input,
                                           const TensorInfo& output,
                                           const ActivationDescriptor& descriptor,
                                           Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClActivationWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}

bool ClLayerSupport::IsAdditionSupported(const TensorInfo& input0,
                                         const TensorInfo& input1,
                                         const TensorInfo& output,
                                         Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClAdditionValidate,
                                   reasonIfUnsupported,
                                   input0,
                                   input1,
                                   output);
}

bool ClLayerSupport::IsArgMinMaxSupported(const TensorInfo& input,
                                          const TensorInfo& output,
                                          const ArgMinMaxDescriptor& descriptor,
                                          Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClArgMinMaxWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}

bool ClLayerSupport::IsBatchNormalizationSupported(const TensorInfo& input,
                                                   const TensorInfo& output,
                                                   const TensorInfo& mean,
                                                   const TensorInfo& var,
                                                   const TensorInfo& beta,
                                                   const TensorInfo& gamma,
                                                   const BatchNormalizationDescriptor& descriptor,
                                                   Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClBatchNormalizationValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   mean,
                                   var,
                                   beta,
                                   gamma,
                                   descriptor);
}

bool ClLayerSupport::IsBatchToSpaceNdSupported(const TensorInfo& input,
                                               const TensorInfo& output,
                                               const BatchToSpaceNdDescriptor& descriptor,
                                               Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClBatchToSpaceNdWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}

bool ClLayerSupport::IsComparisonSupported(const TensorInfo& input0,
                                           const TensorInfo& input1,
                                           const TensorInfo& output,
                                           const ComparisonDescriptor& descriptor,
                                           Optional<std::string&> reasonIfUnsupported) const
{
    if (descriptor.m_Operation == ComparisonOperation::Greater)
    {
        FORWARD_WORKLOAD_VALIDATE_FUNC(ClGreaterWorkloadValidate,
                                       reasonIfUnsupported,
                                       input0,
                                       input1,
                                       output);
    }

    return false;
}

bool ClLayerSupport::IsConcatSupported(const std::vector<const TensorInfo*> inputs,
                                       const TensorInfo& output,
                                       const ConcatDescriptor& descriptor,
                                       Optional<std::string&> reasonIfUnsupported) const
{
    if (descriptor.GetNumDimensions() <= descriptor.GetConcatAxis())
    {
        SetValueChecked(reasonIfUnsupported, "Cl Concat: Concat axis > Number of dimensions.");
        return false;
    }

    unsigned int concatInnerAxis = (descriptor.GetNumDimensions() - descriptor.GetConcatAxis()) - 1;
    if (concatInnerAxis < 3) // Width, height, or channels
    {
        FORWARD_WORKLOAD_VALIDATE_FUNC(ClConcatWorkloadValidate,
                                       reasonIfUnsupported,
                                       inputs,
                                       output,
                                       descriptor);
    }
    else if (concatInnerAxis == 3)
    {
        // We rely on the sub-tensor optimization to handle the batch dimension for 4D tensors. If we can't use
        // sub-tensors for this then we can't support it. Here is where we check that the sub-tensors will work.
        for (auto& input : inputs)
        {
            if (input && !output.IsTypeSpaceMatch(*input)) // Cannot use sub-tensors if the types are not same space
            {
                SetValueChecked(reasonIfUnsupported, "Cl Concat: Types and quantization parameters must match.");
                return false;
            }
        }
        return true; // Sub-tensors support concat along batch
    }
    else // > 4 dimensions not supported.
    {
        SetValueChecked(reasonIfUnsupported, "Cl Concat: Maximum of 4 dimensions supported.");
        return false;
    }
}
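
// For example, concatenating 4D NCHW tensors along the channel axis (GetNumDimensions() == 4,
// GetConcatAxis() == 1) gives concatInnerAxis = (4 - 1) - 1 = 2, so the request is forwarded to
// ClConcatWorkloadValidate, whereas concatenating along the batch axis (GetConcatAxis() == 0)
// gives concatInnerAxis = 3 and is only accepted when every input matches the output's type and
// quantization space, since that path relies on the sub-tensor optimization.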

bool ClLayerSupport::IsConstantSupported(const TensorInfo& output,
                                         Optional<std::string&> reasonIfUnsupported) const
{
    return IsSupportedForDataTypeCl(reasonIfUnsupported,
                                    output.GetDataType(),
                                    &TrueFunc<>,
                                    &TrueFunc<>);
}

bool ClLayerSupport::IsConvertFp16ToFp32Supported(const TensorInfo& input,
                                                  const TensorInfo& output,
                                                  Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClConvertFp16ToFp32WorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output);
}

bool ClLayerSupport::IsConvertFp32ToFp16Supported(const TensorInfo& input,
                                                  const TensorInfo& output,
                                                  Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClConvertFp32ToFp16WorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output);
}

bool ClLayerSupport::IsConvolution2dSupported(const TensorInfo& input,
                                              const TensorInfo& output,
                                              const Convolution2dDescriptor& descriptor,
                                              const TensorInfo& weights,
                                              const Optional<TensorInfo>& biases,
                                              Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClConvolution2dWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor,
                                   weights,
                                   biases);
}

bool ClLayerSupport::IsDequantizeSupported(const TensorInfo& input,
                                           const TensorInfo& output,
                                           Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClDequantizeWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output);
}

bool ClLayerSupport::IsDepthToSpaceSupported(const TensorInfo& input,
                                             const TensorInfo& output,
                                             const DepthToSpaceDescriptor& descriptor,
                                             Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClDepthToSpaceWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}

bool ClLayerSupport::IsDepthwiseConvolutionSupported(const TensorInfo& input,
                                                     const TensorInfo& output,
                                                     const DepthwiseConvolution2dDescriptor& descriptor,
                                                     const TensorInfo& weights,
                                                     const Optional<TensorInfo>& biases,
                                                     Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClDepthwiseConvolutionWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor,
                                   weights,
                                   biases);
}

bool ClLayerSupport::IsDilatedDepthwiseConvolutionSupported(const TensorInfo& input,
                                                            const TensorInfo& output,
                                                            const DepthwiseConvolution2dDescriptor& descriptor,
                                                            const TensorInfo& weights,
                                                            const Optional<TensorInfo>& biases,
                                                            Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClDepthwiseConvolutionWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor,
                                   weights,
                                   biases);
}


bool ClLayerSupport::IsDivisionSupported(const TensorInfo& input0,
                                         const TensorInfo& input1,
                                         const TensorInfo& output,
                                         Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClDivisionWorkloadValidate,
                                   reasonIfUnsupported,
                                   input0,
                                   input1,
                                   output);
}

bool ClLayerSupport::IsElementwiseUnarySupported(const TensorInfo& input,
                                                 const TensorInfo& output,
                                                 const ElementwiseUnaryDescriptor& descriptor,
                                                 Optional<std::string&> reasonIfUnsupported) const
{
    if (descriptor.m_Operation == UnaryOperation::Abs)
    {
        FORWARD_WORKLOAD_VALIDATE_FUNC(ClAbsWorkloadValidate,
                                       reasonIfUnsupported,
                                       input,
                                       output);
    }
    else if (descriptor.m_Operation == UnaryOperation::Rsqrt)
    {
        FORWARD_WORKLOAD_VALIDATE_FUNC(ClRsqrtWorkloadValidate,
                                       reasonIfUnsupported,
                                       input,
                                       output);
    }

    return false;
}

bool ClLayerSupport::IsFloorSupported(const TensorInfo& input,
                                      const TensorInfo& output,
                                      Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClFloorWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output);
}

bool ClLayerSupport::IsFullyConnectedSupported(const TensorInfo& input,
                                               const TensorInfo& output,
                                               const TensorInfo& weights,
                                               const TensorInfo& biases,
                                               const FullyConnectedDescriptor& descriptor,
                                               Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClFullyConnectedWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   weights,
                                   biases,
                                   descriptor);
}

bool ClLayerSupport::IsGreaterSupported(const TensorInfo& input0,
                                        const TensorInfo& input1,
                                        const TensorInfo& output,
                                        Optional<std::string&> reasonIfUnsupported) const
{
    ComparisonDescriptor descriptor(ComparisonOperation::Greater);
    return IsComparisonSupported(input0, input1, output, descriptor, reasonIfUnsupported);
}

bool ClLayerSupport::IsInputSupported(const TensorInfo& input,
                                      Optional<std::string&> reasonIfUnsupported) const
{
    return IsClBackendSupported(reasonIfUnsupported, input);
}

bool ClLayerSupport::IsInstanceNormalizationSupported(const TensorInfo& input,
                                                      const TensorInfo& output,
                                                      const InstanceNormalizationDescriptor& descriptor,
                                                      Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClInstanceNormalizationWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}

bool ClLayerSupport::IsL2NormalizationSupported(const TensorInfo& input,
                                                const TensorInfo& output,
                                                const L2NormalizationDescriptor& descriptor,
                                                Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClL2NormalizationWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}

bool ClLayerSupport::IsLstmSupported(const TensorInfo& input,
                                     const TensorInfo& outputStateIn,
                                     const TensorInfo& cellStateIn,
                                     const TensorInfo& scratchBuffer,
                                     const TensorInfo& outputStateOut,
                                     const TensorInfo& cellStateOut,
                                     const TensorInfo& output,
                                     const LstmDescriptor& descriptor,
                                     const LstmInputParamsInfo& paramsInfo,
                                     Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClLstmFloatWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   outputStateIn,
                                   cellStateIn,
                                   scratchBuffer,
                                   outputStateOut,
                                   cellStateOut,
                                   output,
                                   descriptor,
                                   paramsInfo);
}

bool ClLayerSupport::IsMaximumSupported(const TensorInfo& input0,
                                        const TensorInfo& input1,
                                        const TensorInfo& output,
                                        Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClMaximumWorkloadValidate,
                                   reasonIfUnsupported,
                                   input0,
                                   input1,
                                   output);
}

bool ClLayerSupport::IsMeanSupported(const TensorInfo& input,
                                     const TensorInfo& output,
                                     const MeanDescriptor& descriptor,
                                     Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClMeanValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}

bool ClLayerSupport::IsMergerSupported(const std::vector<const TensorInfo*> inputs,
                                       const TensorInfo& output,
                                       const MergerDescriptor& descriptor,
                                       Optional<std::string&> reasonIfUnsupported) const
{
    return IsConcatSupported(inputs, output, descriptor, reasonIfUnsupported);
}

bool ClLayerSupport::IsMinimumSupported(const TensorInfo& input0,
                                        const TensorInfo& input1,
                                        const TensorInfo& output,
                                        Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClMinimumWorkloadValidate,
                                   reasonIfUnsupported,
                                   input0,
                                   input1,
                                   output);
}

bool ClLayerSupport::IsMultiplicationSupported(const TensorInfo& input0,
                                               const TensorInfo& input1,
                                               const TensorInfo& output,
                                               Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClMultiplicationWorkloadValidate,
                                   reasonIfUnsupported,
                                   input0,
                                   input1,
                                   output);
}

bool ClLayerSupport::IsNormalizationSupported(const TensorInfo& input,
                                              const TensorInfo& output,
                                              const NormalizationDescriptor& descriptor,
                                              Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClNormalizationWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
}

bool ClLayerSupport::IsOutputSupported(const TensorInfo& output,
                                       Optional<std::string&> reasonIfUnsupported) const
{
    return IsClBackendSupported(reasonIfUnsupported, output);
}

bool ClLayerSupport::IsPadSupported(const TensorInfo& input,
                                    const TensorInfo& output,
                                    const PadDescriptor& descriptor,
                                    Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClPadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}

bool ClLayerSupport::IsPermuteSupported(const TensorInfo& input,
                                        const TensorInfo& output,
                                        const PermuteDescriptor& descriptor,
                                        Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClPermuteWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
}

bool ClLayerSupport::IsPooling2dSupported(const TensorInfo& input,
                                          const TensorInfo& output,
                                          const Pooling2dDescriptor& descriptor,
                                          Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClPooling2dWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
}

bool ClLayerSupport::IsPreluSupported(const armnn::TensorInfo &input,
                                      const armnn::TensorInfo &alpha,
                                      const armnn::TensorInfo &output,
                                      armnn::Optional<std::string &> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClPreluWorkloadValidate, reasonIfUnsupported, input, alpha, output);
}

bool ClLayerSupport::IsQuantizedLstmSupported(const TensorInfo& input,
                                              const TensorInfo& previousCellStateIn,
                                              const TensorInfo& previousOutputIn,
                                              const TensorInfo& cellStateOut,
                                              const TensorInfo& output,
                                              const QuantizedLstmInputParamsInfo& paramsInfo,
                                              Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClQuantizedLstmWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   previousCellStateIn,
                                   previousOutputIn,
                                   cellStateOut,
                                   output,
                                   paramsInfo);
}

bool ClLayerSupport::IsQuantizeSupported(const TensorInfo& input,
                                         const TensorInfo& output,
                                         Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClQuantizeWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output);
}

bool ClLayerSupport::IsReshapeSupported(const TensorInfo& input,
                                        const TensorInfo& output,
                                        const ReshapeDescriptor& descriptor,
                                        Optional<std::string&> reasonIfUnsupported) const
{
    IgnoreUnused(descriptor);
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClReshapeWorkloadValidate, reasonIfUnsupported, input, output);
}

bool ClLayerSupport::IsResizeSupported(const TensorInfo& input,
                                       const TensorInfo& output,
                                       const ResizeDescriptor& descriptor,
                                       Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClResizeWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
}

bool ClLayerSupport::IsResizeBilinearSupported(const TensorInfo& input,
                                               const TensorInfo& output,
                                               Optional<std::string&> reasonIfUnsupported) const
{
    ResizeDescriptor descriptor;
    descriptor.m_Method = ResizeMethod::Bilinear;
    descriptor.m_DataLayout = DataLayout::NCHW;

    const TensorShape& outputShape = output.GetShape();
    descriptor.m_TargetHeight = outputShape[2];
    descriptor.m_TargetWidth = outputShape[3];

    return IsResizeSupported(input, output, descriptor, reasonIfUnsupported);
}

bool ClLayerSupport::IsRsqrtSupported(const TensorInfo& input,
                                      const TensorInfo& output,
                                      Optional<std::string&> reasonIfUnsupported) const
{
    ElementwiseUnaryDescriptor descriptor(UnaryOperation::Rsqrt);
    return IsElementwiseUnarySupported(input, output, descriptor, reasonIfUnsupported);
}

bool ClLayerSupport::IsSliceSupported(const TensorInfo& input,
                                      const TensorInfo& output,
                                      const SliceDescriptor& descriptor,
                                      Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClSliceWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
}

bool ClLayerSupport::IsSoftmaxSupported(const TensorInfo& input,
                                        const TensorInfo& output,
                                        const SoftmaxDescriptor& descriptor,
                                        Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClSoftmaxWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
}

bool ClLayerSupport::IsSpaceToBatchNdSupported(const TensorInfo& input,
                                               const TensorInfo& output,
                                               const SpaceToBatchNdDescriptor& descriptor,
                                               Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClSpaceToBatchNdWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}

bool ClLayerSupport::IsSpaceToDepthSupported(const TensorInfo& input,
                                             const TensorInfo& output,
                                             const SpaceToDepthDescriptor& descriptor,
                                             Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClSpaceToDepthWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}

bool ClLayerSupport::IsSplitterSupported(const TensorInfo& input,
                                         const ViewsDescriptor& descriptor,
                                         Optional<std::string&> reasonIfUnsupported) const
{
    IgnoreUnused(descriptor);
    return IsSupportedForDataTypeCl(reasonIfUnsupported,
                                    input.GetDataType(),
                                    &TrueFunc<>,
                                    &TrueFunc<>);
}

bool ClLayerSupport::IsSplitterSupported(const TensorInfo& input,
                                         const std::vector<std::reference_wrapper<TensorInfo>>& outputs,
                                         const ViewsDescriptor& descriptor,
                                         Optional<std::string&> reasonIfUnsupported) const
{
#if defined(ARMCOMPUTECL_ENABLED)
    // Split along the last dimension, cannot use sub-tensors
    // as width and height of the sub-tensors do not match
    // the width and height of the parent tensor
    // in case of input with more than 2D.
    std::set<unsigned int> splitAxis = ComputeSplitAxis(descriptor, input.GetShape());
    if (descriptor.GetNumDimensions() > 2 && splitAxis.size() == 1 &&
        *splitAxis.begin() == descriptor.GetNumDimensions() - 1)
    {
        FORWARD_WORKLOAD_VALIDATE_FUNC(ClSplitterWorkloadValidate,
                                       reasonIfUnsupported,
                                       input,
                                       outputs,
                                       *splitAxis.begin());
    }
#endif
    IgnoreUnused(descriptor);
    for (auto output : outputs)
    {
        if (!input.IsTypeSpaceMatch(output)) // Cannot use sub-tensors if the types are not same space
        {
            SetValueChecked(reasonIfUnsupported, "Cl Splitter: Types and quantization parameters must match.");
            return false;
        }
    }
    return true;
}

bool ClLayerSupport::IsStackSupported(const std::vector<const TensorInfo*>& inputs,
                                      const TensorInfo& output,
                                      const StackDescriptor& descriptor,
                                      Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClStackWorkloadValidate,
                                   reasonIfUnsupported,
                                   inputs,
                                   output,
                                   descriptor);
}

bool ClLayerSupport::IsStridedSliceSupported(const TensorInfo& input,
                                             const TensorInfo& output,
                                             const StridedSliceDescriptor& descriptor,
                                             Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClStridedSliceWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}

bool ClLayerSupport::IsSubtractionSupported(const TensorInfo& input0,
                                            const TensorInfo& input1,
                                            const TensorInfo& output,
                                            Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClSubtractionValidate,
                                   reasonIfUnsupported,
                                   input0,
                                   input1,
                                   output);
}

bool ClLayerSupport::IsTransposeConvolution2dSupported(const TensorInfo& input,
                                                       const TensorInfo& output,
                                                       const TransposeConvolution2dDescriptor& descriptor,
                                                       const TensorInfo& weights,
                                                       const Optional<TensorInfo>& biases,
                                                       Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClTransposeConvolution2dWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor,
                                   weights,
                                   biases);
}

bool ClLayerSupport::IsTransposeSupported(const TensorInfo& input,
                                          const TensorInfo& output,
                                          const TransposeDescriptor& descriptor,
                                          Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClTransposeWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
}

} // namespace armnn
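
As a usage sketch (not a definitive recipe), the snippet below shows one way a caller might invoke one of these checks directly and retrieve the failure reason. It assumes ClLayerSupport is default-constructible, that cl/ClLayerSupport.hpp and the public armnn headers are reachable on the include path, and that a ReLU activation on a small Float32 tensor is being probed; in a typical application the same checks are reached through the backend's layer-support interface during network optimization rather than by constructing ClLayerSupport by hand.

#include <armnn/Descriptors.hpp>
#include <armnn/Optional.hpp>
#include <armnn/Tensor.hpp>
#include <armnn/Types.hpp>

#include <cl/ClLayerSupport.hpp> // assumed include path for this backend-internal header

#include <iostream>
#include <string>

int main()
{
    using namespace armnn;

    // A small 1x16 Float32 tensor used as both input and output of the activation.
    const TensorInfo tensorInfo(TensorShape({1, 16}), DataType::Float32);

    ActivationDescriptor descriptor;
    descriptor.m_Function = ActivationFunction::ReLu;

    ClLayerSupport layerSupport; // assumed default-constructible
    std::string reason;

    const bool supported = layerSupport.IsActivationSupported(tensorInfo,
                                                              tensorInfo,
                                                              descriptor,
                                                              Optional<std::string&>(reason));

    // If ArmNN was built without ARMCOMPUTECL_ENABLED, 'reason' explains that CL support is missing;
    // otherwise, on failure it carries the error description from ClActivationWorkloadValidate.
    std::cout << "ReLU activation supported on the CL backend: " << std::boolalpha << supported << '\n';
    if (!supported)
    {
        std::cout << "Reason: " << reason << '\n';
    }
    return 0;
}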