ArmNN  NotReleased
ClLayerSupport.cpp
Go to the documentation of this file.
1 //
2 // Copyright © 2017 Arm Ltd. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
6 #include "ClLayerSupport.hpp"
7 #include "ClBackendId.hpp"
8 
9 #include <armnn/Descriptors.hpp>
11 
12 #include <InternalTypes.hpp>
13 #include <LayerSupportCommon.hpp>
14 
15 #include <boost/core/ignore_unused.hpp>
16 
17 #if defined(ARMCOMPUTECL_ENABLED)
63 #endif
64 
65 using namespace boost;
66 
67 namespace armnn
68 {
69 
70 namespace
71 {
72 
73 template<unsigned int FilterSize>
74 bool IsMatchingSize2d(const TensorInfo& weightInfo)
75 {
76  // Width & Height must match.
77  return (weightInfo.GetShape()[3] == FilterSize) && (weightInfo.GetShape()[2] == FilterSize);
78 }
79 
// Returns true when actualStride equals the single ValidStride template argument.
template<uint32_t ValidStride>
bool IsMatchingStride(uint32_t actualStride)
{
    return ValidStride == actualStride;
}

// Variadic overload: returns true when actualStride matches any of the listed
// valid strides, checked left to right via the single-argument overload.
template<uint32_t FirstStride, uint32_t SecondStride, uint32_t... ValidStrides>
bool IsMatchingStride(uint32_t actualStride)
{
    return IsMatchingStride<FirstStride>(actualStride) || IsMatchingStride<SecondStride, ValidStrides...>(actualStride);
}
91 
92 template<typename ... Args>
93 bool IsClBackendSupported(Optional<std::string&> reasonIfUnsupported, Args... args)
94 {
95  boost::ignore_unused(reasonIfUnsupported, (args)...);
96 #if defined(ARMCOMPUTECL_ENABLED)
97  return true;
98 #else
99  if (reasonIfUnsupported)
100  {
101  reasonIfUnsupported.value() = "The armnn library has been built without CL support";
102  }
103  return false;
104 #endif
105 }
106 
// With CL compiled in, evaluate the support expression directly; without CL,
// report the standard "built without CL support" reason instead. Relies on a
// variable named reasonIfUnsupported being in scope at the call site.
#if defined(ARMCOMPUTECL_ENABLED)
#define FORWARD_CL_LAYER_SUPPORT_FUNC(expr) (expr)
#else
#define FORWARD_CL_LAYER_SUPPORT_FUNC(expr) IsClBackendSupported(reasonIfUnsupported)
#endif
112 
#if defined(ARMCOMPUTECL_ENABLED)
// Runs an Arm Compute Library validate function and converts its Status into a
// bool, copying the ACL error description into reasonIfUnsupported on failure.
template<class FuncType, class... Args>
inline bool IsWorkloadSupported(FuncType&& func, Optional<std::string&> reasonIfUnsupported, Args&&... args)
{
    arm_compute::Status aclStatus = func(std::forward<Args>(args)...);
    const bool supported = (aclStatus.error_code() == arm_compute::ErrorCode::OK);
    if (!supported && reasonIfUnsupported)
    {
        reasonIfUnsupported.value() = aclStatus.error_description();
    }
    return supported;
}

// Expands to a return statement, so it must be the last statement of the
// enclosing IsXxxSupported method.
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported, ...) \
    return IsWorkloadSupported(func, reasonIfUnsupported, __VA_ARGS__);
#else
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported, ...) \
    return IsClBackendSupported(reasonIfUnsupported, __VA_ARGS__);
#endif
132 
133 template<typename FloatFunc, typename Uint8Func, typename ... Params>
134 bool IsSupportedForDataTypeCl(Optional<std::string&> reasonIfUnsupported,
135  DataType dataType,
136  FloatFunc floatFuncPtr,
137  Uint8Func uint8FuncPtr,
138  Params&&... params)
139 {
140  return IsClBackendSupported(reasonIfUnsupported) &&
141  IsSupportedForDataTypeGeneric(reasonIfUnsupported,
142  dataType,
143  floatFuncPtr,
144  floatFuncPtr,
145  uint8FuncPtr,
146  &FalseFunc<>,
147  &FalseFunc<>,
148  std::forward<Params>(params)...);
149 }
150 } // anonymous namespace
151 
152 bool ClLayerSupport::IsAbsSupported(const TensorInfo& input,
153  const TensorInfo& output,
154  Optional<std::string&> reasonIfUnsupported) const
155 {
156  ElementwiseUnaryDescriptor descriptor(UnaryOperation::Abs);
157  return IsElementwiseUnarySupported(input, output, descriptor, reasonIfUnsupported);
158 }
159 
161  const TensorInfo& output,
162  const ActivationDescriptor& descriptor,
163  Optional<std::string&> reasonIfUnsupported) const
164 {
166  reasonIfUnsupported,
167  input,
168  output,
169  descriptor);
170 }
171 
173  const TensorInfo& input1,
174  const TensorInfo& output,
175  Optional<std::string&> reasonIfUnsupported) const
176 {
178  reasonIfUnsupported,
179  input0,
180  input1,
181  output);
182 }
183 
185  const TensorInfo& output,
186  const ArgMinMaxDescriptor& descriptor,
187  Optional<std::string&> reasonIfUnsupported) const
188 {
189 
191  reasonIfUnsupported,
192  input,
193  output,
194  descriptor);
195 }
196 
198  const TensorInfo& output,
199  const TensorInfo& mean,
200  const TensorInfo& var,
201  const TensorInfo& beta,
202  const TensorInfo& gamma,
203  const BatchNormalizationDescriptor& descriptor,
204  Optional<std::string&> reasonIfUnsupported) const
205 {
207  reasonIfUnsupported,
208  input,
209  output,
210  mean,
211  var,
212  beta,
213  gamma,
214  descriptor);
215 }
216 
218  const TensorInfo& output,
219  const BatchToSpaceNdDescriptor& descriptor,
220  Optional<std::string&> reasonIfUnsupported) const
221 {
223  reasonIfUnsupported,
224  input,
225  output,
226  descriptor);
227 }
228 
229 bool ClLayerSupport::IsComparisonSupported(const TensorInfo& input0,
230  const TensorInfo& input1,
231  const TensorInfo& output,
232  const ComparisonDescriptor& descriptor,
233  Optional<std::string&> reasonIfUnsupported) const
234 {
235  if (descriptor.m_Operation == ComparisonOperation::Greater)
236  {
238  reasonIfUnsupported,
239  input0,
240  input1,
241  output);
242  }
243 
244  return false;
245 }
246 
247 bool ClLayerSupport::IsConcatSupported(const std::vector<const TensorInfo*> inputs,
248  const TensorInfo& output,
249  const ConcatDescriptor& descriptor,
250  Optional<std::string&> reasonIfUnsupported) const
251 {
252  if (descriptor.GetNumDimensions() <= descriptor.GetConcatAxis())
253  {
254  SetValueChecked(reasonIfUnsupported, "Cl Concat: Concat axis > Number of dimensions.");
255  return false;
256  }
257 
258  unsigned int concatInnerAxis = (descriptor.GetNumDimensions() - descriptor.GetConcatAxis()) - 1;
259  if(concatInnerAxis < 3) // Width, height, or channels
260  {
262  reasonIfUnsupported,
263  inputs,
264  output,
265  descriptor);
266  }
267  else if (concatInnerAxis == 3)
268  {
269  // We rely on the sub-tensor optimization to handle the batch dimension for 4D tensors. If we can't use
270  // sub-tensors for this then we can't support it. Here is where we check that the sub-tensors will work.
271  for (auto& input : inputs)
272  {
273  if (input && !output.IsTypeSpaceMatch(*input)) // Cannot use sub-tensors if the types are not same space
274  {
275  SetValueChecked(reasonIfUnsupported, "Cl Concat: Types and quantization parameters must match.");
276  return false;
277  }
278  }
279  return true; // Sub-tensors support concat along batch
280  }
281  else // > 4 dimensions not supported.
282  {
283  SetValueChecked(reasonIfUnsupported, "Cl Concat: Maximum of 4 dimensions supported.");
284  return false;
285  }
286 }
287 
289  Optional<std::string&> reasonIfUnsupported) const
290 {
291  return IsSupportedForDataTypeCl(reasonIfUnsupported,
292  output.GetDataType(),
293  &TrueFunc<>,
294  &TrueFunc<>);
295 }
296 
298  const TensorInfo& output,
299  Optional<std::string&> reasonIfUnsupported) const
300 {
302  reasonIfUnsupported,
303  input,
304  output);
305 }
306 
308  const TensorInfo& output,
309  Optional<std::string&> reasonIfUnsupported) const
310 {
312  reasonIfUnsupported,
313  input,
314  output);
315 }
316 
318  const TensorInfo& output,
319  const Convolution2dDescriptor& descriptor,
320  const TensorInfo& weights,
321  const Optional<TensorInfo>& biases,
322  Optional<std::string&> reasonIfUnsupported) const
323 {
325  reasonIfUnsupported,
326  input,
327  output,
328  descriptor,
329  weights,
330  biases);
331 }
332 
334  const TensorInfo& output,
335  Optional<std::string&> reasonIfUnsupported) const
336 {
338  reasonIfUnsupported,
339  input,
340  output);
341 }
342 
343 bool ClLayerSupport::IsDepthToSpaceSupported(const TensorInfo& input,
344  const TensorInfo& output,
345  const DepthToSpaceDescriptor& descriptor,
346  Optional<std::string&> reasonIfUnsupported) const
347 {
349  reasonIfUnsupported,
350  input,
351  output,
352  descriptor);
353 }
354 
356  const TensorInfo& output,
357  const DepthwiseConvolution2dDescriptor& descriptor,
358  const TensorInfo& weights,
359  const Optional<TensorInfo>& biases,
360  Optional<std::string&> reasonIfUnsupported) const
361 {
363  reasonIfUnsupported,
364  input,
365  output,
366  descriptor,
367  weights,
368  biases);
369 }
370 
371 bool ClLayerSupport::IsDilatedDepthwiseConvolutionSupported(const TensorInfo& input,
372  const TensorInfo& output,
373  const DepthwiseConvolution2dDescriptor& descriptor,
374  const TensorInfo& weights,
375  const Optional<TensorInfo>& biases,
376  Optional<std::string&> reasonIfUnsupported) const
377 {
379  reasonIfUnsupported,
380  input,
381  output,
382  descriptor,
383  weights,
384  biases);
385 }
386 
387 
389  const TensorInfo& input1,
390  const TensorInfo& output,
391  Optional<std::string&> reasonIfUnsupported) const
392 {
394  reasonIfUnsupported,
395  input0,
396  input1,
397  output);
398 }
399 
400 bool ClLayerSupport::IsElementwiseUnarySupported(const TensorInfo& input,
401  const TensorInfo& output,
402  const ElementwiseUnaryDescriptor& descriptor,
403  Optional<std::string&> reasonIfUnsupported) const
404 {
405  if (descriptor.m_Operation == UnaryOperation::Abs)
406  {
408  reasonIfUnsupported,
409  input,
410  output);
411  }
412  else if (descriptor.m_Operation == UnaryOperation::Rsqrt)
413  {
415  reasonIfUnsupported,
416  input,
417  output);
418  }
419 
420  return false;
421 }
422 
424  const TensorInfo& output,
425  Optional<std::string&> reasonIfUnsupported) const
426 {
428  reasonIfUnsupported,
429  input,
430  output);
431 }
432 
434  const TensorInfo& output,
435  const TensorInfo& weights,
436  const TensorInfo& biases,
437  const FullyConnectedDescriptor& descriptor,
438  Optional<std::string&> reasonIfUnsupported) const
439 {
441  reasonIfUnsupported,
442  input,
443  output,
444  weights,
445  biases,
446  descriptor);
447 }
448 
450  const TensorInfo& input1,
451  const TensorInfo& output,
452  Optional<std::string&> reasonIfUnsupported) const
453 {
454  ComparisonDescriptor descriptor(ComparisonOperation::Greater);
455  return IsComparisonSupported(input0, input1, output, descriptor, reasonIfUnsupported);
456 }
457 
459  Optional<std::string&> reasonIfUnsupported) const
460 {
461  return IsClBackendSupported(reasonIfUnsupported, input);
462 }
463 
464 bool ClLayerSupport::IsInstanceNormalizationSupported(const TensorInfo& input,
465  const TensorInfo& output,
466  const InstanceNormalizationDescriptor& descriptor,
467  Optional<std::string&> reasonIfUnsupported) const
468 {
470  reasonIfUnsupported,
471  input,
472  output,
473  descriptor);
474 }
475 
477  const TensorInfo& output,
478  const L2NormalizationDescriptor& descriptor,
479  Optional<std::string&> reasonIfUnsupported) const
480 {
482  reasonIfUnsupported,
483  input,
484  output,
485  descriptor);
486 }
487 
489  const TensorInfo& outputStateIn,
490  const TensorInfo& cellStateIn,
491  const TensorInfo& scratchBuffer,
492  const TensorInfo& outputStateOut,
493  const TensorInfo& cellStateOut,
494  const TensorInfo& output,
495  const LstmDescriptor& descriptor,
496  const LstmInputParamsInfo& paramsInfo,
497  Optional<std::string&> reasonIfUnsupported) const
498 {
500  reasonIfUnsupported,
501  input,
502  outputStateIn,
503  cellStateIn,
504  scratchBuffer,
505  outputStateOut,
506  cellStateOut,
507  output,
508  descriptor,
509  paramsInfo);
510 }
511 
513  const TensorInfo& input1,
514  const TensorInfo& output,
515  Optional<std::string&> reasonIfUnsupported) const
516 {
518  reasonIfUnsupported,
519  input0,
520  input1,
521  output);
522 }
523 
525  const TensorInfo& output,
526  const MeanDescriptor& descriptor,
527  Optional<std::string&> reasonIfUnsupported) const
528 {
530  reasonIfUnsupported,
531  input,
532  output,
533  descriptor);
534 }
535 
536 bool ClLayerSupport::IsMergerSupported(const std::vector<const TensorInfo*> inputs,
537  const TensorInfo& output,
538  const MergerDescriptor& descriptor,
539  Optional<std::string&> reasonIfUnsupported) const
540 {
541  return IsConcatSupported(inputs, output, descriptor, reasonIfUnsupported);
542 }
543 
545  const TensorInfo& input1,
546  const TensorInfo& output,
547  Optional<std::string&> reasonIfUnsupported) const
548 {
550  reasonIfUnsupported,
551  input0,
552  input1,
553  output);
554 }
555 
557  const TensorInfo& input1,
558  const TensorInfo& output,
559  Optional<std::string&> reasonIfUnsupported) const
560 {
562  reasonIfUnsupported,
563  input0,
564  input1,
565  output);
566 }
567 
569  const TensorInfo& output,
570  const NormalizationDescriptor& descriptor,
571  Optional<std::string&> reasonIfUnsupported) const
572 {
573  FORWARD_WORKLOAD_VALIDATE_FUNC(ClNormalizationWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
574 }
575 
577  Optional<std::string&> reasonIfUnsupported) const
578 {
579  return IsClBackendSupported(reasonIfUnsupported, output);
580 }
581 
583  const TensorInfo& output,
584  const PadDescriptor& descriptor,
585  Optional<std::string&> reasonIfUnsupported) const
586 {
588  reasonIfUnsupported,
589  input,
590  output,
591  descriptor);
592 }
593 
595  const TensorInfo& output,
596  const PermuteDescriptor& descriptor,
597  Optional<std::string&> reasonIfUnsupported) const
598 {
599  FORWARD_WORKLOAD_VALIDATE_FUNC(ClPermuteWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
600 }
601 
603  const TensorInfo& output,
604  const Pooling2dDescriptor& descriptor,
605  Optional<std::string&> reasonIfUnsupported) const
606 {
607  FORWARD_WORKLOAD_VALIDATE_FUNC(ClPooling2dWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
608 }
609 
611  const armnn::TensorInfo &alpha,
612  const armnn::TensorInfo &output,
613  armnn::Optional<std::string &> reasonIfUnsupported) const
614 {
615  FORWARD_WORKLOAD_VALIDATE_FUNC(ClPreluWorkloadValidate, reasonIfUnsupported, input, alpha, output);
616 }
617 
619  const TensorInfo& previousCellStateIn,
620  const TensorInfo& previousOutputIn,
621  const TensorInfo& cellStateOut,
622  const TensorInfo& output,
623  const QuantizedLstmInputParamsInfo& paramsInfo,
624  Optional<std::string&> reasonIfUnsupported) const
625 {
627  reasonIfUnsupported,
628  input,
629  previousCellStateIn,
630  previousOutputIn,
631  cellStateOut,
632  output,
633  paramsInfo);
634 }
635 
637  const TensorInfo& output,
638  Optional<std::string&> reasonIfUnsupported) const
639 {
641  reasonIfUnsupported,
642  input,
643  output);
644 }
645 
647  const TensorInfo& output,
648  const ReshapeDescriptor& descriptor,
649  Optional<std::string&> reasonIfUnsupported) const
650 {
651  ignore_unused(descriptor);
652  FORWARD_WORKLOAD_VALIDATE_FUNC(ClReshapeWorkloadValidate, reasonIfUnsupported, input, output);
653 }
654 
656  const TensorInfo& output,
657  const ResizeDescriptor& descriptor,
658  Optional<std::string&> reasonIfUnsupported) const
659 {
660  FORWARD_WORKLOAD_VALIDATE_FUNC(ClResizeWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
661 }
662 
664  const TensorInfo& output,
665  Optional<std::string&> reasonIfUnsupported) const
666 {
667  ResizeDescriptor descriptor;
668  descriptor.m_Method = ResizeMethod::Bilinear;
669  descriptor.m_DataLayout = DataLayout::NCHW;
670 
671  const TensorShape& outputShape = output.GetShape();
672  descriptor.m_TargetHeight = outputShape[2];
673  descriptor.m_TargetWidth = outputShape[3];
674 
675  return IsResizeSupported(input, output, descriptor, reasonIfUnsupported);
676 }
677 
679  const TensorInfo& output,
680  Optional<std::string&> reasonIfUnsupported) const
681 {
682  ElementwiseUnaryDescriptor descriptor(UnaryOperation::Rsqrt);
683  return IsElementwiseUnarySupported(input, output, descriptor, reasonIfUnsupported);
684 }
685 
686 bool ClLayerSupport::IsSliceSupported(const TensorInfo& input,
687  const TensorInfo& output,
688  const SliceDescriptor& descriptor,
689  Optional<std::string&> reasonIfUnsupported) const
690 {
691  FORWARD_WORKLOAD_VALIDATE_FUNC(ClSliceWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
692 }
693 
695  const TensorInfo& output,
696  const SoftmaxDescriptor& descriptor,
697  Optional<std::string&> reasonIfUnsupported) const
698 {
699  FORWARD_WORKLOAD_VALIDATE_FUNC(ClSoftmaxWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
700 }
701 
703  const TensorInfo& output,
704  const SpaceToBatchNdDescriptor& descriptor,
705  Optional<std::string&> reasonIfUnsupported) const
706 {
708  reasonIfUnsupported,
709  input,
710  output,
711  descriptor);
712 }
713 
715  const TensorInfo& output,
716  const SpaceToDepthDescriptor& descriptor,
717  Optional<std::string&> reasonIfUnsupported) const
718 {
720  reasonIfUnsupported,
721  input,
722  output,
723  descriptor);
724 }
725 
727  const ViewsDescriptor& descriptor,
728  Optional<std::string&> reasonIfUnsupported) const
729 {
730  ignore_unused(descriptor);
731  return IsSupportedForDataTypeCl(reasonIfUnsupported,
732  input.GetDataType(),
733  &TrueFunc<>,
734  &TrueFunc<>);
735 }
736 
738  const std::vector<std::reference_wrapper<TensorInfo>>& outputs,
739  const ViewsDescriptor& descriptor,
740  Optional<std::string&> reasonIfUnsupported) const
741 {
742 #if defined(ARMCOMPUTECL_ENABLED)
743  // Split along the last dimension, cannot use sub-tensors
744  // as width and height of the sub-tensors do not match
745  // the width and height of the parent tensor
746  // in case of input with more than 2D.
747  std::set<unsigned int> splitAxis = ComputeSplitAxis(descriptor, input.GetShape());
748  if (descriptor.GetNumDimensions() > 2 && splitAxis.size() == 1 &&
749  *splitAxis.begin() == descriptor.GetNumDimensions() - 1 )
750  {
752  reasonIfUnsupported,
753  input,
754  outputs,
755  *splitAxis.begin());
756  }
757 #endif
758  boost::ignore_unused(descriptor);
759  for (auto output : outputs)
760  {
761  if (!input.IsTypeSpaceMatch(output)) // Cannot use sub-tensors if the types are not same space
762  {
763  SetValueChecked(reasonIfUnsupported, "Cl Splitter: Types and quantization parameters must match.");
764  return false;
765  }
766  }
767  return true;
768 }
769 
770 bool ClLayerSupport::IsStackSupported(const std::vector<const TensorInfo*>& inputs,
771  const TensorInfo& output,
772  const StackDescriptor& descriptor,
773  Optional<std::string&> reasonIfUnsupported) const
774 {
776  reasonIfUnsupported,
777  inputs,
778  output,
779  descriptor);
780 }
781 
783  const TensorInfo& output,
784  const StridedSliceDescriptor& descriptor,
785  Optional<std::string&> reasonIfUnsupported) const
786 {
788  reasonIfUnsupported,
789  input,
790  output,
791  descriptor);
792 }
793 
795  const TensorInfo& input1,
796  const TensorInfo& output,
797  Optional<std::string&> reasonIfUnsupported) const
798 {
800  reasonIfUnsupported,
801  input0,
802  input1,
803  output);
804 }
805 
807  const TensorInfo& output,
808  const TransposeConvolution2dDescriptor& descriptor,
809  const TensorInfo& weights,
810  const Optional<TensorInfo>& biases,
811  Optional<std::string&> reasonIfUnsupported) const
812 {
814  reasonIfUnsupported,
815  input,
816  output,
817  descriptor,
818  weights,
819  biases);
820 }
821 
822 } // namespace armnn
arm_compute::Status ClActivationWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const ActivationDescriptor &descriptor)
arm_compute::Status ClStridedSliceWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const StridedSliceDescriptor &descriptor)
arm_compute::Status ClPreluWorkloadValidate(const TensorInfo &input, const TensorInfo &alpha, const TensorInfo &output)
UnaryOperation m_Operation
Specifies the elementwiseUnary operation to execute.
Definition: Descriptors.hpp:98
A BatchToSpaceNdDescriptor for the BatchToSpaceNdLayer.
bool IsBatchToSpaceNdSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, const BatchToSpaceNdDescriptor &descriptor, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
Status
Definition: Types.hpp:26
arm_compute::Status ClMinimumWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output)
A NormalizationDescriptor for the NormalizationLayer.
arm_compute::Status ClQuantizedLstmWorkloadValidate(const TensorInfo &input, const TensorInfo &previousCellStateIn, const TensorInfo &previousOutputIn, const TensorInfo &cellStateOut, const TensorInfo &output, const QuantizedLstmInputParamsInfo &paramsInfo)
bool IsFullyConnectedSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, const TensorInfo &weights, const TensorInfo &biases, const FullyConnectedDescriptor &descriptor, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
arm_compute::Status ClBatchToSpaceNdWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const BatchToSpaceNdDescriptor &desc)
arm_compute::Status ClFloorWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
uint32_t m_TargetHeight
Target height value.
arm_compute::Status ClTransposeConvolution2dWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const TransposeConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases)
bool IsRsqrtSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
std::set< unsigned int > ComputeSplitAxis(const armnn::SplitterDescriptor &desc, const TensorShape &input)
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
bool IsResizeBilinearSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
arm_compute::Status ClDequantizeWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
A PadDescriptor for the PadLayer.
An ActivationDescriptor for the ActivationLayer.
Definition: Descriptors.hpp:20
arm_compute::Status ClMultiplicationWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output)
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported,...)
bool IsTransposeConvolution2dSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, const TransposeConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
arm_compute::Status ClConvolution2dWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const Convolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases)
A BatchNormalizationDescriptor for the BatchNormalizationLayer.
bool IsActivationSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, const ActivationDescriptor &descriptor, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
bool IsReshapeSupported(const BackendId &backend, const TensorInfo &input, const ReshapeDescriptor &descriptor, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
bool IsBatchNormalizationSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, const TensorInfo &mean, const TensorInfo &var, const TensorInfo &beta, const TensorInfo &gamma, const BatchNormalizationDescriptor &descriptor, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
bool IsQuantizedLstmSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &previousCellStateIn, const TensorInfo &previousOutputIn, const TensorInfo &cellStateOut, const TensorInfo &output, const QuantizedLstmInputParamsInfo &paramsInfo, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
bool IsSpaceToBatchNdSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, const SpaceToBatchNdDescriptor &descriptor, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
A L2NormalizationDescriptor for the L2NormalizationLayer.
A ViewsDescriptor for the SplitterLayer. Descriptor to configure the splitting process. Number of Views must be equal to the number of outputs, and their order must match - e.g. first view corresponds to the first output, second view to the second output, etc.
bool IsSpaceToDepthSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, const SpaceToDepthDescriptor &descriptor, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
bool IsGreaterSupported(const BackendId &backend, const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
bool IsStridedSliceSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, const StridedSliceDescriptor &descriptor, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
A SpaceToDepthDescriptor for the SpaceToDepthLayer.
arm_compute::Status ClFullyConnectedWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const TensorInfo &weights, const TensorInfo &biases, const FullyConnectedDescriptor &descriptor)
ISubgraphViewConverter supported
A ReshapeDescriptor for the ReshapeLayer.
A TransposeConvolution2dDescriptor for the TransposeConvolution2dLayer.
A ElementwiseUnaryDescriptor for the ElementwiseUnaryLayer.
Definition: Descriptors.hpp:82
bool IsMaximumSupported(const BackendId &backend, const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, char *reasonIfUnSupported=nullptr, size_t reasonIfUnSupportedMaxLength=0)
Deprecated in favor of IBackend and ILayerSupport interfaces.
arm_compute::Status ClDivisionWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output)
arm_compute::Status ClSoftmaxWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const SoftmaxDescriptor &descriptor)
bool IsConcatSupported(const BackendId &backend, const std::vector< const TensorInfo *> inputs, const TensorInfo &output, const OriginsDescriptor &descriptor, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
arm_compute::Status ClSplitterWorkloadValidate(const TensorInfo &input, const std::vector< std::reference_wrapper< TensorInfo >> &outputs, unsigned int splitAxis)
bool IsDivisionSupported(const BackendId &backend, const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
arm_compute::Status ClConcatWorkloadValidate(const std::vector< const TensorInfo *> &inputs, const TensorInfo &output, const OriginsDescriptor &descriptor)
bool IsAdditionSupported(const BackendId &backend, const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
uint32_t GetNumDimensions() const
Get the number of dimensions.
arm_compute::Status ClDepthToSpaceWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const DepthToSpaceDescriptor &desc)
bool IsTypeSpaceMatch(const TensorInfo &other) const
Check that the types are the same and, if quantize, that the quantization parameters are the same...
Definition: Tensor.cpp:218
arm_compute::Status ClAdditionValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output)
arm_compute::Status ClRsqrtWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
arm_compute::Status ClConvertFp16ToFp32WorkloadValidate(const TensorInfo &input, const TensorInfo &output)
arm_compute::Status ClArgMinMaxWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const ArgMinMaxDescriptor &descriptor)
An LstmDescriptor for the LstmLayer.
uint32_t GetNumDimensions() const
Get the number of dimensions.
unsigned int GetConcatAxis() const
Get the concatenation axis value.
bool IsPreluSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &alpha, const TensorInfo &output, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
bool IsFloorSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
An InstanceNormalizationDescriptor for InstanceNormalizationLayer.
bool IsSplitterSupported(const BackendId &backend, const TensorInfo &input, const ViewsDescriptor &descriptor, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
A FullyConnectedDescriptor for the FullyConnectedLayer.
bool IsSoftmaxSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, const SoftmaxDescriptor &descriptor, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
bool IsNormalizationSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, const NormalizationDescriptor &descriptor, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
A StackDescriptor for the StackLayer.
bool IsSupportedForDataTypeGeneric(Optional< std::string &> reasonIfUnsupported, DataType dataType, Float16Func float16FuncPtr, Float32Func float32FuncPtr, Uint8Func uint8FuncPtr, Int32Func int32FuncPtr, BooleanFunc booleanFuncPtr, Params &&... params)
bool IsL2NormalizationSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, const L2NormalizationDescriptor &descriptor, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
A SoftmaxDescriptor for the SoftmaxLayer.
arm_compute::Status ClStackWorkloadValidate(const std::vector< const TensorInfo *> &inputs, const TensorInfo &output, const StackDescriptor &descriptor)
uint32_t m_TargetWidth
Target width value.
arm_compute::Status ClMeanValidate(const TensorInfo &input, const TensorInfo &output, const MeanDescriptor &desc)
bool IsInputSupported(const BackendId &backend, const TensorInfo &input, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
bool IsOutputSupported(const BackendId &backend, const TensorInfo &output, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
arm_compute::Status ClSubtractionValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output)
ComparisonOperation m_Operation
Specifies the comparison operation to execute.
Definition: Descriptors.hpp:78
bool IsLstmSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &scratchBuffer, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const LstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
bool IsConvertFp16ToFp32Supported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
DataType
Definition: Types.hpp:32
void SetValueChecked(Optional< T &> optionalRef, V &&val)
arm_compute::Status ClInstanceNormalizationWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const InstanceNormalizationDescriptor &descriptor)
bool IsPooling2dSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, const Pooling2dDescriptor &descriptor, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
arm_compute::Status ClPermuteWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const PermuteDescriptor &descriptor)
arm_compute::Status ClSpaceToDepthWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const SpaceToDepthDescriptor &desc)
bool IsMeanSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, const MeanDescriptor &descriptor, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
arm_compute::Status ClSliceWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const SliceDescriptor &descriptor)
DataType GetDataType() const
Definition: Tensor.hpp:95
bool IsConstantSupported(const BackendId &backend, const TensorInfo &output, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
bool IsStackSupported(const BackendId &backend, const std::vector< const TensorInfo *> inputs, const TensorInfo &output, const StackDescriptor &descriptor, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
bool IsConvolution2dSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, const Convolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
bool IsMergerSupported(const BackendId &backend, const std::vector< const TensorInfo *> inputs, const TensorInfo &output, const OriginsDescriptor &descriptor, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
A Pooling2dDescriptor for the Pooling2dLayer.
bool IsConvertFp32ToFp16Supported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
bool IsPermuteSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, const PermuteDescriptor &descriptor, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
A SliceDescriptor for the SliceLayer.
arm_compute::Status ClDepthwiseConvolutionWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases)
bool IsDepthwiseConvolutionSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
A DepthwiseConvolution2dDescriptor for the DepthwiseConvolution2dLayer.
A PermuteDescriptor for the PermuteLayer.
A Convolution2dDescriptor for the Convolution2dLayer.
arm_compute::Status ClQuantizeWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
arm_compute::Status ClAbsWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
A MeanDescriptor for the MeanLayer.
bool IsMultiplicationSupported(const BackendId &backend, const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
A SpaceToBatchNdDescriptor for the SpaceToBatchNdLayer.
arm_compute::Status ClBatchNormalizationValidate(const TensorInfo &input, const TensorInfo &output, const TensorInfo &mean, const TensorInfo &var, const TensorInfo &beta, const TensorInfo &gamma, const BatchNormalizationDescriptor &desc)
arm_compute::Status ClLstmFloatWorkloadValidate(const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &scratchBuffer, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const LstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo)
arm_compute::Status ClReshapeWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
bool IsResizeSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, const ResizeDescriptor &descriptor, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
const TensorShape & GetShape() const
Definition: Tensor.hpp:88
An ArgMinMaxDescriptor for ArgMinMaxLayer.
Definition: Descriptors.hpp:43
arm_compute::Status ClMaximumWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output)
bool IsSubtractionSupported(const BackendId &backend, const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
arm_compute::Status ClPadValidate(const TensorInfo &input, const TensorInfo &output, const PadDescriptor &descriptor)
bool IsDequantizeSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
arm_compute::Status ClConvertFp32ToFp16WorkloadValidate(const TensorInfo &input, const TensorInfo &output)
arm_compute::Status ClGreaterWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output)
arm_compute::Status ClL2NormalizationWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const L2NormalizationDescriptor &descriptor)
A ResizeDescriptor for the ResizeLayer.
arm_compute::Status ClPooling2dWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const Pooling2dDescriptor &descriptor)
arm_compute::Status ClResizeWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const ResizeDescriptor &descriptor)
bool IsPadSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, const PadDescriptor &descriptor, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
arm_compute::Status ClSpaceToBatchNdWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const SpaceToBatchNdDescriptor &descriptor)
bool IsMinimumSupported(const BackendId &backend, const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
A ComparisonDescriptor for the ComparisonLayer.
Definition: Descriptors.hpp:62
bool IsArgMinMaxSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, const ArgMinMaxDescriptor &descriptor, char *reasonIfUnsupported, size_t reasonIfUnsupportedMaxLength)
bool IsQuantizeSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, char *reasonIfUnsupported, size_t reasonIfUnsupportedMaxLength)
arm_compute::Status ClNormalizationWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const NormalizationDescriptor &descriptor)
An OriginsDescriptor for the ConcatLayer. Descriptor to configure the concatenation process...
A StridedSliceDescriptor for the StridedSliceLayer.