ArmNN
 20.08
NeonLayerSupport.cpp
Go to the documentation of this file.
1 //
2 // Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
6 #include "NeonLayerSupport.hpp"
7 #include "NeonBackendId.hpp"
8 
9 #include <armnn/Descriptors.hpp>
10 #include <armnn/Exceptions.hpp>
11 #include <armnn/Tensor.hpp>
12 #include <armnn/Types.hpp>
14 
15 #include <InternalTypes.hpp>
16 #include <LayerSupportCommon.hpp>
18 
19 #if defined(ARMCOMPUTENEON_ENABLED)
69 #endif
70 
71 namespace armnn
72 {
73 
74 namespace
75 {
76 
77 template< typename ... Args>
78 bool IsNeonBackendSupported(Optional<std::string&> reasonIfUnsupported, Args... args)
79 {
80  IgnoreUnused(reasonIfUnsupported, (args)...);
81 #if defined(ARMCOMPUTENEON_ENABLED)
82  return true;
83 #else
84  SetValueChecked(reasonIfUnsupported, "The armnn library has been built without NEON support");
85  return false;
86 #endif
87 }
88 
89 template<typename FloatFunc, typename Uint8Func, typename ... Params>
90 bool IsSupportedForDataTypeNeon(Optional<std::string&> reasonIfUnsupported,
91  DataType dataType,
92  FloatFunc floatFuncPtr,
93  Uint8Func uint8FuncPtr,
94  Params&&... params)
95 {
96  return IsNeonBackendSupported(reasonIfUnsupported) &&
97  IsSupportedForDataTypeGeneric(reasonIfUnsupported,
98  dataType,
99  floatFuncPtr,
100  floatFuncPtr,
101  uint8FuncPtr,
102  &FalseFunc<>,
103  &FalseFunc<>,
104  std::forward<Params>(params)...);
105 }
106 
#if defined(ARMCOMPUTENEON_ENABLED)
// Runs an ACL validate function and converts its Status into a bool.
// On failure, the ACL error description is copied into reasonIfUnsupported
// when the caller supplied an output string.
template<class FuncType, class... Args>
inline bool IsWorkloadSupported(FuncType& func, Optional<std::string&> reasonIfUnsupported, Args&&... args)
{
    arm_compute::Status status = func(std::forward<Args>(args)...);
    if (status.error_code() == arm_compute::ErrorCode::OK)
    {
        return true;
    }
    if (reasonIfUnsupported)
    {
        reasonIfUnsupported.value() = status.error_description();
    }
    return false;
}

// With NEON compiled in, forward to the real ACL validate function.
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported, ...) \
    return IsWorkloadSupported(func, reasonIfUnsupported, __VA_ARGS__);
#else
// Without NEON, every validate call collapses to "backend not supported".
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported, ...) \
    return IsNeonBackendSupported(reasonIfUnsupported, __VA_ARGS__);
#endif
126 } // anonymous namespace
127 
129  const TensorInfo& output,
130  Optional<std::string&> reasonIfUnsupported) const
131 {
133  return IsElementwiseUnarySupported(input, output, descriptor, reasonIfUnsupported);
134 }
135 
137  const TensorInfo& output,
138  const ActivationDescriptor& descriptor,
139  Optional<std::string&> reasonIfUnsupported) const
140 {
141  IgnoreUnused(descriptor);
143  reasonIfUnsupported,
144  input,
145  output,
146  descriptor);
147 }
148 
150  const TensorInfo& input1,
151  const TensorInfo& output,
152  Optional<std::string&> reasonIfUnsupported) const
153 {
155  reasonIfUnsupported,
156  input0,
157  input1,
158  output);
159 }
160 
162  const TensorInfo& output,
163  const ArgMinMaxDescriptor& descriptor,
164  Optional<std::string&> reasonIfUnsupported) const
165 {
167  reasonIfUnsupported,
168  input,
169  output,
170  descriptor);
171 }
172 
174  const TensorInfo& output,
175  const TensorInfo& mean,
176  const TensorInfo& var,
177  const TensorInfo& beta,
178  const TensorInfo& gamma,
179  const BatchNormalizationDescriptor& descriptor,
180  Optional<std::string&> reasonIfUnsupported) const
181 {
183  reasonIfUnsupported,
184  input,
185  output,
186  mean,
187  var,
188  beta,
189  gamma,
190  descriptor);
191 }
192 
194  const TensorInfo& output,
195  const BatchToSpaceNdDescriptor& descriptor,
196  Optional<std::string&> reasonIfUnsupported) const
197 {
199  reasonIfUnsupported,
200  input,
201  output,
202  descriptor);
203 }
204 
206  const TensorInfo& input1,
207  const TensorInfo& output,
208  const ComparisonDescriptor& descriptor,
209  Optional<std::string&> reasonIfUnsupported) const
210 {
211 
213  reasonIfUnsupported,
214  input0,
215  input1,
216  output,
217  descriptor);
218 }
219 
220 bool NeonLayerSupport::IsConcatSupported(const std::vector<const TensorInfo*> inputs,
221  const TensorInfo& output,
222  const ConcatDescriptor& descriptor,
223  Optional<std::string&> reasonIfUnsupported) const
224 {
225  if (descriptor.GetNumDimensions() <= descriptor.GetConcatAxis())
226  {
227  SetValueChecked(reasonIfUnsupported, "Neon Concat: Concat axis > Number of dimensions.");
228  return false;
229  }
230 
231  unsigned int concatInnerAxis = (descriptor.GetNumDimensions() - descriptor.GetConcatAxis()) - 1;
232  if(concatInnerAxis < 3) // Width, height, or channels
233  {
235  reasonIfUnsupported,
236  inputs,
237  output,
238  descriptor);
239  }
240  else if (concatInnerAxis == 3)
241  {
242  for (auto& input : inputs)
243  {
244  if (input && !output.IsTypeSpaceMatch(*input)) // Cannot use sub-tensors if the types are not same space
245  {
246  SetValueChecked(reasonIfUnsupported, "Neon Concat: Types and quantization parameters must match.");
247  return false;
248  }
249  }
250  return true; // Sub-tensors support concat along batch
251  }
252  else // > 4 dimensions not supported.
253  {
254  SetValueChecked(reasonIfUnsupported, "Neon Concat: Maximum of 4 dimensions supported.");
255  return false;
256  }
257 }
258 
260  Optional<std::string&> reasonIfUnsupported) const
261 {
263  reasonIfUnsupported,
264  output);
265 }
266 
268  const TensorInfo& output,
269  Optional<std::string&> reasonIfUnsupported) const
270 {
271  armnn::IgnoreUnused(input);
272  armnn::IgnoreUnused(output);
273  armnn::IgnoreUnused(reasonIfUnsupported);
274  return true;
275 }
276 
278  const TensorInfo& output,
279  Optional<std::string&> reasonIfUnsupported) const
280 {
281  armnn::IgnoreUnused(input);
282  armnn::IgnoreUnused(output);
283  armnn::IgnoreUnused(reasonIfUnsupported);
284  return true;
285 }
286 
288  const TensorInfo& output,
289  Optional<std::string&> reasonIfUnsupported) const
290 {
291  armnn::IgnoreUnused(input);
292  armnn::IgnoreUnused(output);
293  armnn::IgnoreUnused(reasonIfUnsupported);
294  return true;
295 }
296 
298  const TensorInfo& output,
299  Optional<std::string&> reasonIfUnsupported) const
300 {
301  armnn::IgnoreUnused(input);
302  armnn::IgnoreUnused(output);
303  armnn::IgnoreUnused(reasonIfUnsupported);
304  return true;
305 }
306 
308  const TensorInfo& output,
309  const Convolution2dDescriptor& descriptor,
310  const TensorInfo& weights,
311  const Optional<TensorInfo>& biases,
312  Optional<std::string&> reasonIfUnsupported) const
313 {
315  reasonIfUnsupported,
316  input,
317  output,
318  descriptor,
319  weights,
320  biases);
321 }
322 
324  const TensorInfo& output,
325  const DepthToSpaceDescriptor& descriptor,
326  Optional<std::string&> reasonIfUnsupported) const
327 {
329  reasonIfUnsupported,
330  input,
331  output,
332  descriptor);
333 }
334 
336  const TensorInfo& output,
337  const DepthwiseConvolution2dDescriptor& descriptor,
338  const TensorInfo& weights,
339  const Optional<TensorInfo>& biases,
340  Optional<std::string&> reasonIfUnsupported) const
341 {
343  reasonIfUnsupported,
344  input,
345  output,
346  descriptor,
347  weights,
348  biases);
349 }
350 
352  const TensorInfo& output,
353  Optional<std::string&> reasonIfUnsupported) const
354 {
356  reasonIfUnsupported,
357  input,
358  output);
359 }
360 
362  const TensorInfo& output,
363  const DepthwiseConvolution2dDescriptor& descriptor,
364  const TensorInfo& weights,
365  const Optional<TensorInfo>& biases,
366  Optional<std::string&> reasonIfUnsupported) const
367 {
369  reasonIfUnsupported,
370  input,
371  output,
372  descriptor,
373  weights,
374  biases);
375 }
376 
378  const TensorInfo& output,
379  const ElementwiseUnaryDescriptor& descriptor,
380  Optional<std::string&> reasonIfUnsupported) const
381 {
382  switch(descriptor.m_Operation)
383  {
384  case UnaryOperation::Abs:
386  reasonIfUnsupported,
387  input,
388  output);
389  case UnaryOperation::Exp:
391  reasonIfUnsupported,
392  input,
393  output);
394  case UnaryOperation::Neg:
396  reasonIfUnsupported,
397  input,
398  output);
401  reasonIfUnsupported,
402  input,
403  output);
404  default:
405  return false;
406  }
407 }
408 
410  const TensorInfo& output,
411  const FillDescriptor& descriptor,
412  Optional<std::string&> reasonIfUnsupported) const
413 {
414  armnn::IgnoreUnused(input);
415  armnn::IgnoreUnused(output);
416  armnn::IgnoreUnused(descriptor);
417 
418  return IsNeonBackendSupported(reasonIfUnsupported);
419 }
420 
422  const TensorInfo& output,
423  Optional<std::string&> reasonIfUnsupported) const
424 {
425  armnn::IgnoreUnused(output);
426  return IsNeonBackendSupported(reasonIfUnsupported) &&
427  IsSupportedForDataTypeGeneric(reasonIfUnsupported,
428  input.GetDataType(),
429  &FalseFuncF16<>,
430  &TrueFunc<>,
431  &FalseFuncU8<>,
432  &FalseFuncI32<>,
433  &FalseFuncU8<>);
434 }
435 
437  const TensorInfo& output,
438  const TensorInfo& weights,
439  const TensorInfo& biases,
440  const FullyConnectedDescriptor& descriptor,
441  Optional<std::string&> reasonIfUnsupported) const
442 {
444  reasonIfUnsupported,
445  input,
446  output,
447  weights,
448  biases,
449  descriptor);
450 }
451 
453  const TensorInfo& input1,
454  const TensorInfo& output,
455  const GatherDescriptor& descriptor,
456  Optional<std::string&> reasonIfUnsupported) const
457 {
459  reasonIfUnsupported,
460  input0,
461  input1,
462  output,
463  descriptor);
464 }
465 
467  const armnn::TensorInfo& input1,
468  const armnn::TensorInfo& output,
469  armnn::Optional<std::string&> reasonIfUnsupported) const
470 {
472  return IsComparisonSupported(input0, input1, output, descriptor, reasonIfUnsupported);
473 }
474 
476  Optional<std::string&> reasonIfUnsupported) const
477 {
478  return IsNeonBackendSupported(reasonIfUnsupported, input);
479 }
480 
482  const TensorInfo& output,
483  const InstanceNormalizationDescriptor& descriptor,
484  Optional<std::string&> reasonIfUnsupported) const
485 {
487  reasonIfUnsupported,
488  input,
489  output,
490  descriptor);
491 }
492 
494  const TensorInfo& output,
495  const L2NormalizationDescriptor& descriptor,
496  Optional<std::string&> reasonIfUnsupported) const
497 {
498  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonL2NormalizationWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
499 }
500 
502  const TensorInfo& output,
503  const LogSoftmaxDescriptor& descriptor,
504  Optional<std::string&> reasonIfUnsupported) const
505 {
506  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonLogSoftmaxWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
507 }
508 
510  const TensorInfo& outputStateIn,
511  const TensorInfo& cellStateIn,
512  const TensorInfo& scratchBuffer,
513  const TensorInfo& outputStateOut,
514  const TensorInfo& cellStateOut,
515  const TensorInfo& output,
516  const LstmDescriptor& descriptor,
517  const LstmInputParamsInfo& paramsInfo,
518  Optional<std::string&> reasonIfUnsupported) const
519 {
521  reasonIfUnsupported,
522  input,
523  outputStateIn,
524  cellStateIn,
525  scratchBuffer,
526  outputStateOut,
527  cellStateOut,
528  output,
529  descriptor,
530  paramsInfo);
531 }
532 
534  const TensorInfo& input1,
535  const TensorInfo& output,
536  Optional<std::string&> reasonIfUnsupported) const
537 {
539  reasonIfUnsupported,
540  input0,
541  input1,
542  output);
543 }
544 
546  const TensorInfo& output,
547  const MeanDescriptor& descriptor,
548  Optional<std::string&> reasonIfUnsupported) const
549 {
551  reasonIfUnsupported,
552  input,
553  output,
554  descriptor);
555 }
556 
557 bool NeonLayerSupport::IsMergerSupported(const std::vector<const TensorInfo*> inputs,
558  const TensorInfo& output,
559  const MergerDescriptor& descriptor,
560  Optional<std::string&> reasonIfUnsupported) const
561 {
562  return IsConcatSupported(inputs, output, descriptor, reasonIfUnsupported);
563 }
564 
566  const TensorInfo& input1,
567  const TensorInfo& output,
568  Optional<std::string&> reasonIfUnsupported) const
569 {
571  reasonIfUnsupported,
572  input0,
573  input1,
574  output);
575 }
576 
578  const TensorInfo& input1,
579  const TensorInfo& output,
580  Optional<std::string&> reasonIfUnsupported) const
581 {
583  reasonIfUnsupported,
584  input0,
585  input1,
586  output);
587 }
588 
590  const TensorInfo& input1,
591  const TensorInfo& output,
592  Optional<std::string&> reasonIfUnsupported) const
593 {
595  reasonIfUnsupported,
596  input0,
597  input1,
598  output);
599 }
600 
602  const TensorInfo& output,
603  const NormalizationDescriptor& descriptor,
604  Optional<std::string&> reasonIfUnsupported) const
605 {
607  reasonIfUnsupported,
608  input,
609  output,
610  descriptor);
611 }
612 
614  Optional<std::string&> reasonIfUnsupported) const
615 {
616  return IsNeonBackendSupported(reasonIfUnsupported, output);
617 }
618 
620  const TensorInfo& output,
621  const PadDescriptor& descriptor,
622  Optional<std::string&> reasonIfUnsupported) const
623 {
625  reasonIfUnsupported,
626  input,
627  output,
628  descriptor);
629 }
630 
632  const TensorInfo& output,
633  const PermuteDescriptor& descriptor,
634  Optional<std::string&> reasonIfUnsupported) const
635 {
636  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonPermuteWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
637 }
638 
640  const TensorInfo& output,
641  const Pooling2dDescriptor& descriptor,
642  Optional<std::string&> reasonIfUnsupported) const
643 {
644  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonPooling2dWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
645 }
646 
648  const armnn::TensorInfo &alpha,
649  const armnn::TensorInfo &output,
650  armnn::Optional<std::string &> reasonIfUnsupported) const
651 {
652  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonPreluWorkloadValidate, reasonIfUnsupported, input, alpha, output);
653 }
654 
656  const TensorInfo& previousOutputIn,
657  const TensorInfo& previousCellStateIn,
658  const TensorInfo& outputStateOut,
659  const TensorInfo& cellStateOut,
660  const TensorInfo& output,
661  const QLstmDescriptor& descriptor,
662  const LstmInputParamsInfo& paramsInfo,
663  Optional<std::string&> reasonIfUnsupported) const
664 {
665  // Check required here in order to pass IsLayerSupported for datatypes tests
666  if (input.GetDataType() == armnn::DataType::QAsymmS8 &&
667  previousOutputIn.GetDataType() == armnn::DataType::QAsymmS8 &&
668  previousCellStateIn.GetDataType() == armnn::DataType::QSymmS16 &&
669  outputStateOut.GetDataType() == armnn::DataType::QAsymmS8 &&
670  cellStateOut.GetDataType() == armnn::DataType::QSymmS16 &&
672  {
674  reasonIfUnsupported,
675  input,
676  previousCellStateIn,
677  previousOutputIn,
678  cellStateOut,
679  outputStateOut,
680  output,
681  descriptor,
682  paramsInfo);
683  }
684  else
685  {
686  return false;
687  }
688 }
689 
691  const TensorInfo& output,
692  Optional<std::string&> reasonIfUnsupported) const
693 {
695  reasonIfUnsupported,
696  input,
697  output);
698 }
699 
701  const TensorInfo& cellStateIn,
702  const TensorInfo& outputStateIn,
703  const TensorInfo& cellStateOut,
704  const TensorInfo& outputStateOut,
705  const QuantizedLstmInputParamsInfo& paramsInfo,
706  Optional<std::string&> reasonIfUnsupported) const
707 {
709  reasonIfUnsupported,
710  input,
711  cellStateIn,
712  outputStateIn,
713  cellStateOut,
714  outputStateOut,
715  paramsInfo);
716 }
717 
719  const TensorInfo& output,
720  const ReshapeDescriptor& descriptor,
721  Optional<std::string&> reasonIfUnsupported) const
722 {
723  armnn::IgnoreUnused(descriptor);
725  reasonIfUnsupported,
726  input,
727  output);
728 }
729 
731  const TensorInfo& output,
732  const ResizeDescriptor& descriptor,
733  Optional<std::string&> reasonIfUnsupported) const
734 {
736  reasonIfUnsupported,
737  input,
738  output,
739  descriptor);
740 }
741 
743  const TensorInfo& output,
744  Optional<std::string&> reasonIfUnsupported) const
745 {
746  ResizeDescriptor descriptor;
747  descriptor.m_Method = ResizeMethod::Bilinear;
748  descriptor.m_DataLayout = DataLayout::NCHW;
749 
750  const TensorShape& outputShape = output.GetShape();
751  descriptor.m_TargetHeight = outputShape[2];
752  descriptor.m_TargetWidth = outputShape[3];
753 
754  return IsResizeSupported(input, output, descriptor, reasonIfUnsupported);
755 }
756 
758  const TensorInfo& output,
759  Optional<std::string&> reasonIfUnsupported) const
760 {
762  return IsElementwiseUnarySupported(input, output, descriptor, reasonIfUnsupported);
763 }
764 
766  const TensorInfo& output,
767  const SliceDescriptor& descriptor,
768  Optional<std::string&> reasonIfUnsupported) const
769 {
771  reasonIfUnsupported,
772  input,
773  output,
774  descriptor);
775 }
776 
778  const TensorInfo& output,
779  const SoftmaxDescriptor& descriptor,
780  Optional<std::string&> reasonIfUnsupported) const
781 {
782  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonSoftmaxWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
783 }
784 
786  const TensorInfo& output,
787  const SpaceToBatchNdDescriptor& descriptor,
788  Optional<std::string&> reasonIfUnsupported) const
789 {
791  reasonIfUnsupported,
792  input,
793  output,
794  descriptor);
795 }
796 
798  const TensorInfo& output,
799  const SpaceToDepthDescriptor& descriptor,
800  Optional<std::string&> reasonIfUnsupported) const
801 {
803  reasonIfUnsupported,
804  input,
805  output,
806  descriptor);
807 }
808 
810  const ViewsDescriptor& descriptor,
811  Optional<std::string&> reasonIfUnsupported) const
812 {
813  armnn::IgnoreUnused(descriptor);
814  return IsSupportedForDataTypeNeon(reasonIfUnsupported,
815  input.GetDataType(),
816  &TrueFunc<>,
817  &TrueFunc<>);
818 }
819 
821  const std::vector<std::reference_wrapper<TensorInfo>>& outputs,
822  const ViewsDescriptor& descriptor,
823  Optional<std::string&> reasonIfUnsupported) const
824 {
825 #if defined(ARMCOMPUTENEON_ENABLED)
826  // Split along the last dimension, cannot use sub-tensors
827  // as width and height of the sub-tensors do not match
828  // the width and height of the parent tensor
829  // in case of input with more than 2D.
830  std::set<unsigned int> splitAxis = ComputeSplitAxis(descriptor, input.GetShape());
831  if (descriptor.GetNumDimensions() > 2 && splitAxis.size() == 1 &&
832  *splitAxis.begin() == descriptor.GetNumDimensions() - 1 )
833  {
835  reasonIfUnsupported,
836  input,
837  outputs,
838  *splitAxis.begin());
839  }
840 #endif
841  IgnoreUnused(descriptor);
842  for (auto output : outputs)
843  {
844  if (!input.IsTypeSpaceMatch(output)) // Cannot use sub-tensors if the types are not same space
845  {
846  SetValueChecked(reasonIfUnsupported, "Neon Splitter: Types and quantization parameters must match.");
847  return false;
848  }
849  }
850  return true;
851 }
852 
853 bool NeonLayerSupport::IsStackSupported(const std::vector<const TensorInfo*>& inputs,
854  const TensorInfo& output,
855  const StackDescriptor& descriptor,
856  Optional<std::string&> reasonIfUnsupported) const
857 {
859  reasonIfUnsupported,
860  inputs,
861  output,
862  descriptor);
863 }
864 
866  const TensorInfo& output,
867  const StridedSliceDescriptor& descriptor,
868  Optional<std::string&> reasonIfUnsupported) const
869 {
871  reasonIfUnsupported,
872  input,
873  output,
874  descriptor);
875 }
876 
878  const TensorInfo& input1,
879  const TensorInfo& output,
880  Optional<std::string&> reasonIfUnsupported) const
881 {
883  reasonIfUnsupported,
884  input0,
885  input1,
886  output);
887 }
888 
890  const TensorInfo& output,
891  const TransposeConvolution2dDescriptor& descriptor,
892  const TensorInfo& weights,
893  const Optional<TensorInfo>& biases,
894  Optional<std::string&> reasonIfUnsupported) const
895 {
897  reasonIfUnsupported,
898  input,
899  output,
900  descriptor,
901  weights,
902  biases);
903 }
904 
906  const TensorInfo& output,
907  const TransposeDescriptor& descriptor,
908  Optional<std::string&> reasonIfUnsupported) const
909 {
910  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonTransposeWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
911 }
912 
913 } // namespace armnn
arm_compute::Status NeonGatherWorkloadValidate(const TensorInfo &input, const TensorInfo &indices, const TensorInfo &output, const GatherDescriptor &descriptor)
arm_compute::Status NeonNegWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
UnaryOperation m_Operation
Specifies the elementwiseUnary operation to execute.
bool IsSliceSupported(const TensorInfo &input, const TensorInfo &output, const SliceDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsSoftmaxSupported(const TensorInfo &input, const TensorInfo &output, const SoftmaxDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsConvolution2dSupported(const TensorInfo &input, const TensorInfo &output, const Convolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
A ViewsDescriptor for the SplitterLayer.
bool IsTypeSpaceMatch(const TensorInfo &other) const
Check that the types are the same and, if quantize, that the quantization parameters are the same...
Definition: Tensor.cpp:424
bool IsConvertFp32ToFp16Supported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
arm_compute::Status NeonSpaceToDepthWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const SpaceToDepthDescriptor &descriptor)
arm_compute::Status NeonSplitterWorkloadValidate(const TensorInfo &input, const std::vector< std::reference_wrapper< TensorInfo >> &outputs, unsigned int splitAxis)
arm_compute::Status NeonMultiplicationWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output)
A TransposeConvolution2dDescriptor for the TransposeConvolution2dLayer.
const TensorShape & GetShape() const
Definition: Tensor.hpp:187
arm_compute::Status NeonLogSoftmaxWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const LogSoftmaxDescriptor &descriptor)
A ReshapeDescriptor for the ReshapeLayer.
bool IsGatherSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const GatherDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported) const override
bool IsDilatedDepthwiseConvolutionSupported(const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string &> reason=EmptyOptional()) const override
bool IsQuantizedLstmSupported(const TensorInfo &input, const TensorInfo &cellStateIn, const TensorInfo &outputStateIn, const TensorInfo &cellStateOut, const TensorInfo &outputStateOut, const QuantizedLstmInputParamsInfo &paramsInfo, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
uint32_t GetNumDimensions() const
Get the number of dimensions.
arm_compute::Status NeonBatchNormalizationValidate(const TensorInfo &input, const TensorInfo &output, const TensorInfo &mean, const TensorInfo &var, const TensorInfo &beta, const TensorInfo &gamma, const BatchNormalizationDescriptor &descriptor)
A ComparisonDescriptor for the ComparisonLayer.
Definition: Descriptors.hpp:70
bool IsDepthToSpaceSupported(const TensorInfo &input, const TensorInfo &output, const DepthToSpaceDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsL2NormalizationSupported(const TensorInfo &input, const TensorInfo &output, const L2NormalizationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsStridedSliceSupported(const TensorInfo &input, const TensorInfo &output, const StridedSliceDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsConvertFp32ToBf16Supported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
ISubgraphViewConverter supported
bool IsAdditionSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
A Convolution2dDescriptor for the Convolution2dLayer.
bool IsConcatSupported(const std::vector< const TensorInfo *> inputs, const TensorInfo &output, const ConcatDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsSplitterSupported(const TensorInfo &input, const ViewsDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
arm_compute::Status NeonMeanWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const MeanDescriptor &desc)
arm_compute::Status NeonAdditionWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output)
ResizeMethod m_Method
The Interpolation method to use (Bilinear, NearestNeighbor).
arm_compute::Status NeonActivationWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const ActivationDescriptor &descriptor)
arm_compute::Status NeonMinimumWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output)
Validate function for validating the inputs and output.
arm_compute::Status NeonStridedSliceWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const StridedSliceDescriptor &descriptor)
arm_compute::Status NeonTransposeConvolution2dWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const TransposeConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases)
bool IsGreaterSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
arm_compute::Status NeonLstmFloatWorkloadValidate(const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &scratchBuffer, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const LstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo)
arm_compute::Status NeonQLstmWorkloadValidate(const TensorInfo &input, const TensorInfo &cellStateIn, const TensorInfo &outputStateIn, const TensorInfo &cellStateOut, const TensorInfo &outputStateOut, const TensorInfo &output, const QLstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo)
bool IsResizeBilinearSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
arm_compute::Status NeonSliceWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const SliceDescriptor &descriptor)
bool IsConstantSupported(const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
Copyright (c) 2020 ARM Limited.
arm_compute::Status NeonQuantizeWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
bool IsPadSupported(const TensorInfo &input, const TensorInfo &output, const PadDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
void IgnoreUnused(Ts &&...)
std::set< unsigned int > ComputeSplitAxis(const armnn::SplitterDescriptor &desc, const TensorShape &input)
A SpaceToDepthDescriptor for the SpaceToDepthLayer.
arm_compute::Status NeonInstanceNormalizationWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const InstanceNormalizationDescriptor &descriptor)
bool IsPooling2dSupported(const TensorInfo &input, const TensorInfo &output, const Pooling2dDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
A BatchToSpaceNdDescriptor for the BatchToSpaceNdLayer.
bool IsActivationSupported(const TensorInfo &input, const TensorInfo &output, const ActivationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsMultiplicationSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsComparisonSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ComparisonDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
A ResizeDescriptor for the ResizeLayer.
arm_compute::Status NeonL2NormalizationWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const L2NormalizationDescriptor &descriptor)
arm_compute::Status NeonAbsWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
A StackDescriptor for the StackLayer.
arm_compute::Status NeonQuantizedLstmWorkloadValidate(const TensorInfo &input, const TensorInfo &cellStateIn, const TensorInfo &outputStateIn, const TensorInfo &cellStateOut, const TensorInfo &outputStateOut, const QuantizedLstmInputParamsInfo &paramsInfo)
arm_compute::Status NeonStackWorkloadValidate(const std::vector< const TensorInfo *> &inputs, const TensorInfo &output, const StackDescriptor &descriptor)
arm_compute::Status NeonSpaceToBatchNdWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const SpaceToBatchNdDescriptor &descriptor)
A PadDescriptor for the PadLayer.
bool IsAbsSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
DataType
Definition: Types.hpp:32
bool IsFillSupported(const TensorInfo &input, const TensorInfo &output, const FillDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
An LstmDescriptor for the LstmLayer.
arm_compute::Status NeonExpWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
arm_compute::Status NeonReshapeWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
bool IsInputSupported(const TensorInfo &input, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsStackSupported(const std::vector< const TensorInfo *> &inputs, const TensorInfo &output, const StackDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
A L2NormalizationDescriptor for the L2NormalizationLayer.
bool IsMinimumSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsPreluSupported(const TensorInfo &input, const TensorInfo &alpha, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsOutputSupported(const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
An ArgMinMaxDescriptor for ArgMinMaxLayer.
Definition: Descriptors.hpp:51
bool IsInstanceNormalizationSupported(const TensorInfo &input, const TensorInfo &output, const InstanceNormalizationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
DataType GetDataType() const
Definition: Tensor.hpp:194
An OriginsDescriptor for the ConcatLayer.
A FullyConnectedDescriptor for the FullyConnectedLayer.
bool IsMergerSupported(const std::vector< const TensorInfo *> inputs, const TensorInfo &output, const MergerDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsNormalizationSupported(const TensorInfo &input, const TensorInfo &output, const NormalizationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsElementwiseUnarySupported(const TensorInfo &input, const TensorInfo &output, const ElementwiseUnaryDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
uint32_t m_TargetWidth
Target width value.
A GatherDescriptor for the GatherLayer.
Status
enumeration
Definition: Types.hpp:26
arm_compute::Status NeonComparisonWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ComparisonDescriptor &descriptor)
bool IsDivisionSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
arm_compute::Status NeonFullyConnectedWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const TensorInfo &weights, const TensorInfo &biases, const FullyConnectedDescriptor &descriptor)
arm_compute::Status NeonConcatWorkloadValidate(const std::vector< const TensorInfo *> &inputs, const TensorInfo &output, const OriginsDescriptor &descriptor)
arm_compute::Status NeonPermuteWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const PermuteDescriptor &descriptor)
bool IsFloorSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
A QLstmDescriptor for the QLstmLayer.
bool IsLogSoftmaxSupported(const TensorInfo &input, const TensorInfo &output, const LogSoftmaxDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
An ActivationDescriptor for the ActivationLayer.
Definition: Descriptors.hpp:20
bool IsResizeSupported(const TensorInfo &input, const TensorInfo &output, const ResizeDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
arm_compute::Status NeonConvolution2dWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const Convolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases)
bool IsTransposeConvolution2dSupported(const TensorInfo &input, const TensorInfo &output, const TransposeConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
uint32_t m_TargetHeight
Target height value.
A SliceDescriptor for the SliceLayer.
arm_compute::Status NeonDequantizeWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
bool IsReshapeSupported(const TensorInfo &input, const TensorInfo &output, const ReshapeDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsBatchNormalizationSupported(const TensorInfo &input, const TensorInfo &output, const TensorInfo &mean, const TensorInfo &var, const TensorInfo &beta, const TensorInfo &gamma, const BatchNormalizationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
arm_compute::Status NeonDepthwiseConvolutionWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases)
void SetValueChecked(Optional< T &> optionalRef, V &&val)
A SpaceToBatchNdDescriptor for the SpaceToBatchNdLayer.
arm_compute::Status NeonNormalizationWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const NormalizationDescriptor &descriptor)
arm_compute::Status NeonBatchToSpaceNdWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const BatchToSpaceNdDescriptor &desc)
bool IsQuantizeSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
arm_compute::Status NeonRsqrtWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
bool IsConvertFp16ToFp32Supported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
arm_compute::Status NeonPadWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const PadDescriptor &descriptor)
A ElementwiseUnaryDescriptor for the ElementwiseUnaryLayer.
Definition: Descriptors.hpp:90
bool IsSubtractionSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsLstmSupported(const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &scratchBuffer, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const LstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
arm_compute::Status NeonArgMinMaxWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const ArgMinMaxDescriptor &descriptor)
arm_compute::Status NeonSoftmaxWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const SoftmaxDescriptor &descriptor)
bool IsSpaceToBatchNdSupported(const TensorInfo &input, const TensorInfo &output, const SpaceToBatchNdDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
arm_compute::Status NeonTransposeWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const TransposeDescriptor &descriptor)
uint32_t GetNumDimensions() const
Get the number of dimensions.
A MeanDescriptor for the MeanLayer.
bool IsDepthwiseConvolutionSupported(const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
arm_compute::Status NeonMaximumWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output)
bool IsArgMinMaxSupported(const TensorInfo &input, const TensorInfo &output, const ArgMinMaxDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsSpaceToDepthSupported(const TensorInfo &input, const TensorInfo &output, const SpaceToDepthDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
arm_compute::Status NeonSubtractionWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output)
A TransposeDescriptor for the TransposeLayer.
A StridedSliceDescriptor for the StridedSliceLayer.
bool IsTransposeSupported(const TensorInfo &input, const TensorInfo &output, const TransposeDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsMaximumSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
arm_compute::Status NeonConstantWorkloadValidate(const TensorInfo &output)
bool IsFullyConnectedSupported(const TensorInfo &input, const TensorInfo &output, const TensorInfo &weights, const TensorInfo &biases, const FullyConnectedDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsMeanSupported(const TensorInfo &input, const TensorInfo &output, const MeanDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsBatchToSpaceNdSupported(const TensorInfo &input, const TensorInfo &output, const BatchToSpaceNdDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
arm_compute::Status NeonPreluWorkloadValidate(const TensorInfo &input, const TensorInfo &alpha, const TensorInfo &output)
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported,...)
arm_compute::Status NeonDepthToSpaceWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const DepthToSpaceDescriptor &descriptor)
A Pooling2dDescriptor for the Pooling2dLayer.
arm_compute::Status NeonResizeWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const ResizeDescriptor &descriptor)
A NormalizationDescriptor for the NormalizationLayer.
bool IsQLstmSupported(const TensorInfo &input, const TensorInfo &previousOutputIn, const TensorInfo &previousCellStateIn, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const QLstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsConvertBf16ToFp32Supported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
An InstanceNormalizationDescriptor for InstanceNormalizationLayer.
unsigned int GetConcatAxis() const
Get the concatenation axis value.
bool IsSupportedForDataTypeGeneric(Optional< std::string &> reasonIfUnsupported, DataType dataType, Float16Func float16FuncPtr, Float32Func float32FuncPtr, Uint8Func uint8FuncPtr, Int32Func int32FuncPtr, BooleanFunc booleanFuncPtr, Params &&... params)
A SoftmaxDescriptor for the SoftmaxLayer.
arm_compute::Status NeonDivisionWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output)
arm_compute::Status NeonPooling2dWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const Pooling2dDescriptor &descriptor)
bool IsDequantizeSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
A DepthwiseConvolution2dDescriptor for the DepthwiseConvolution2dLayer.
bool IsRsqrtSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
A FillDescriptor for the FillLayer.
A BatchNormalizationDescriptor for the BatchNormalizationLayer.
A PermuteDescriptor for the PermuteLayer.
bool IsPermuteSupported(const TensorInfo &input, const TensorInfo &output, const PermuteDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override