ArmNN
 21.08
NeonLayerSupport.cpp
Go to the documentation of this file.
1 //
2 // Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
6 #include "NeonLayerSupport.hpp"
7 #include "NeonBackendId.hpp"
9 
10 #include <armnn/Descriptors.hpp>
11 #include <armnn/Exceptions.hpp>
12 #include <armnn/Tensor.hpp>
13 #include <armnn/Types.hpp>
15 
16 #include <InternalTypes.hpp>
17 #include <LayerSupportCommon.hpp>
20 
21 #if defined(ARMCOMPUTENEON_ENABLED)
78 #endif
79 
80 namespace armnn
81 {
82 
83 namespace
84 {
85 
86 template< typename ... Args>
87 bool IsNeonBackendSupported(Optional<std::string&> reasonIfUnsupported, Args... args)
88 {
89  IgnoreUnused(reasonIfUnsupported, (args)...);
90 #if defined(ARMCOMPUTENEON_ENABLED)
91  return true;
92 #else
93  SetValueChecked(reasonIfUnsupported, "The armnn library has been built without NEON support");
94  return false;
95 #endif
96 }
97 
98 template<typename FloatFunc, typename Uint8Func, typename ... Params>
99 bool IsSupportedForDataTypeNeon(Optional<std::string&> reasonIfUnsupported,
100  DataType dataType,
101  FloatFunc floatFuncPtr,
102  Uint8Func uint8FuncPtr,
103  Params&&... params)
104 {
105  return IsNeonBackendSupported(reasonIfUnsupported) &&
106  IsSupportedForDataTypeGeneric(reasonIfUnsupported,
107  dataType,
108  floatFuncPtr,
109  floatFuncPtr,
110  uint8FuncPtr,
111  &FalseFunc<>,
112  &FalseFunc<>,
113  std::forward<Params>(params)...);
114 }
115 
#if defined(ARMCOMPUTENEON_ENABLED)
// Invokes an Arm Compute Library (ACL) validate function and converts the
// resulting arm_compute::Status into armnn's bool-plus-reason convention.
template<class FuncType, class... Args>
inline bool IsWorkloadSupported(FuncType& func, Optional<std::string&> reasonIfUnsupported, Args&&... args)
{
    arm_compute::Status aclStatus = func(std::forward<Args>(args)...);
    const bool supported = (aclStatus.error_code() == arm_compute::ErrorCode::OK);
    if (!supported && reasonIfUnsupported)
    {
        // Surface ACL's human-readable error text to the caller.
        reasonIfUnsupported.value() = aclStatus.error_description();
    }
    return supported;
}

// With NEON compiled in, a support query delegates to the given ACL validate
// function; note the macro expands to a return statement.
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported, ...) \
    return IsWorkloadSupported(func, reasonIfUnsupported, __VA_ARGS__);
#else
// Without NEON, every such query reports the backend as unavailable.
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported, ...) \
    return IsNeonBackendSupported(reasonIfUnsupported, __VA_ARGS__);
#endif
135 } // anonymous namespace
136 
138  : m_ModelContextPtr(modelContextPtr)
139 {
140 }
141 
143  : m_ModelContextPtr(nullptr)
144 {
145 }
146 
148  const TensorInfo& output,
149  Optional<std::string&> reasonIfUnsupported) const
150 {
152  return IsElementwiseUnarySupported(input, output, descriptor, reasonIfUnsupported);
153 }
154 
156  const TensorInfo& output,
157  const ActivationDescriptor& descriptor,
158  Optional<std::string&> reasonIfUnsupported) const
159 {
160  IgnoreUnused(descriptor);
162  reasonIfUnsupported,
163  input,
164  output,
165  descriptor);
166 }
167 
169  const TensorInfo& input1,
170  const TensorInfo& output,
171  Optional<std::string&> reasonIfUnsupported) const
172 {
174  reasonIfUnsupported,
175  input0,
176  input1,
177  output,
178  nullptr);
179 }
180 
182  const TensorInfo& output,
183  const ArgMinMaxDescriptor& descriptor,
184  Optional<std::string&> reasonIfUnsupported) const
185 {
187  reasonIfUnsupported,
188  input,
189  output,
190  descriptor);
191 }
192 
194  const TensorInfo& output,
195  const TensorInfo& mean,
196  const TensorInfo& var,
197  const TensorInfo& beta,
198  const TensorInfo& gamma,
199  const BatchNormalizationDescriptor& descriptor,
200  Optional<std::string&> reasonIfUnsupported) const
201 {
203  reasonIfUnsupported,
204  input,
205  output,
206  mean,
207  var,
208  beta,
209  gamma,
210  descriptor,
211  nullptr);
212 }
213 
215  const TensorInfo& output,
216  const BatchToSpaceNdDescriptor& descriptor,
217  Optional<std::string&> reasonIfUnsupported) const
218 {
220  reasonIfUnsupported,
221  input,
222  output,
223  descriptor);
224 }
225 
227  const TensorInfo& output,
228  Optional<std::string&> reasonIfUnsupported) const
229 {
231  reasonIfUnsupported,
232  input,
233  output);
234 }
235 
237  const TensorInfo& input1,
238  const TensorInfo& output,
239  const ComparisonDescriptor& descriptor,
240  Optional<std::string&> reasonIfUnsupported) const
241 {
242 
244  reasonIfUnsupported,
245  input0,
246  input1,
247  output,
248  descriptor);
249 }
250 
251 bool NeonLayerSupport::IsConcatSupported(const std::vector<const TensorInfo*> inputs,
252  const TensorInfo& output,
253  const ConcatDescriptor& descriptor,
254  Optional<std::string&> reasonIfUnsupported) const
255 {
256  if (descriptor.GetNumDimensions() <= descriptor.GetConcatAxis())
257  {
258  SetValueChecked(reasonIfUnsupported, "Neon Concat: Concat axis > Number of dimensions.");
259  return false;
260  }
261 
262  unsigned int concatInnerAxis = (descriptor.GetNumDimensions() - descriptor.GetConcatAxis()) - 1;
263  if(concatInnerAxis < 3) // Width, height, or channels
264  {
266  reasonIfUnsupported,
267  inputs,
268  output,
269  descriptor);
270  }
271  else if (concatInnerAxis == 3)
272  {
273  for (auto& input : inputs)
274  {
275  if (input && !output.IsTypeSpaceMatch(*input)) // Cannot use sub-tensors if the types are not same space
276  {
277  SetValueChecked(reasonIfUnsupported, "Neon Concat: Types and quantization parameters must match.");
278  return false;
279  }
280  }
281  return true; // Sub-tensors support concat along batch
282  }
283  else // > 4 dimensions not supported.
284  {
285  SetValueChecked(reasonIfUnsupported, "Neon Concat: Maximum of 4 dimensions supported.");
286  return false;
287  }
288 }
289 
291  Optional<std::string&> reasonIfUnsupported) const
292 {
294  reasonIfUnsupported,
295  output);
296 }
297 
299  const TensorInfo& output,
300  Optional<std::string&> reasonIfUnsupported) const
301 {
302  armnn::IgnoreUnused(input);
303  armnn::IgnoreUnused(output);
304  armnn::IgnoreUnused(reasonIfUnsupported);
305  return true;
306 }
307 
309  const TensorInfo& output,
310  Optional<std::string&> reasonIfUnsupported) const
311 {
312  armnn::IgnoreUnused(input);
313  armnn::IgnoreUnused(output);
314  armnn::IgnoreUnused(reasonIfUnsupported);
315  return true;
316 }
317 
319  const TensorInfo& output,
320  Optional<std::string&> reasonIfUnsupported) const
321 {
322  armnn::IgnoreUnused(input);
323  armnn::IgnoreUnused(output);
324  armnn::IgnoreUnused(reasonIfUnsupported);
325  return true;
326 }
327 
329  const TensorInfo& output,
330  Optional<std::string&> reasonIfUnsupported) const
331 {
332  armnn::IgnoreUnused(input);
333  armnn::IgnoreUnused(output);
334  armnn::IgnoreUnused(reasonIfUnsupported);
335  return true;
336 }
337 
339  const TensorInfo& output,
340  const Convolution2dDescriptor& descriptor,
341  const TensorInfo& weights,
342  const Optional<TensorInfo>& biases,
343  Optional<std::string&> reasonIfUnsupported) const
344 {
345  bool isFastMathEnabled = false;
346 #if defined(ARMCOMPUTENEON_ENABLED)
347  if (m_ModelContextPtr)
348  {
349  if (m_ModelContextPtr.get() != nullptr)
350  {
351  auto modelOptions = dynamic_cast<NeonBackendModelContext*>(m_ModelContextPtr.get());
352  if (modelOptions)
353  {
354  isFastMathEnabled = modelOptions->IsFastMathEnabled();
355  }
356  }
357  }
358 #endif
359 
361  reasonIfUnsupported,
362  input,
363  output,
364  descriptor,
365  weights,
366  biases,
367  isFastMathEnabled,
368  nullptr);
369 }
370 
372  const TensorInfo& output,
373  const DepthToSpaceDescriptor& descriptor,
374  Optional<std::string&> reasonIfUnsupported) const
375 {
377  reasonIfUnsupported,
378  input,
379  output,
380  descriptor);
381 }
382 
384  const TensorInfo& output,
385  const DepthwiseConvolution2dDescriptor& descriptor,
386  const TensorInfo& weights,
387  const Optional<TensorInfo>& biases,
388  Optional<std::string&> reasonIfUnsupported) const
389 {
391  reasonIfUnsupported,
392  input,
393  output,
394  descriptor,
395  weights,
396  biases,
397  nullptr);
398 }
399 
401  const TensorInfo& output,
402  Optional<std::string&> reasonIfUnsupported) const
403 {
405  reasonIfUnsupported,
406  input,
407  output);
408 }
409 
411  const TensorInfo& output,
412  const DepthwiseConvolution2dDescriptor& descriptor,
413  const TensorInfo& weights,
414  const Optional<TensorInfo>& biases,
415  Optional<std::string&> reasonIfUnsupported) const
416 {
418  reasonIfUnsupported,
419  input,
420  output,
421  descriptor,
422  weights,
423  biases,
424  nullptr);
425 }
426 
428  const TensorInfo& output,
429  const ElementwiseUnaryDescriptor& descriptor,
430  Optional<std::string&> reasonIfUnsupported) const
431 {
432  switch(descriptor.m_Operation)
433  {
434  case UnaryOperation::Abs:
436  reasonIfUnsupported,
437  input,
438  output);
439  case UnaryOperation::Exp:
441  reasonIfUnsupported,
442  input,
443  output);
446  reasonIfUnsupported,
447  input,
448  output);
449  case UnaryOperation::Log:
451  reasonIfUnsupported,
452  input,
453  output);
454  case UnaryOperation::Neg:
456  reasonIfUnsupported,
457  input,
458  output);
461  reasonIfUnsupported,
462  input,
463  output);
464  default:
465  return false;
466  }
467 }
468 
470  const TensorInfo& output,
471  const FillDescriptor& descriptor,
472  Optional<std::string&> reasonIfUnsupported) const
473 {
474  armnn::IgnoreUnused(input);
475  armnn::IgnoreUnused(output);
476  armnn::IgnoreUnused(descriptor);
477 
478  return IsNeonBackendSupported(reasonIfUnsupported);
479 }
480 
482  const TensorInfo& output,
483  Optional<std::string&> reasonIfUnsupported) const
484 {
485  armnn::IgnoreUnused(output);
486  return IsNeonBackendSupported(reasonIfUnsupported) &&
487  IsSupportedForDataTypeGeneric(reasonIfUnsupported,
488  input.GetDataType(),
489  &FalseFuncF16<>,
490  &TrueFunc<>,
491  &FalseFuncU8<>,
492  &FalseFuncI32<>,
493  &FalseFuncU8<>);
494 }
495 
497  const TensorInfo& output,
498  const TensorInfo& weights,
499  const TensorInfo& biases,
500  const FullyConnectedDescriptor& descriptor,
501  Optional<std::string&> reasonIfUnsupported) const
502 {
504  reasonIfUnsupported,
505  input,
506  output,
507  weights,
508  biases,
509  descriptor,
510  nullptr);
511 }
512 
514  const TensorInfo& input1,
515  const TensorInfo& output,
516  const GatherDescriptor& descriptor,
517  Optional<std::string&> reasonIfUnsupported) const
518 {
520  reasonIfUnsupported,
521  input0,
522  input1,
523  output,
524  descriptor);
525 }
526 
528  const armnn::TensorInfo& input1,
529  const armnn::TensorInfo& output,
530  armnn::Optional<std::string&> reasonIfUnsupported) const
531 {
533  return IsComparisonSupported(input0, input1, output, descriptor, reasonIfUnsupported);
534 }
535 
537  Optional<std::string&> reasonIfUnsupported) const
538 {
539  return IsNeonBackendSupported(reasonIfUnsupported, input);
540 }
541 
543  const TensorInfo& output,
544  const InstanceNormalizationDescriptor& descriptor,
545  Optional<std::string&> reasonIfUnsupported) const
546 {
548  reasonIfUnsupported,
549  input,
550  output,
551  descriptor);
552 }
553 
555  const TensorInfo& output,
556  const L2NormalizationDescriptor& descriptor,
557  Optional<std::string&> reasonIfUnsupported) const
558 {
559  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonL2NormalizationWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
560 }
561 
563  const TensorInfo& input1,
564  const TensorInfo& output,
565  const LogicalBinaryDescriptor& descriptor,
566  Optional<std::string&> reasonIfUnsupported) const
567 {
568  switch(descriptor.m_Operation)
569  {
572  reasonIfUnsupported,
573  input0,
574  input1,
575  output);
578  reasonIfUnsupported,
579  input0,
580  input1,
581  output);
582  default:
583  return false;
584  }
585 }
586 
588  const TensorInfo& output,
589  const LogSoftmaxDescriptor& descriptor,
590  Optional<std::string&> reasonIfUnsupported) const
591 {
592  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonLogSoftmaxWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
593 }
594 
596  const TensorInfo& outputStateIn,
597  const TensorInfo& cellStateIn,
598  const TensorInfo& scratchBuffer,
599  const TensorInfo& outputStateOut,
600  const TensorInfo& cellStateOut,
601  const TensorInfo& output,
602  const LstmDescriptor& descriptor,
603  const LstmInputParamsInfo& paramsInfo,
604  Optional<std::string&> reasonIfUnsupported) const
605 {
607  reasonIfUnsupported,
608  input,
609  outputStateIn,
610  cellStateIn,
611  scratchBuffer,
612  outputStateOut,
613  cellStateOut,
614  output,
615  descriptor,
616  paramsInfo);
617 }
618 
620  const TensorInfo& input1,
621  const TensorInfo& output,
622  Optional<std::string&> reasonIfUnsupported) const
623 {
625  reasonIfUnsupported,
626  input0,
627  input1,
628  output);
629 }
630 
632  const TensorInfo& output,
633  const MeanDescriptor& descriptor,
634  Optional<std::string&> reasonIfUnsupported) const
635 {
637  reasonIfUnsupported,
638  input,
639  output,
640  descriptor);
641 }
642 
643 bool NeonLayerSupport::IsMergerSupported(const std::vector<const TensorInfo*> inputs,
644  const TensorInfo& output,
645  const MergerDescriptor& descriptor,
646  Optional<std::string&> reasonIfUnsupported) const
647 {
648  return IsConcatSupported(inputs, output, descriptor, reasonIfUnsupported);
649 }
650 
652  const TensorInfo& input1,
653  const TensorInfo& output,
654  Optional<std::string&> reasonIfUnsupported) const
655 {
657  reasonIfUnsupported,
658  input0,
659  input1,
660  output);
661 }
662 
664  const TensorInfo& input1,
665  const TensorInfo& output,
666  Optional<std::string&> reasonIfUnsupported) const
667 {
669  reasonIfUnsupported,
670  input0,
671  input1,
672  output,
673  nullptr);
674 }
675 
677  const TensorInfo& input1,
678  const TensorInfo& output,
679  Optional<std::string&> reasonIfUnsupported) const
680 {
682  reasonIfUnsupported,
683  input0,
684  input1,
685  output,
686  nullptr);
687 }
688 
690  const TensorInfo& output,
691  const NormalizationDescriptor& descriptor,
692  Optional<std::string&> reasonIfUnsupported) const
693 {
695  reasonIfUnsupported,
696  input,
697  output,
698  descriptor);
699 }
700 
702  Optional<std::string&> reasonIfUnsupported) const
703 {
704  return IsNeonBackendSupported(reasonIfUnsupported, output);
705 }
706 
708  const TensorInfo& output,
709  const PadDescriptor& descriptor,
710  Optional<std::string&> reasonIfUnsupported) const
711 {
713  reasonIfUnsupported,
714  input,
715  output,
716  descriptor);
717 }
718 
720  const TensorInfo& output,
721  const PermuteDescriptor& descriptor,
722  Optional<std::string&> reasonIfUnsupported) const
723 {
724  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonPermuteWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
725 }
726 
728  const TensorInfo& output,
729  const Pooling2dDescriptor& descriptor,
730  Optional<std::string&> reasonIfUnsupported) const
731 {
732  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonPooling2dWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
733 }
734 
736  const armnn::TensorInfo &alpha,
737  const armnn::TensorInfo &output,
738  armnn::Optional<std::string &> reasonIfUnsupported) const
739 {
740  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonPreluWorkloadValidate, reasonIfUnsupported, input, alpha, output);
741 }
742 
744  const TensorInfo& previousOutputIn,
745  const TensorInfo& previousCellStateIn,
746  const TensorInfo& outputStateOut,
747  const TensorInfo& cellStateOut,
748  const TensorInfo& output,
749  const QLstmDescriptor& descriptor,
750  const LstmInputParamsInfo& paramsInfo,
751  Optional<std::string&> reasonIfUnsupported) const
752 {
753  // Check required here in order to pass IsLayerSupported for datatypes tests
754  if (input.GetDataType() == armnn::DataType::QAsymmS8 &&
755  previousOutputIn.GetDataType() == armnn::DataType::QAsymmS8 &&
756  previousCellStateIn.GetDataType() == armnn::DataType::QSymmS16 &&
757  outputStateOut.GetDataType() == armnn::DataType::QAsymmS8 &&
758  cellStateOut.GetDataType() == armnn::DataType::QSymmS16 &&
760  {
762  reasonIfUnsupported,
763  input,
764  previousCellStateIn,
765  previousOutputIn,
766  cellStateOut,
767  outputStateOut,
768  output,
769  descriptor,
770  paramsInfo);
771  }
772  else
773  {
774  return false;
775  }
776 }
777 
779  const TensorInfo& output,
780  Optional<std::string&> reasonIfUnsupported) const
781 {
783  reasonIfUnsupported,
784  input,
785  output);
786 }
787 
789  const TensorInfo& cellStateIn,
790  const TensorInfo& outputStateIn,
791  const TensorInfo& cellStateOut,
792  const TensorInfo& outputStateOut,
793  const QuantizedLstmInputParamsInfo& paramsInfo,
794  Optional<std::string&> reasonIfUnsupported) const
795 {
797  reasonIfUnsupported,
798  input,
799  cellStateIn,
800  outputStateIn,
801  cellStateOut,
802  outputStateOut,
803  paramsInfo);
804 }
805 
807  const TensorInfo& output,
808  const ReduceDescriptor& descriptor,
809  Optional<std::string&> reasonIfUnsupported) const
810 {
812  reasonIfUnsupported,
813  input,
814  output,
815  descriptor);
816 }
817 
819  const TensorInfo& output,
820  const ReshapeDescriptor& descriptor,
821  Optional<std::string&> reasonIfUnsupported) const
822 {
823  armnn::IgnoreUnused(descriptor);
825  reasonIfUnsupported,
826  input,
827  output);
828 }
829 
831  const TensorInfo& output,
832  const ResizeDescriptor& descriptor,
833  Optional<std::string&> reasonIfUnsupported) const
834 {
836  reasonIfUnsupported,
837  input,
838  output,
839  descriptor);
840 }
841 
843  const TensorInfo& output,
844  Optional<std::string&> reasonIfUnsupported) const
845 {
846  ResizeDescriptor descriptor;
847  descriptor.m_Method = ResizeMethod::Bilinear;
848  descriptor.m_DataLayout = DataLayout::NCHW;
849 
850  const TensorShape& outputShape = output.GetShape();
851  descriptor.m_TargetHeight = outputShape[2];
852  descriptor.m_TargetWidth = outputShape[3];
853 
854  return IsResizeSupported(input, output, descriptor, reasonIfUnsupported);
855 }
856 
858  const TensorInfo& output,
859  Optional<std::string&> reasonIfUnsupported) const
860 {
862  return IsElementwiseUnarySupported(input, output, descriptor, reasonIfUnsupported);
863 }
864 
866  const TensorInfo& output,
867  const SliceDescriptor& descriptor,
868  Optional<std::string&> reasonIfUnsupported) const
869 {
871  reasonIfUnsupported,
872  input,
873  output,
874  descriptor);
875 }
876 
878  const TensorInfo& output,
879  const SoftmaxDescriptor& descriptor,
880  Optional<std::string&> reasonIfUnsupported) const
881 {
882  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonSoftmaxWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
883 }
884 
886  const TensorInfo& output,
887  const SpaceToBatchNdDescriptor& descriptor,
888  Optional<std::string&> reasonIfUnsupported) const
889 {
891  reasonIfUnsupported,
892  input,
893  output,
894  descriptor);
895 }
896 
898  const TensorInfo& output,
899  const SpaceToDepthDescriptor& descriptor,
900  Optional<std::string&> reasonIfUnsupported) const
901 {
903  reasonIfUnsupported,
904  input,
905  output,
906  descriptor);
907 }
908 
910  const ViewsDescriptor& descriptor,
911  Optional<std::string&> reasonIfUnsupported) const
912 {
913  armnn::IgnoreUnused(descriptor);
914  return IsSupportedForDataTypeNeon(reasonIfUnsupported,
915  input.GetDataType(),
916  &TrueFunc<>,
917  &TrueFunc<>);
918 }
919 
921  const std::vector<std::reference_wrapper<TensorInfo>>& outputs,
922  const ViewsDescriptor& descriptor,
923  Optional<std::string&> reasonIfUnsupported) const
924 {
925 #if defined(ARMCOMPUTENEON_ENABLED)
926  // Split along the last dimension, cannot use sub-tensors
927  // as width and height of the sub-tensors do not match
928  // the width and height of the parent tensor
929  // in case of input with more than 2D.
930  std::set<unsigned int> splitAxis = ComputeSplitAxis(descriptor, input.GetShape());
931  if (descriptor.GetNumDimensions() > 2 && splitAxis.size() == 1 &&
932  *splitAxis.begin() == descriptor.GetNumDimensions() - 1 )
933  {
935  reasonIfUnsupported,
936  input,
937  outputs,
938  *splitAxis.begin());
939  }
940 #endif
941  IgnoreUnused(descriptor);
942  for (auto output : outputs)
943  {
944  if (!input.IsTypeSpaceMatch(output)) // Cannot use sub-tensors if the types are not same space
945  {
946  SetValueChecked(reasonIfUnsupported, "Neon Splitter: Types and quantization parameters must match.");
947  return false;
948  }
949  }
950  return true;
951 }
952 
953 bool NeonLayerSupport::IsStackSupported(const std::vector<const TensorInfo*>& inputs,
954  const TensorInfo& output,
955  const StackDescriptor& descriptor,
956  Optional<std::string&> reasonIfUnsupported) const
957 {
959  reasonIfUnsupported,
960  inputs,
961  output,
962  descriptor);
963 }
964 
966  const TensorInfo& output,
967  const StridedSliceDescriptor& descriptor,
968  Optional<std::string&> reasonIfUnsupported) const
969 {
971  reasonIfUnsupported,
972  input,
973  output,
974  descriptor);
975 }
976 
978  const TensorInfo& input1,
979  const TensorInfo& output,
980  Optional<std::string&> reasonIfUnsupported) const
981 {
983  reasonIfUnsupported,
984  input0,
985  input1,
986  output,
987  nullptr);
988 }
989 
991  const TensorInfo& output,
992  const TransposeConvolution2dDescriptor& descriptor,
993  const TensorInfo& weights,
994  const Optional<TensorInfo>& biases,
995  Optional<std::string&> reasonIfUnsupported) const
996 {
998  reasonIfUnsupported,
999  input,
1000  output,
1001  descriptor,
1002  weights,
1003  biases);
1004 }
1005 
1007  const TensorInfo& output,
1008  const TransposeDescriptor& descriptor,
1009  Optional<std::string&> reasonIfUnsupported) const
1010 {
1011  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonTransposeWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
1012 }
1013 
1014 } // namespace armnn
arm_compute::Status NeonGatherWorkloadValidate(const TensorInfo &input, const TensorInfo &indices, const TensorInfo &output, const GatherDescriptor &descriptor)
arm_compute::Status NeonNegWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
UnaryOperation m_Operation
Specifies the elementwiseUnary operation to execute.
bool IsSliceSupported(const TensorInfo &input, const TensorInfo &output, const SliceDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsSoftmaxSupported(const TensorInfo &input, const TensorInfo &output, const SoftmaxDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsConvolution2dSupported(const TensorInfo &input, const TensorInfo &output, const Convolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
A ViewsDescriptor for the SplitterLayer.
bool IsTypeSpaceMatch(const TensorInfo &other) const
Check that the types are the same and, if quantize, that the quantization parameters are the same...
Definition: Tensor.cpp:434
bool IsConvertFp32ToFp16Supported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
arm_compute::Status NeonSpaceToDepthWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const SpaceToDepthDescriptor &descriptor)
arm_compute::Status NeonSplitterWorkloadValidate(const TensorInfo &input, const std::vector< std::reference_wrapper< TensorInfo >> &outputs, unsigned int splitAxis)
A TransposeConvolution2dDescriptor for the TransposeConvolution2dLayer.
const TensorShape & GetShape() const
Definition: Tensor.hpp:191
arm_compute::Status NeonLogSoftmaxWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const LogSoftmaxDescriptor &descriptor)
A ReshapeDescriptor for the ReshapeLayer.
bool IsGatherSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const GatherDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported) const override
arm_compute::Status NeonBatchNormalizationValidate(const TensorInfo &input, const TensorInfo &output, const TensorInfo &mean, const TensorInfo &var, const TensorInfo &beta, const TensorInfo &gamma, const BatchNormalizationDescriptor &descriptor, const ActivationDescriptor *activationDescriptor)
bool IsDilatedDepthwiseConvolutionSupported(const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string &> reason=EmptyOptional()) const override
bool IsQuantizedLstmSupported(const TensorInfo &input, const TensorInfo &cellStateIn, const TensorInfo &outputStateIn, const TensorInfo &cellStateOut, const TensorInfo &outputStateOut, const QuantizedLstmInputParamsInfo &paramsInfo, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
uint32_t GetNumDimensions() const
Get the number of dimensions.
A ComparisonDescriptor for the ComparisonLayer.
Definition: Descriptors.hpp:78
bool IsDepthToSpaceSupported(const TensorInfo &input, const TensorInfo &output, const DepthToSpaceDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsL2NormalizationSupported(const TensorInfo &input, const TensorInfo &output, const L2NormalizationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsCastSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsStridedSliceSupported(const TensorInfo &input, const TensorInfo &output, const StridedSliceDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsConvertFp32ToBf16Supported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
ISubgraphViewConverter supported
bool IsAdditionSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
A Convolution2dDescriptor for the Convolution2dLayer.
bool IsConcatSupported(const std::vector< const TensorInfo *> inputs, const TensorInfo &output, const ConcatDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsSplitterSupported(const TensorInfo &input, const ViewsDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
arm_compute::Status NeonDepthwiseConvolutionWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, const ActivationDescriptor *activationDescriptor)
ResizeMethod m_Method
The Interpolation method to use (Bilinear, NearestNeighbor).
arm_compute::Status NeonActivationWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const ActivationDescriptor &descriptor)
arm_compute::Status NeonMinimumWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output)
Validate function for validating the inputs and output.
arm_compute::Status NeonStridedSliceWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const StridedSliceDescriptor &descriptor)
arm_compute::Status NeonTransposeConvolution2dWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const TransposeConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases)
bool IsGreaterSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
arm_compute::Status NeonLstmFloatWorkloadValidate(const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &scratchBuffer, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const LstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo)
arm_compute::Status NeonQLstmWorkloadValidate(const TensorInfo &input, const TensorInfo &cellStateIn, const TensorInfo &outputStateIn, const TensorInfo &cellStateOut, const TensorInfo &outputStateOut, const TensorInfo &output, const QLstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo)
bool IsResizeBilinearSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
arm_compute::Status NeonSliceWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const SliceDescriptor &descriptor)
A LogicalBinaryDescriptor for the LogicalBinaryLayer.
arm_compute::Status NeonFullyConnectedWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const TensorInfo &weights, const TensorInfo &biases, const FullyConnectedDescriptor &descriptor, const ActivationDescriptor *activationDescriptor)
bool IsConstantSupported(const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
Copyright (c) 2021 ARM Limited and Contributors.
arm_compute::Status NeonQuantizeWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
bool IsPadSupported(const TensorInfo &input, const TensorInfo &output, const PadDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
void IgnoreUnused(Ts &&...)
std::set< unsigned int > ComputeSplitAxis(const armnn::SplitterDescriptor &desc, const TensorShape &input)
arm_compute::Status NeonAdditionWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ActivationDescriptor *activationDescriptor)
A SpaceToDepthDescriptor for the SpaceToDepthLayer.
arm_compute::Status NeonLogWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
arm_compute::Status NeonLogicalAndWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output)
arm_compute::Status NeonInstanceNormalizationWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const InstanceNormalizationDescriptor &descriptor)
bool IsPooling2dSupported(const TensorInfo &input, const TensorInfo &output, const Pooling2dDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
arm_compute::Status NeonLogicalOrWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output)
LogicalBinaryOperation m_Operation
Specifies the logical operation to execute.
A BatchToSpaceNdDescriptor for the BatchToSpaceNdLayer.
bool IsActivationSupported(const TensorInfo &input, const TensorInfo &output, const ActivationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
The NeonBackendModelContext is used to pass in Neon specific backend ModelOptions.
bool IsMultiplicationSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsComparisonSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ComparisonDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
A ResizeDescriptor for the ResizeLayer.
arm_compute::Status NeonL2NormalizationWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const L2NormalizationDescriptor &descriptor)
arm_compute::Status NeonAbsWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
A StackDescriptor for the StackLayer.
arm_compute::Status NeonQuantizedLstmWorkloadValidate(const TensorInfo &input, const TensorInfo &cellStateIn, const TensorInfo &outputStateIn, const TensorInfo &cellStateOut, const TensorInfo &outputStateOut, const QuantizedLstmInputParamsInfo &paramsInfo)
arm_compute::Status NeonStackWorkloadValidate(const std::vector< const TensorInfo *> &inputs, const TensorInfo &output, const StackDescriptor &descriptor)
arm_compute::Status NeonMeanWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const MeanDescriptor &descriptor)
arm_compute::Status NeonReduceWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const ReduceDescriptor &descriptor)
arm_compute::Status NeonSpaceToBatchNdWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const SpaceToBatchNdDescriptor &descriptor)
arm_compute::Status NeonSubtractionWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ActivationDescriptor *activationDescriptor)
A PadDescriptor for the PadLayer.
bool IsAbsSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsLogicalBinarySupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const LogicalBinaryDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported) const override
DataType
Definition: Types.hpp:35
bool IsFillSupported(const TensorInfo &input, const TensorInfo &output, const FillDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
An LstmDescriptor for the LstmLayer.
arm_compute::Status NeonExpWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
arm_compute::Status NeonReshapeWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
bool IsInputSupported(const TensorInfo &input, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
arm_compute::Status NeonBatchToSpaceNdWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const BatchToSpaceNdDescriptor &descriptor)
bool IsStackSupported(const std::vector< const TensorInfo *> &inputs, const TensorInfo &output, const StackDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
std::shared_ptr< IBackendModelContext > IBackendSpecificModelContextPtr
A L2NormalizationDescriptor for the L2NormalizationLayer.
bool IsMinimumSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsPreluSupported(const TensorInfo &input, const TensorInfo &alpha, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsOutputSupported(const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
An ArgMinMaxDescriptor for ArgMinMaxLayer.
Definition: Descriptors.hpp:56
bool IsInstanceNormalizationSupported(const TensorInfo &input, const TensorInfo &output, const InstanceNormalizationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
DataType GetDataType() const
Definition: Tensor.hpp:198
An OriginsDescriptor for the ConcatLayer.
A ReduceDescriptor for the REDUCE operators.
A FullyConnectedDescriptor for the FullyConnectedLayer.
arm_compute::Status NeonLogicalNotWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
bool IsMergerSupported(const std::vector< const TensorInfo *> inputs, const TensorInfo &output, const MergerDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsNormalizationSupported(const TensorInfo &input, const TensorInfo &output, const NormalizationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsElementwiseUnarySupported(const TensorInfo &input, const TensorInfo &output, const ElementwiseUnaryDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
uint32_t m_TargetWidth
Target width value.
arm_compute::Status NeonCastValidate(const TensorInfo &input, const TensorInfo &output)
A GatherDescriptor for the GatherLayer.
Status
enumeration
Definition: Types.hpp:29
arm_compute::Status NeonComparisonWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ComparisonDescriptor &descriptor)
bool IsDivisionSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
arm_compute::Status NeonConcatWorkloadValidate(const std::vector< const TensorInfo *> &inputs, const TensorInfo &output, const OriginsDescriptor &descriptor)
arm_compute::Status NeonPermuteWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const PermuteDescriptor &descriptor)
bool IsFloorSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
A QLstmDescriptor for the QLstmLayer.
arm_compute::Status NeonConvolution2dWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const Convolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, bool isFastMathEnabled, const ActivationDescriptor *activationDescriptor)
bool IsLogSoftmaxSupported(const TensorInfo &input, const TensorInfo &output, const LogSoftmaxDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
An ActivationDescriptor for the ActivationLayer.
Definition: Descriptors.hpp:25
bool IsResizeSupported(const TensorInfo &input, const TensorInfo &output, const ResizeDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsTransposeConvolution2dSupported(const TensorInfo &input, const TensorInfo &output, const TransposeConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
uint32_t m_TargetHeight
Target height value.
A SliceDescriptor for the SliceLayer.
arm_compute::Status NeonDequantizeWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
bool IsReshapeSupported(const TensorInfo &input, const TensorInfo &output, const ReshapeDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
arm_compute::Status NeonDivisionWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ActivationDescriptor *activationDescriptor)
bool IsBatchNormalizationSupported(const TensorInfo &input, const TensorInfo &output, const TensorInfo &mean, const TensorInfo &var, const TensorInfo &beta, const TensorInfo &gamma, const BatchNormalizationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
void SetValueChecked(Optional< T &> optionalRef, V &&val)
A SpaceToBatchNdDescriptor for the SpaceToBatchNdLayer.
arm_compute::Status NeonNormalizationWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const NormalizationDescriptor &descriptor)
bool IsQuantizeSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
arm_compute::Status NeonRsqrtWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
bool IsConvertFp16ToFp32Supported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
arm_compute::Status NeonPadWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const PadDescriptor &descriptor)
A ElementwiseUnaryDescriptor for the ElementwiseUnaryLayer.
Definition: Descriptors.hpp:98
bool IsSubtractionSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsLstmSupported(const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &scratchBuffer, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const LstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
arm_compute::Status NeonArgMinMaxWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const ArgMinMaxDescriptor &descriptor)
arm_compute::Status NeonSoftmaxWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const SoftmaxDescriptor &descriptor)
bool IsSpaceToBatchNdSupported(const TensorInfo &input, const TensorInfo &output, const SpaceToBatchNdDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
arm_compute::Status NeonTransposeWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const TransposeDescriptor &descriptor)
uint32_t GetNumDimensions() const
Get the number of dimensions.
A MeanDescriptor for the MeanLayer.
bool IsDepthwiseConvolutionSupported(const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
arm_compute::Status NeonMaximumWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output)
bool IsArgMinMaxSupported(const TensorInfo &input, const TensorInfo &output, const ArgMinMaxDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsSpaceToDepthSupported(const TensorInfo &input, const TensorInfo &output, const SpaceToDepthDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
A TransposeDescriptor for the TransposeLayer.
A StridedSliceDescriptor for the StridedSliceLayer.
bool IsTransposeSupported(const TensorInfo &input, const TensorInfo &output, const TransposeDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsMaximumSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
arm_compute::Status NeonConstantWorkloadValidate(const TensorInfo &output)
bool IsFullyConnectedSupported(const TensorInfo &input, const TensorInfo &output, const TensorInfo &weights, const TensorInfo &biases, const FullyConnectedDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsMeanSupported(const TensorInfo &input, const TensorInfo &output, const MeanDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsBatchToSpaceNdSupported(const TensorInfo &input, const TensorInfo &output, const BatchToSpaceNdDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
arm_compute::Status NeonPreluWorkloadValidate(const TensorInfo &input, const TensorInfo &alpha, const TensorInfo &output)
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported,...)
arm_compute::Status NeonDepthToSpaceWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const DepthToSpaceDescriptor &descriptor)
A Pooling2dDescriptor for the Pooling2dLayer.
arm_compute::Status NeonResizeWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const ResizeDescriptor &descriptor)
A NormalizationDescriptor for the NormalizationLayer.
bool IsQLstmSupported(const TensorInfo &input, const TensorInfo &previousOutputIn, const TensorInfo &previousCellStateIn, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const QLstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsConvertBf16ToFp32Supported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
An InstanceNormalizationDescriptor for InstanceNormalizationLayer.
unsigned int GetConcatAxis() const
Get the concatenation axis value.
bool IsSupportedForDataTypeGeneric(Optional< std::string &> reasonIfUnsupported, DataType dataType, Float16Func float16FuncPtr, Float32Func float32FuncPtr, Uint8Func uint8FuncPtr, Int32Func int32FuncPtr, BooleanFunc booleanFuncPtr, Params &&... params)
A SoftmaxDescriptor for the SoftmaxLayer.
bool IsReduceSupported(const TensorInfo &input, const TensorInfo &output, const ReduceDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
arm_compute::Status NeonPooling2dWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const Pooling2dDescriptor &descriptor)
bool IsDequantizeSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
A DepthwiseConvolution2dDescriptor for the DepthwiseConvolution2dLayer.
bool IsRsqrtSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
A FillDescriptor for the FillLayer.
A BatchNormalizationDescriptor for the BatchNormalizationLayer.
arm_compute::Status NeonMultiplicationWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ActivationDescriptor *activationDescriptor)
A PermuteDescriptor for the PermuteLayer.
bool IsPermuteSupported(const TensorInfo &input, const TensorInfo &output, const PermuteDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override