ArmNN
 21.05
NeonLayerSupport.cpp
Go to the documentation of this file.
1 //
2 // Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
6 #include "NeonLayerSupport.hpp"
7 #include "NeonBackendId.hpp"
9 
10 #include <armnn/Descriptors.hpp>
11 #include <armnn/Exceptions.hpp>
12 #include <armnn/Tensor.hpp>
13 #include <armnn/Types.hpp>
15 
16 #include <InternalTypes.hpp>
17 #include <LayerSupportCommon.hpp>
20 
21 #if defined(ARMCOMPUTENEON_ENABLED)
76 #endif
77 
78 namespace armnn
79 {
80 
81 namespace
82 {
83 
// Reports whether the armnn library was compiled with NEON (Arm Compute) support.
// Any extra arguments are accepted and ignored so callers can forward their
// workload parameters unchanged. When NEON is disabled at build time,
// reasonIfUnsupported (if set) receives an explanatory message and false is returned.
template< typename ... Args>
bool IsNeonBackendSupported(Optional<std::string&> reasonIfUnsupported, Args... args)
{
    IgnoreUnused(reasonIfUnsupported, (args)...);
#if defined(ARMCOMPUTENEON_ENABLED)
    return true;
#else
    SetValueChecked(reasonIfUnsupported, "The armnn library has been built without NEON support");
    return false;
#endif
}
95 
96 template<typename FloatFunc, typename Uint8Func, typename ... Params>
97 bool IsSupportedForDataTypeNeon(Optional<std::string&> reasonIfUnsupported,
98  DataType dataType,
99  FloatFunc floatFuncPtr,
100  Uint8Func uint8FuncPtr,
101  Params&&... params)
102 {
103  return IsNeonBackendSupported(reasonIfUnsupported) &&
104  IsSupportedForDataTypeGeneric(reasonIfUnsupported,
105  dataType,
106  floatFuncPtr,
107  floatFuncPtr,
108  uint8FuncPtr,
109  &FalseFunc<>,
110  &FalseFunc<>,
111  std::forward<Params>(params)...);
112 }
113 
#if defined(ARMCOMPUTENEON_ENABLED)
// Runs an Arm Compute Library validate function and translates its Status into
// the boolean layer-support answer. On failure, ACL's error description is
// copied into reasonIfUnsupported (when the caller provided one).
template<class FuncType, class... Args>
inline bool IsWorkloadSupported(FuncType& func, Optional<std::string&> reasonIfUnsupported, Args&&... args)
{
    arm_compute::Status aclStatus = func(std::forward<Args>(args)...);
    const bool supported = (aclStatus.error_code() == arm_compute::ErrorCode::OK);
    if (!supported && reasonIfUnsupported)
    {
        reasonIfUnsupported.value() = aclStatus.error_description();
    }
    return supported;
}

// NEON enabled: forward the support query to the given ACL validate function.
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported, ...) \
    return IsWorkloadSupported(func, reasonIfUnsupported, __VA_ARGS__);
#else
// NEON disabled: every query fails with the standard "built without NEON" reason.
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported, ...) \
    return IsNeonBackendSupported(reasonIfUnsupported, __VA_ARGS__);
#endif
133 } // anonymous namespace
134 
136  : m_ModelContextPtr(modelContextPtr)
137 {
138 }
139 
141  : m_ModelContextPtr(nullptr)
142 {
143 }
144 
146  const TensorInfo& output,
147  Optional<std::string&> reasonIfUnsupported) const
148 {
150  return IsElementwiseUnarySupported(input, output, descriptor, reasonIfUnsupported);
151 }
152 
154  const TensorInfo& output,
155  const ActivationDescriptor& descriptor,
156  Optional<std::string&> reasonIfUnsupported) const
157 {
158  IgnoreUnused(descriptor);
160  reasonIfUnsupported,
161  input,
162  output,
163  descriptor);
164 }
165 
167  const TensorInfo& input1,
168  const TensorInfo& output,
169  Optional<std::string&> reasonIfUnsupported) const
170 {
172  reasonIfUnsupported,
173  input0,
174  input1,
175  output,
176  nullptr);
177 }
178 
180  const TensorInfo& output,
181  const ArgMinMaxDescriptor& descriptor,
182  Optional<std::string&> reasonIfUnsupported) const
183 {
185  reasonIfUnsupported,
186  input,
187  output,
188  descriptor);
189 }
190 
192  const TensorInfo& output,
193  const TensorInfo& mean,
194  const TensorInfo& var,
195  const TensorInfo& beta,
196  const TensorInfo& gamma,
197  const BatchNormalizationDescriptor& descriptor,
198  Optional<std::string&> reasonIfUnsupported) const
199 {
201  reasonIfUnsupported,
202  input,
203  output,
204  mean,
205  var,
206  beta,
207  gamma,
208  descriptor,
209  nullptr);
210 }
211 
213  const TensorInfo& output,
214  const BatchToSpaceNdDescriptor& descriptor,
215  Optional<std::string&> reasonIfUnsupported) const
216 {
218  reasonIfUnsupported,
219  input,
220  output,
221  descriptor);
222 }
223 
225  const TensorInfo& output,
226  Optional<std::string&> reasonIfUnsupported) const
227 {
229  reasonIfUnsupported,
230  input,
231  output);
232 }
233 
235  const TensorInfo& input1,
236  const TensorInfo& output,
237  const ComparisonDescriptor& descriptor,
238  Optional<std::string&> reasonIfUnsupported) const
239 {
240 
242  reasonIfUnsupported,
243  input0,
244  input1,
245  output,
246  descriptor);
247 }
248 
249 bool NeonLayerSupport::IsConcatSupported(const std::vector<const TensorInfo*> inputs,
250  const TensorInfo& output,
251  const ConcatDescriptor& descriptor,
252  Optional<std::string&> reasonIfUnsupported) const
253 {
254  if (descriptor.GetNumDimensions() <= descriptor.GetConcatAxis())
255  {
256  SetValueChecked(reasonIfUnsupported, "Neon Concat: Concat axis > Number of dimensions.");
257  return false;
258  }
259 
260  unsigned int concatInnerAxis = (descriptor.GetNumDimensions() - descriptor.GetConcatAxis()) - 1;
261  if(concatInnerAxis < 3) // Width, height, or channels
262  {
264  reasonIfUnsupported,
265  inputs,
266  output,
267  descriptor);
268  }
269  else if (concatInnerAxis == 3)
270  {
271  for (auto& input : inputs)
272  {
273  if (input && !output.IsTypeSpaceMatch(*input)) // Cannot use sub-tensors if the types are not same space
274  {
275  SetValueChecked(reasonIfUnsupported, "Neon Concat: Types and quantization parameters must match.");
276  return false;
277  }
278  }
279  return true; // Sub-tensors support concat along batch
280  }
281  else // > 4 dimensions not supported.
282  {
283  SetValueChecked(reasonIfUnsupported, "Neon Concat: Maximum of 4 dimensions supported.");
284  return false;
285  }
286 }
287 
289  Optional<std::string&> reasonIfUnsupported) const
290 {
292  reasonIfUnsupported,
293  output);
294 }
295 
297  const TensorInfo& output,
298  Optional<std::string&> reasonIfUnsupported) const
299 {
300  armnn::IgnoreUnused(input);
301  armnn::IgnoreUnused(output);
302  armnn::IgnoreUnused(reasonIfUnsupported);
303  return true;
304 }
305 
307  const TensorInfo& output,
308  Optional<std::string&> reasonIfUnsupported) const
309 {
310  armnn::IgnoreUnused(input);
311  armnn::IgnoreUnused(output);
312  armnn::IgnoreUnused(reasonIfUnsupported);
313  return true;
314 }
315 
317  const TensorInfo& output,
318  Optional<std::string&> reasonIfUnsupported) const
319 {
320  armnn::IgnoreUnused(input);
321  armnn::IgnoreUnused(output);
322  armnn::IgnoreUnused(reasonIfUnsupported);
323  return true;
324 }
325 
327  const TensorInfo& output,
328  Optional<std::string&> reasonIfUnsupported) const
329 {
330  armnn::IgnoreUnused(input);
331  armnn::IgnoreUnused(output);
332  armnn::IgnoreUnused(reasonIfUnsupported);
333  return true;
334 }
335 
337  const TensorInfo& output,
338  const Convolution2dDescriptor& descriptor,
339  const TensorInfo& weights,
340  const Optional<TensorInfo>& biases,
341  Optional<std::string&> reasonIfUnsupported) const
342 {
343  bool isFastMathEnabled = false;
344 #if defined(ARMCOMPUTENEON_ENABLED)
345  if (m_ModelContextPtr)
346  {
347  if (m_ModelContextPtr.get() != nullptr)
348  {
349  auto modelOptions = dynamic_cast<NeonBackendModelContext*>(m_ModelContextPtr.get());
350  if (modelOptions)
351  {
352  isFastMathEnabled = modelOptions->IsFastMathEnabled();
353  }
354  }
355  }
356 #endif
357 
359  reasonIfUnsupported,
360  input,
361  output,
362  descriptor,
363  weights,
364  biases,
365  isFastMathEnabled,
366  nullptr);
367 }
368 
370  const TensorInfo& output,
371  const DepthToSpaceDescriptor& descriptor,
372  Optional<std::string&> reasonIfUnsupported) const
373 {
375  reasonIfUnsupported,
376  input,
377  output,
378  descriptor);
379 }
380 
382  const TensorInfo& output,
383  const DepthwiseConvolution2dDescriptor& descriptor,
384  const TensorInfo& weights,
385  const Optional<TensorInfo>& biases,
386  Optional<std::string&> reasonIfUnsupported) const
387 {
389  reasonIfUnsupported,
390  input,
391  output,
392  descriptor,
393  weights,
394  biases,
395  nullptr);
396 }
397 
399  const TensorInfo& output,
400  Optional<std::string&> reasonIfUnsupported) const
401 {
403  reasonIfUnsupported,
404  input,
405  output);
406 }
407 
409  const TensorInfo& output,
410  const DepthwiseConvolution2dDescriptor& descriptor,
411  const TensorInfo& weights,
412  const Optional<TensorInfo>& biases,
413  Optional<std::string&> reasonIfUnsupported) const
414 {
416  reasonIfUnsupported,
417  input,
418  output,
419  descriptor,
420  weights,
421  biases,
422  nullptr);
423 }
424 
426  const TensorInfo& output,
427  const ElementwiseUnaryDescriptor& descriptor,
428  Optional<std::string&> reasonIfUnsupported) const
429 {
430  switch(descriptor.m_Operation)
431  {
432  case UnaryOperation::Abs:
434  reasonIfUnsupported,
435  input,
436  output);
437  case UnaryOperation::Exp:
439  reasonIfUnsupported,
440  input,
441  output);
442  case UnaryOperation::Neg:
444  reasonIfUnsupported,
445  input,
446  output);
449  reasonIfUnsupported,
450  input,
451  output);
454  reasonIfUnsupported,
455  input,
456  output);
457  default:
458  return false;
459  }
460 }
461 
463  const TensorInfo& output,
464  const FillDescriptor& descriptor,
465  Optional<std::string&> reasonIfUnsupported) const
466 {
467  armnn::IgnoreUnused(input);
468  armnn::IgnoreUnused(output);
469  armnn::IgnoreUnused(descriptor);
470 
471  return IsNeonBackendSupported(reasonIfUnsupported);
472 }
473 
475  const TensorInfo& output,
476  Optional<std::string&> reasonIfUnsupported) const
477 {
478  armnn::IgnoreUnused(output);
479  return IsNeonBackendSupported(reasonIfUnsupported) &&
480  IsSupportedForDataTypeGeneric(reasonIfUnsupported,
481  input.GetDataType(),
482  &FalseFuncF16<>,
483  &TrueFunc<>,
484  &FalseFuncU8<>,
485  &FalseFuncI32<>,
486  &FalseFuncU8<>);
487 }
488 
490  const TensorInfo& output,
491  const TensorInfo& weights,
492  const TensorInfo& biases,
493  const FullyConnectedDescriptor& descriptor,
494  Optional<std::string&> reasonIfUnsupported) const
495 {
497  reasonIfUnsupported,
498  input,
499  output,
500  weights,
501  biases,
502  descriptor,
503  nullptr);
504 }
505 
507  const TensorInfo& input1,
508  const TensorInfo& output,
509  const GatherDescriptor& descriptor,
510  Optional<std::string&> reasonIfUnsupported) const
511 {
513  reasonIfUnsupported,
514  input0,
515  input1,
516  output,
517  descriptor);
518 }
519 
521  const armnn::TensorInfo& input1,
522  const armnn::TensorInfo& output,
523  armnn::Optional<std::string&> reasonIfUnsupported) const
524 {
526  return IsComparisonSupported(input0, input1, output, descriptor, reasonIfUnsupported);
527 }
528 
530  Optional<std::string&> reasonIfUnsupported) const
531 {
532  return IsNeonBackendSupported(reasonIfUnsupported, input);
533 }
534 
536  const TensorInfo& output,
537  const InstanceNormalizationDescriptor& descriptor,
538  Optional<std::string&> reasonIfUnsupported) const
539 {
541  reasonIfUnsupported,
542  input,
543  output,
544  descriptor);
545 }
546 
548  const TensorInfo& output,
549  const L2NormalizationDescriptor& descriptor,
550  Optional<std::string&> reasonIfUnsupported) const
551 {
552  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonL2NormalizationWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
553 }
554 
556  const TensorInfo& input1,
557  const TensorInfo& output,
558  const LogicalBinaryDescriptor& descriptor,
559  Optional<std::string&> reasonIfUnsupported) const
560 {
561  switch(descriptor.m_Operation)
562  {
565  reasonIfUnsupported,
566  input0,
567  input1,
568  output);
571  reasonIfUnsupported,
572  input0,
573  input1,
574  output);
575  default:
576  return false;
577  }
578 }
579 
581  const TensorInfo& output,
582  const LogSoftmaxDescriptor& descriptor,
583  Optional<std::string&> reasonIfUnsupported) const
584 {
585  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonLogSoftmaxWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
586 }
587 
589  const TensorInfo& outputStateIn,
590  const TensorInfo& cellStateIn,
591  const TensorInfo& scratchBuffer,
592  const TensorInfo& outputStateOut,
593  const TensorInfo& cellStateOut,
594  const TensorInfo& output,
595  const LstmDescriptor& descriptor,
596  const LstmInputParamsInfo& paramsInfo,
597  Optional<std::string&> reasonIfUnsupported) const
598 {
600  reasonIfUnsupported,
601  input,
602  outputStateIn,
603  cellStateIn,
604  scratchBuffer,
605  outputStateOut,
606  cellStateOut,
607  output,
608  descriptor,
609  paramsInfo);
610 }
611 
613  const TensorInfo& input1,
614  const TensorInfo& output,
615  Optional<std::string&> reasonIfUnsupported) const
616 {
618  reasonIfUnsupported,
619  input0,
620  input1,
621  output);
622 }
623 
625  const TensorInfo& output,
626  const MeanDescriptor& descriptor,
627  Optional<std::string&> reasonIfUnsupported) const
628 {
630  reasonIfUnsupported,
631  input,
632  output,
633  descriptor);
634 }
635 
636 bool NeonLayerSupport::IsMergerSupported(const std::vector<const TensorInfo*> inputs,
637  const TensorInfo& output,
638  const MergerDescriptor& descriptor,
639  Optional<std::string&> reasonIfUnsupported) const
640 {
641  return IsConcatSupported(inputs, output, descriptor, reasonIfUnsupported);
642 }
643 
645  const TensorInfo& input1,
646  const TensorInfo& output,
647  Optional<std::string&> reasonIfUnsupported) const
648 {
650  reasonIfUnsupported,
651  input0,
652  input1,
653  output);
654 }
655 
657  const TensorInfo& input1,
658  const TensorInfo& output,
659  Optional<std::string&> reasonIfUnsupported) const
660 {
662  reasonIfUnsupported,
663  input0,
664  input1,
665  output,
666  nullptr);
667 }
668 
670  const TensorInfo& input1,
671  const TensorInfo& output,
672  Optional<std::string&> reasonIfUnsupported) const
673 {
675  reasonIfUnsupported,
676  input0,
677  input1,
678  output,
679  nullptr);
680 }
681 
683  const TensorInfo& output,
684  const NormalizationDescriptor& descriptor,
685  Optional<std::string&> reasonIfUnsupported) const
686 {
688  reasonIfUnsupported,
689  input,
690  output,
691  descriptor);
692 }
693 
695  Optional<std::string&> reasonIfUnsupported) const
696 {
697  return IsNeonBackendSupported(reasonIfUnsupported, output);
698 }
699 
701  const TensorInfo& output,
702  const PadDescriptor& descriptor,
703  Optional<std::string&> reasonIfUnsupported) const
704 {
706  reasonIfUnsupported,
707  input,
708  output,
709  descriptor);
710 }
711 
713  const TensorInfo& output,
714  const PermuteDescriptor& descriptor,
715  Optional<std::string&> reasonIfUnsupported) const
716 {
717  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonPermuteWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
718 }
719 
721  const TensorInfo& output,
722  const Pooling2dDescriptor& descriptor,
723  Optional<std::string&> reasonIfUnsupported) const
724 {
725  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonPooling2dWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
726 }
727 
729  const armnn::TensorInfo &alpha,
730  const armnn::TensorInfo &output,
731  armnn::Optional<std::string &> reasonIfUnsupported) const
732 {
733  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonPreluWorkloadValidate, reasonIfUnsupported, input, alpha, output);
734 }
735 
737  const TensorInfo& previousOutputIn,
738  const TensorInfo& previousCellStateIn,
739  const TensorInfo& outputStateOut,
740  const TensorInfo& cellStateOut,
741  const TensorInfo& output,
742  const QLstmDescriptor& descriptor,
743  const LstmInputParamsInfo& paramsInfo,
744  Optional<std::string&> reasonIfUnsupported) const
745 {
746  // Check required here in order to pass IsLayerSupported for datatypes tests
747  if (input.GetDataType() == armnn::DataType::QAsymmS8 &&
748  previousOutputIn.GetDataType() == armnn::DataType::QAsymmS8 &&
749  previousCellStateIn.GetDataType() == armnn::DataType::QSymmS16 &&
750  outputStateOut.GetDataType() == armnn::DataType::QAsymmS8 &&
751  cellStateOut.GetDataType() == armnn::DataType::QSymmS16 &&
753  {
755  reasonIfUnsupported,
756  input,
757  previousCellStateIn,
758  previousOutputIn,
759  cellStateOut,
760  outputStateOut,
761  output,
762  descriptor,
763  paramsInfo);
764  }
765  else
766  {
767  return false;
768  }
769 }
770 
772  const TensorInfo& output,
773  Optional<std::string&> reasonIfUnsupported) const
774 {
776  reasonIfUnsupported,
777  input,
778  output);
779 }
780 
782  const TensorInfo& cellStateIn,
783  const TensorInfo& outputStateIn,
784  const TensorInfo& cellStateOut,
785  const TensorInfo& outputStateOut,
786  const QuantizedLstmInputParamsInfo& paramsInfo,
787  Optional<std::string&> reasonIfUnsupported) const
788 {
790  reasonIfUnsupported,
791  input,
792  cellStateIn,
793  outputStateIn,
794  cellStateOut,
795  outputStateOut,
796  paramsInfo);
797 }
798 
800  const TensorInfo& output,
801  const ReduceDescriptor& descriptor,
802  Optional<std::string&> reasonIfUnsupported) const
803 {
805  reasonIfUnsupported,
806  input,
807  output,
808  descriptor);
809 }
810 
812  const TensorInfo& output,
813  const ReshapeDescriptor& descriptor,
814  Optional<std::string&> reasonIfUnsupported) const
815 {
816  armnn::IgnoreUnused(descriptor);
818  reasonIfUnsupported,
819  input,
820  output);
821 }
822 
824  const TensorInfo& output,
825  const ResizeDescriptor& descriptor,
826  Optional<std::string&> reasonIfUnsupported) const
827 {
829  reasonIfUnsupported,
830  input,
831  output,
832  descriptor);
833 }
834 
836  const TensorInfo& output,
837  Optional<std::string&> reasonIfUnsupported) const
838 {
839  ResizeDescriptor descriptor;
840  descriptor.m_Method = ResizeMethod::Bilinear;
841  descriptor.m_DataLayout = DataLayout::NCHW;
842 
843  const TensorShape& outputShape = output.GetShape();
844  descriptor.m_TargetHeight = outputShape[2];
845  descriptor.m_TargetWidth = outputShape[3];
846 
847  return IsResizeSupported(input, output, descriptor, reasonIfUnsupported);
848 }
849 
851  const TensorInfo& output,
852  Optional<std::string&> reasonIfUnsupported) const
853 {
855  return IsElementwiseUnarySupported(input, output, descriptor, reasonIfUnsupported);
856 }
857 
859  const TensorInfo& output,
860  const SliceDescriptor& descriptor,
861  Optional<std::string&> reasonIfUnsupported) const
862 {
864  reasonIfUnsupported,
865  input,
866  output,
867  descriptor);
868 }
869 
871  const TensorInfo& output,
872  const SoftmaxDescriptor& descriptor,
873  Optional<std::string&> reasonIfUnsupported) const
874 {
875  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonSoftmaxWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
876 }
877 
879  const TensorInfo& output,
880  const SpaceToBatchNdDescriptor& descriptor,
881  Optional<std::string&> reasonIfUnsupported) const
882 {
884  reasonIfUnsupported,
885  input,
886  output,
887  descriptor);
888 }
889 
891  const TensorInfo& output,
892  const SpaceToDepthDescriptor& descriptor,
893  Optional<std::string&> reasonIfUnsupported) const
894 {
896  reasonIfUnsupported,
897  input,
898  output,
899  descriptor);
900 }
901 
903  const ViewsDescriptor& descriptor,
904  Optional<std::string&> reasonIfUnsupported) const
905 {
906  armnn::IgnoreUnused(descriptor);
907  return IsSupportedForDataTypeNeon(reasonIfUnsupported,
908  input.GetDataType(),
909  &TrueFunc<>,
910  &TrueFunc<>);
911 }
912 
914  const std::vector<std::reference_wrapper<TensorInfo>>& outputs,
915  const ViewsDescriptor& descriptor,
916  Optional<std::string&> reasonIfUnsupported) const
917 {
918 #if defined(ARMCOMPUTENEON_ENABLED)
919  // Split along the last dimension, cannot use sub-tensors
920  // as width and height of the sub-tensors do not match
921  // the width and height of the parent tensor
922  // in case of input with more than 2D.
923  std::set<unsigned int> splitAxis = ComputeSplitAxis(descriptor, input.GetShape());
924  if (descriptor.GetNumDimensions() > 2 && splitAxis.size() == 1 &&
925  *splitAxis.begin() == descriptor.GetNumDimensions() - 1 )
926  {
928  reasonIfUnsupported,
929  input,
930  outputs,
931  *splitAxis.begin());
932  }
933 #endif
934  IgnoreUnused(descriptor);
935  for (auto output : outputs)
936  {
937  if (!input.IsTypeSpaceMatch(output)) // Cannot use sub-tensors if the types are not same space
938  {
939  SetValueChecked(reasonIfUnsupported, "Neon Splitter: Types and quantization parameters must match.");
940  return false;
941  }
942  }
943  return true;
944 }
945 
946 bool NeonLayerSupport::IsStackSupported(const std::vector<const TensorInfo*>& inputs,
947  const TensorInfo& output,
948  const StackDescriptor& descriptor,
949  Optional<std::string&> reasonIfUnsupported) const
950 {
952  reasonIfUnsupported,
953  inputs,
954  output,
955  descriptor);
956 }
957 
959  const TensorInfo& output,
960  const StridedSliceDescriptor& descriptor,
961  Optional<std::string&> reasonIfUnsupported) const
962 {
964  reasonIfUnsupported,
965  input,
966  output,
967  descriptor);
968 }
969 
971  const TensorInfo& input1,
972  const TensorInfo& output,
973  Optional<std::string&> reasonIfUnsupported) const
974 {
976  reasonIfUnsupported,
977  input0,
978  input1,
979  output,
980  nullptr);
981 }
982 
984  const TensorInfo& output,
985  const TransposeConvolution2dDescriptor& descriptor,
986  const TensorInfo& weights,
987  const Optional<TensorInfo>& biases,
988  Optional<std::string&> reasonIfUnsupported) const
989 {
991  reasonIfUnsupported,
992  input,
993  output,
994  descriptor,
995  weights,
996  biases);
997 }
998 
1000  const TensorInfo& output,
1001  const TransposeDescriptor& descriptor,
1002  Optional<std::string&> reasonIfUnsupported) const
1003 {
1004  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonTransposeWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
1005 }
1006 
1007 } // namespace armnn
arm_compute::Status NeonGatherWorkloadValidate(const TensorInfo &input, const TensorInfo &indices, const TensorInfo &output, const GatherDescriptor &descriptor)
arm_compute::Status NeonNegWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
UnaryOperation m_Operation
Specifies the elementwiseUnary operation to execute.
bool IsSliceSupported(const TensorInfo &input, const TensorInfo &output, const SliceDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsSoftmaxSupported(const TensorInfo &input, const TensorInfo &output, const SoftmaxDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsConvolution2dSupported(const TensorInfo &input, const TensorInfo &output, const Convolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
A ViewsDescriptor for the SplitterLayer.
bool IsTypeSpaceMatch(const TensorInfo &other) const
Check that the types are the same and, if quantize, that the quantization parameters are the same...
Definition: Tensor.cpp:423
bool IsConvertFp32ToFp16Supported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
arm_compute::Status NeonSpaceToDepthWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const SpaceToDepthDescriptor &descriptor)
arm_compute::Status NeonSplitterWorkloadValidate(const TensorInfo &input, const std::vector< std::reference_wrapper< TensorInfo >> &outputs, unsigned int splitAxis)
A TransposeConvolution2dDescriptor for the TransposeConvolution2dLayer.
const TensorShape & GetShape() const
Definition: Tensor.hpp:187
arm_compute::Status NeonLogSoftmaxWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const LogSoftmaxDescriptor &descriptor)
A ReshapeDescriptor for the ReshapeLayer.
bool IsGatherSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const GatherDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported) const override
arm_compute::Status NeonBatchNormalizationValidate(const TensorInfo &input, const TensorInfo &output, const TensorInfo &mean, const TensorInfo &var, const TensorInfo &beta, const TensorInfo &gamma, const BatchNormalizationDescriptor &descriptor, const ActivationDescriptor *activationDescriptor)
bool IsDilatedDepthwiseConvolutionSupported(const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string &> reason=EmptyOptional()) const override
bool IsQuantizedLstmSupported(const TensorInfo &input, const TensorInfo &cellStateIn, const TensorInfo &outputStateIn, const TensorInfo &cellStateOut, const TensorInfo &outputStateOut, const QuantizedLstmInputParamsInfo &paramsInfo, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
uint32_t GetNumDimensions() const
Get the number of dimensions.
A ComparisonDescriptor for the ComparisonLayer.
Definition: Descriptors.hpp:78
bool IsDepthToSpaceSupported(const TensorInfo &input, const TensorInfo &output, const DepthToSpaceDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsL2NormalizationSupported(const TensorInfo &input, const TensorInfo &output, const L2NormalizationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsCastSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsStridedSliceSupported(const TensorInfo &input, const TensorInfo &output, const StridedSliceDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsConvertFp32ToBf16Supported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
ISubgraphViewConverter supported
bool IsAdditionSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
A Convolution2dDescriptor for the Convolution2dLayer.
bool IsConcatSupported(const std::vector< const TensorInfo *> inputs, const TensorInfo &output, const ConcatDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsSplitterSupported(const TensorInfo &input, const ViewsDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
arm_compute::Status NeonDepthwiseConvolutionWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, const ActivationDescriptor *activationDescriptor)
arm_compute::Status NeonMeanWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const MeanDescriptor &desc)
ResizeMethod m_Method
The Interpolation method to use (Bilinear, NearestNeighbor).
arm_compute::Status NeonActivationWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const ActivationDescriptor &descriptor)
arm_compute::Status NeonMinimumWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output)
Validate function for validating the inputs and output.
arm_compute::Status NeonStridedSliceWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const StridedSliceDescriptor &descriptor)
arm_compute::Status NeonTransposeConvolution2dWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const TransposeConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases)
bool IsGreaterSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
arm_compute::Status NeonLstmFloatWorkloadValidate(const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &scratchBuffer, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const LstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo)
arm_compute::Status NeonQLstmWorkloadValidate(const TensorInfo &input, const TensorInfo &cellStateIn, const TensorInfo &outputStateIn, const TensorInfo &cellStateOut, const TensorInfo &outputStateOut, const TensorInfo &output, const QLstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo)
bool IsResizeBilinearSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
arm_compute::Status NeonSliceWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const SliceDescriptor &descriptor)
A LogicalBinaryDescriptor for the LogicalBinaryLayer.
arm_compute::Status NeonFullyConnectedWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const TensorInfo &weights, const TensorInfo &biases, const FullyConnectedDescriptor &descriptor, const ActivationDescriptor *activationDescriptor)
bool IsConstantSupported(const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
Copyright (c) 2021 ARM Limited and Contributors.
arm_compute::Status NeonQuantizeWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
bool IsPadSupported(const TensorInfo &input, const TensorInfo &output, const PadDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
void IgnoreUnused(Ts &&...)
std::set< unsigned int > ComputeSplitAxis(const armnn::SplitterDescriptor &desc, const TensorShape &input)
arm_compute::Status NeonAdditionWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ActivationDescriptor *activationDescriptor)
A SpaceToDepthDescriptor for the SpaceToDepthLayer.
arm_compute::Status NeonLogicalAndWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output)
arm_compute::Status NeonInstanceNormalizationWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const InstanceNormalizationDescriptor &descriptor)
bool IsPooling2dSupported(const TensorInfo &input, const TensorInfo &output, const Pooling2dDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
arm_compute::Status NeonLogicalOrWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output)
LogicalBinaryOperation m_Operation
Specifies the logical operation to execute.
A BatchToSpaceNdDescriptor for the BatchToSpaceNdLayer.
bool IsActivationSupported(const TensorInfo &input, const TensorInfo &output, const ActivationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
The NeonBackendModelContext is used to pass in Neon specific backend ModelOptions.
bool IsMultiplicationSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsComparisonSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ComparisonDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
A ResizeDescriptor for the ResizeLayer.
arm_compute::Status NeonL2NormalizationWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const L2NormalizationDescriptor &descriptor)
arm_compute::Status NeonAbsWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
A StackDescriptor for the StackLayer.
arm_compute::Status NeonQuantizedLstmWorkloadValidate(const TensorInfo &input, const TensorInfo &cellStateIn, const TensorInfo &outputStateIn, const TensorInfo &cellStateOut, const TensorInfo &outputStateOut, const QuantizedLstmInputParamsInfo &paramsInfo)
arm_compute::Status NeonStackWorkloadValidate(const std::vector< const TensorInfo *> &inputs, const TensorInfo &output, const StackDescriptor &descriptor)
arm_compute::Status NeonSpaceToBatchNdWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const SpaceToBatchNdDescriptor &descriptor)
arm_compute::Status NeonSubtractionWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ActivationDescriptor *activationDescriptor)
A PadDescriptor for the PadLayer.
arm_compute::Status NeonReduceWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const ReduceDescriptor &desc)
bool IsAbsSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsLogicalBinarySupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const LogicalBinaryDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported) const override
DataType
Definition: Types.hpp:36
bool IsFillSupported(const TensorInfo &input, const TensorInfo &output, const FillDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
An LstmDescriptor for the LstmLayer.
arm_compute::Status NeonExpWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
arm_compute::Status NeonReshapeWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
bool IsInputSupported(const TensorInfo &input, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsStackSupported(const std::vector< const TensorInfo *> &inputs, const TensorInfo &output, const StackDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
std::shared_ptr< IBackendModelContext > IBackendSpecificModelContextPtr
A L2NormalizationDescriptor for the L2NormalizationLayer.
bool IsMinimumSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsPreluSupported(const TensorInfo &input, const TensorInfo &alpha, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsOutputSupported(const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
An ArgMinMaxDescriptor for ArgMinMaxLayer.
Definition: Descriptors.hpp:56
bool IsInstanceNormalizationSupported(const TensorInfo &input, const TensorInfo &output, const InstanceNormalizationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
DataType GetDataType() const
Definition: Tensor.hpp:194
An OriginsDescriptor for the ConcatLayer.
A ReduceDescriptor for the REDUCE operators.
A FullyConnectedDescriptor for the FullyConnectedLayer.
arm_compute::Status NeonLogicalNotWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
bool IsMergerSupported(const std::vector< const TensorInfo *> inputs, const TensorInfo &output, const MergerDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsNormalizationSupported(const TensorInfo &input, const TensorInfo &output, const NormalizationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsElementwiseUnarySupported(const TensorInfo &input, const TensorInfo &output, const ElementwiseUnaryDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
uint32_t m_TargetWidth
Target width value.
arm_compute::Status NeonCastValidate(const TensorInfo &input, const TensorInfo &output)
A GatherDescriptor for the GatherLayer.
Status
enumeration
Definition: Types.hpp:30
arm_compute::Status NeonComparisonWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ComparisonDescriptor &descriptor)
bool IsDivisionSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
arm_compute::Status NeonConcatWorkloadValidate(const std::vector< const TensorInfo *> &inputs, const TensorInfo &output, const OriginsDescriptor &descriptor)
arm_compute::Status NeonPermuteWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const PermuteDescriptor &descriptor)
bool IsFloorSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
A QLstmDescriptor for the QLstmLayer.
arm_compute::Status NeonConvolution2dWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const Convolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, bool isFastMathEnabled, const ActivationDescriptor *activationDescriptor)
bool IsLogSoftmaxSupported(const TensorInfo &input, const TensorInfo &output, const LogSoftmaxDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
An ActivationDescriptor for the ActivationLayer.
Definition: Descriptors.hpp:25
bool IsResizeSupported(const TensorInfo &input, const TensorInfo &output, const ResizeDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsTransposeConvolution2dSupported(const TensorInfo &input, const TensorInfo &output, const TransposeConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
uint32_t m_TargetHeight
Target height value.
A SliceDescriptor for the SliceLayer.
arm_compute::Status NeonDequantizeWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
bool IsReshapeSupported(const TensorInfo &input, const TensorInfo &output, const ReshapeDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
arm_compute::Status NeonDivisionWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ActivationDescriptor *activationDescriptor)
bool IsBatchNormalizationSupported(const TensorInfo &input, const TensorInfo &output, const TensorInfo &mean, const TensorInfo &var, const TensorInfo &beta, const TensorInfo &gamma, const BatchNormalizationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
void SetValueChecked(Optional< T &> optionalRef, V &&val)
A SpaceToBatchNdDescriptor for the SpaceToBatchNdLayer.
arm_compute::Status NeonNormalizationWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const NormalizationDescriptor &descriptor)
arm_compute::Status NeonBatchToSpaceNdWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const BatchToSpaceNdDescriptor &desc)
bool IsQuantizeSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
arm_compute::Status NeonRsqrtWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
bool IsConvertFp16ToFp32Supported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
arm_compute::Status NeonPadWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const PadDescriptor &descriptor)
A ElementwiseUnaryDescriptor for the ElementwiseUnaryLayer.
Definition: Descriptors.hpp:98
bool IsSubtractionSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsLstmSupported(const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &scratchBuffer, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const LstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
arm_compute::Status NeonArgMinMaxWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const ArgMinMaxDescriptor &descriptor)
arm_compute::Status NeonSoftmaxWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const SoftmaxDescriptor &descriptor)
bool IsSpaceToBatchNdSupported(const TensorInfo &input, const TensorInfo &output, const SpaceToBatchNdDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
arm_compute::Status NeonTransposeWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const TransposeDescriptor &descriptor)
uint32_t GetNumDimensions() const
Get the number of dimensions.
A MeanDescriptor for the MeanLayer.
bool IsDepthwiseConvolutionSupported(const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
arm_compute::Status NeonMaximumWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output)
bool IsArgMinMaxSupported(const TensorInfo &input, const TensorInfo &output, const ArgMinMaxDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsSpaceToDepthSupported(const TensorInfo &input, const TensorInfo &output, const SpaceToDepthDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
A TransposeDescriptor for the TransposeLayer.
A StridedSliceDescriptor for the StridedSliceLayer.
bool IsTransposeSupported(const TensorInfo &input, const TensorInfo &output, const TransposeDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsMaximumSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
arm_compute::Status NeonConstantWorkloadValidate(const TensorInfo &output)
bool IsFullyConnectedSupported(const TensorInfo &input, const TensorInfo &output, const TensorInfo &weights, const TensorInfo &biases, const FullyConnectedDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsMeanSupported(const TensorInfo &input, const TensorInfo &output, const MeanDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsBatchToSpaceNdSupported(const TensorInfo &input, const TensorInfo &output, const BatchToSpaceNdDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
arm_compute::Status NeonPreluWorkloadValidate(const TensorInfo &input, const TensorInfo &alpha, const TensorInfo &output)
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported,...)
arm_compute::Status NeonDepthToSpaceWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const DepthToSpaceDescriptor &descriptor)
A Pooling2dDescriptor for the Pooling2dLayer.
arm_compute::Status NeonResizeWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const ResizeDescriptor &descriptor)
A NormalizationDescriptor for the NormalizationLayer.
bool IsQLstmSupported(const TensorInfo &input, const TensorInfo &previousOutputIn, const TensorInfo &previousCellStateIn, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const QLstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsConvertBf16ToFp32Supported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
An InstanceNormalizationDescriptor for InstanceNormalizationLayer.
unsigned int GetConcatAxis() const
Get the concatenation axis value.
bool IsSupportedForDataTypeGeneric(Optional< std::string &> reasonIfUnsupported, DataType dataType, Float16Func float16FuncPtr, Float32Func float32FuncPtr, Uint8Func uint8FuncPtr, Int32Func int32FuncPtr, BooleanFunc booleanFuncPtr, Params &&... params)
A SoftmaxDescriptor for the SoftmaxLayer.
bool IsReduceSupported(const TensorInfo &input, const TensorInfo &output, const ReduceDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
arm_compute::Status NeonPooling2dWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const Pooling2dDescriptor &descriptor)
bool IsDequantizeSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
A DepthwiseConvolution2dDescriptor for the DepthwiseConvolution2dLayer.
bool IsRsqrtSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
A FillDescriptor for the FillLayer.
A BatchNormalizationDescriptor for the BatchNormalizationLayer.
arm_compute::Status NeonMultiplicationWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ActivationDescriptor *activationDescriptor)
A PermuteDescriptor for the PermuteLayer.
bool IsPermuteSupported(const TensorInfo &input, const TensorInfo &output, const PermuteDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override