ArmNN 21.11 — NeonLayerSupport.cpp
Generated documentation page for this source file.
1 //
2 // Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
6 #include "NeonLayerSupport.hpp"
7 #include "NeonBackendId.hpp"
9 
10 #include <armnn/Descriptors.hpp>
11 #include <armnn/Exceptions.hpp>
12 #include <armnn/Tensor.hpp>
13 #include <armnn/Types.hpp>
15 
16 #include <InternalTypes.hpp>
17 #include <LayerSupportCommon.hpp>
20 
21 #if defined(ARMCOMPUTENEON_ENABLED)
80 #endif
81 
82 namespace armnn
83 {
84 
85 namespace
86 {
87 
88 template< typename ... Args>
89 bool IsNeonBackendSupported(Optional<std::string&> reasonIfUnsupported, Args... args)
90 {
91  IgnoreUnused(reasonIfUnsupported, (args)...);
92 #if defined(ARMCOMPUTENEON_ENABLED)
93  return true;
94 #else
95  SetValueChecked(reasonIfUnsupported, "The armnn library has been built without NEON support");
96  return false;
97 #endif
98 }
99 
100 template<typename FloatFunc, typename Uint8Func, typename ... Params>
101 bool IsSupportedForDataTypeNeon(Optional<std::string&> reasonIfUnsupported,
102  DataType dataType,
103  FloatFunc floatFuncPtr,
104  Uint8Func uint8FuncPtr,
105  Params&&... params)
106 {
107  return IsNeonBackendSupported(reasonIfUnsupported) &&
108  IsSupportedForDataTypeGeneric(reasonIfUnsupported,
109  dataType,
110  floatFuncPtr,
111  floatFuncPtr,
112  uint8FuncPtr,
113  &FalseFunc<>,
114  &FalseFunc<>,
115  std::forward<Params>(params)...);
116 }
117 
#if defined(ARMCOMPUTENEON_ENABLED)
// Runs an Arm Compute Library validate function and translates its Status
// into the bool + optional reason-string convention used by this class.
template<class FuncType, class... Args>
inline bool IsWorkloadSupported(FuncType& func, Optional<std::string&> reasonIfUnsupported, Args&&... args)
{
    const arm_compute::Status aclStatus = func(std::forward<Args>(args)...);
    if (aclStatus.error_code() == arm_compute::ErrorCode::OK)
    {
        return true;
    }
    // Surface ACL's error text to the caller when a reason was requested.
    if (reasonIfUnsupported)
    {
        reasonIfUnsupported.value() = aclStatus.error_description();
    }
    return false;
}

// With NEON available, delegate to the workload's validate function...
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported, ...) \
    return IsWorkloadSupported(func, reasonIfUnsupported, __VA_ARGS__);
#else
// ...otherwise report that the library was built without NEON support.
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported, ...) \
    return IsNeonBackendSupported(reasonIfUnsupported, __VA_ARGS__);
#endif
137 } // anonymous namespace
138 
140  : m_ModelContextPtr(modelContextPtr)
141 {
142 }
143 
145  : m_ModelContextPtr(nullptr)
146 {
147 }
148 
150  const TensorInfo& output,
151  const ActivationDescriptor& descriptor,
152  Optional<std::string&> reasonIfUnsupported) const
153 {
154  IgnoreUnused(descriptor);
156  reasonIfUnsupported,
157  input,
158  output,
159  descriptor);
160 }
161 
163  const TensorInfo& input1,
164  const TensorInfo& output,
165  Optional<std::string&> reasonIfUnsupported) const
166 {
168  reasonIfUnsupported,
169  input0,
170  input1,
171  output,
172  nullptr);
173 }
174 
176  const TensorInfo& output,
177  const ArgMinMaxDescriptor& descriptor,
178  Optional<std::string&> reasonIfUnsupported) const
179 {
181  reasonIfUnsupported,
182  input,
183  output,
184  descriptor);
185 }
186 
188  const TensorInfo& output,
189  const TensorInfo& mean,
190  const TensorInfo& var,
191  const TensorInfo& beta,
192  const TensorInfo& gamma,
193  const BatchNormalizationDescriptor& descriptor,
194  Optional<std::string&> reasonIfUnsupported) const
195 {
197  reasonIfUnsupported,
198  input,
199  output,
200  mean,
201  var,
202  beta,
203  gamma,
204  descriptor,
205  nullptr);
206 }
207 
209  const TensorInfo& output,
210  const BatchToSpaceNdDescriptor& descriptor,
211  Optional<std::string&> reasonIfUnsupported) const
212 {
214  reasonIfUnsupported,
215  input,
216  output,
217  descriptor);
218 }
219 
221  const TensorInfo& output,
222  Optional<std::string&> reasonIfUnsupported) const
223 {
225  reasonIfUnsupported,
226  input,
227  output);
228 }
229 
231  const TensorInfo& output,
232  const ChannelShuffleDescriptor& descriptor,
233  Optional<std::string&> reasonIfUnsupported) const
234 {
236  reasonIfUnsupported,
237  input,
238  output,
239  descriptor);
240 }
241 
243  const TensorInfo& input1,
244  const TensorInfo& output,
245  const ComparisonDescriptor& descriptor,
246  Optional<std::string&> reasonIfUnsupported) const
247 {
248 
250  reasonIfUnsupported,
251  input0,
252  input1,
253  output,
254  descriptor);
255 }
256 
257 bool NeonLayerSupport::IsConcatSupported(const std::vector<const TensorInfo*> inputs,
258  const TensorInfo& output,
259  const ConcatDescriptor& descriptor,
260  Optional<std::string&> reasonIfUnsupported) const
261 {
262  if (descriptor.GetNumDimensions() <= descriptor.GetConcatAxis())
263  {
264  SetValueChecked(reasonIfUnsupported, "Neon Concat: Concat axis > Number of dimensions.");
265  return false;
266  }
267 
268  unsigned int concatInnerAxis = (descriptor.GetNumDimensions() - descriptor.GetConcatAxis()) - 1;
269  if(concatInnerAxis < 3) // Width, height, or channels
270  {
272  reasonIfUnsupported,
273  inputs,
274  output,
275  descriptor);
276  }
277  else if (concatInnerAxis == 3)
278  {
279  for (auto& input : inputs)
280  {
281  if (input && !output.IsTypeSpaceMatch(*input)) // Cannot use sub-tensors if the types are not same space
282  {
283  SetValueChecked(reasonIfUnsupported, "Neon Concat: Types and quantization parameters must match.");
284  return false;
285  }
286  }
287  return true; // Sub-tensors support concat along batch
288  }
289  else // > 4 dimensions not supported.
290  {
291  SetValueChecked(reasonIfUnsupported, "Neon Concat: Maximum of 4 dimensions supported.");
292  return false;
293  }
294 }
295 
297  Optional<std::string&> reasonIfUnsupported) const
298 {
300  reasonIfUnsupported,
301  output);
302 }
303 
305  const TensorInfo& output,
306  Optional<std::string&> reasonIfUnsupported) const
307 {
308  armnn::IgnoreUnused(input);
309  armnn::IgnoreUnused(output);
310  armnn::IgnoreUnused(reasonIfUnsupported);
311  return true;
312 }
313 
315  const TensorInfo& output,
316  Optional<std::string&> reasonIfUnsupported) const
317 {
318  armnn::IgnoreUnused(input);
319  armnn::IgnoreUnused(output);
320  armnn::IgnoreUnused(reasonIfUnsupported);
321  return true;
322 }
323 
325  const TensorInfo& output,
326  Optional<std::string&> reasonIfUnsupported) const
327 {
328  armnn::IgnoreUnused(input);
329  armnn::IgnoreUnused(output);
330  armnn::IgnoreUnused(reasonIfUnsupported);
331  return true;
332 }
333 
335  const TensorInfo& output,
336  Optional<std::string&> reasonIfUnsupported) const
337 {
338  armnn::IgnoreUnused(input);
339  armnn::IgnoreUnused(output);
340  armnn::IgnoreUnused(reasonIfUnsupported);
341  return true;
342 }
343 
345  const TensorInfo& output,
346  const Convolution2dDescriptor& descriptor,
347  const TensorInfo& weights,
348  const Optional<TensorInfo>& biases,
349  Optional<std::string&> reasonIfUnsupported) const
350 {
351  bool isFastMathEnabled = false;
352 #if defined(ARMCOMPUTENEON_ENABLED)
353  if (m_ModelContextPtr)
354  {
355  if (m_ModelContextPtr.get() != nullptr)
356  {
357  auto modelOptions = dynamic_cast<NeonBackendModelContext*>(m_ModelContextPtr.get());
358  if (modelOptions)
359  {
360  isFastMathEnabled = modelOptions->IsFastMathEnabled();
361  }
362  }
363  }
364 #endif
365 
367  reasonIfUnsupported,
368  input,
369  output,
370  descriptor,
371  weights,
372  biases,
373  isFastMathEnabled,
374  nullptr);
375 }
376 
378  const TensorInfo& output,
379  const Convolution3dDescriptor& descriptor,
380  const TensorInfo& weights,
381  const Optional<TensorInfo>& biases,
382  Optional<std::string&> reasonIfUnsupported) const
383 {
384  bool isFastMathEnabled = false;
385 #if defined(ARMCOMPUTENEON_ENABLED)
386  if (m_ModelContextPtr)
387  {
388  if (m_ModelContextPtr.get() != nullptr)
389  {
390  auto modelOptions = dynamic_cast<NeonBackendModelContext*>(m_ModelContextPtr.get());
391  if (modelOptions)
392  {
393  isFastMathEnabled = modelOptions->IsFastMathEnabled();
394  }
395  }
396  }
397 #endif
398 
400  reasonIfUnsupported,
401  input,
402  output,
403  descriptor,
404  weights,
405  biases,
406  isFastMathEnabled,
407  nullptr);
408 }
409 
411  const TensorInfo& output,
412  const DepthToSpaceDescriptor& descriptor,
413  Optional<std::string&> reasonIfUnsupported) const
414 {
416  reasonIfUnsupported,
417  input,
418  output,
419  descriptor);
420 }
421 
423  const TensorInfo& output,
424  const DepthwiseConvolution2dDescriptor& descriptor,
425  const TensorInfo& weights,
426  const Optional<TensorInfo>& biases,
427  Optional<std::string&> reasonIfUnsupported) const
428 {
430  reasonIfUnsupported,
431  input,
432  output,
433  descriptor,
434  weights,
435  biases,
436  nullptr);
437 }
438 
440  const TensorInfo& output,
441  Optional<std::string&> reasonIfUnsupported) const
442 {
444  reasonIfUnsupported,
445  input,
446  output);
447 }
448 
450  const TensorInfo& output,
451  const DepthwiseConvolution2dDescriptor& descriptor,
452  const TensorInfo& weights,
453  const Optional<TensorInfo>& biases,
454  Optional<std::string&> reasonIfUnsupported) const
455 {
457  reasonIfUnsupported,
458  input,
459  output,
460  descriptor,
461  weights,
462  biases,
463  nullptr);
464 }
465 
467  const TensorInfo& output,
468  const ElementwiseUnaryDescriptor& descriptor,
469  Optional<std::string&> reasonIfUnsupported) const
470 {
471  switch(descriptor.m_Operation)
472  {
473  case UnaryOperation::Abs:
475  reasonIfUnsupported,
476  input,
477  output);
478  case UnaryOperation::Exp:
480  reasonIfUnsupported,
481  input,
482  output);
485  reasonIfUnsupported,
486  input,
487  output);
488  case UnaryOperation::Log:
490  reasonIfUnsupported,
491  input,
492  output);
493  case UnaryOperation::Neg:
495  reasonIfUnsupported,
496  input,
497  output);
500  reasonIfUnsupported,
501  input,
502  output);
503  case UnaryOperation::Sin:
505  reasonIfUnsupported,
506  input,
507  output);
508  default:
509  return false;
510  }
511 }
512 
514  const TensorInfo& output,
515  const FillDescriptor& descriptor,
516  Optional<std::string&> reasonIfUnsupported) const
517 {
518  armnn::IgnoreUnused(input);
519  armnn::IgnoreUnused(output);
520  armnn::IgnoreUnused(descriptor);
521 
522  return IsNeonBackendSupported(reasonIfUnsupported);
523 }
524 
526  const TensorInfo& output,
527  Optional<std::string&> reasonIfUnsupported) const
528 {
529  armnn::IgnoreUnused(output);
530  return IsNeonBackendSupported(reasonIfUnsupported) &&
531  IsSupportedForDataTypeGeneric(reasonIfUnsupported,
532  input.GetDataType(),
533  &FalseFuncF16<>,
534  &TrueFunc<>,
535  &FalseFuncU8<>,
536  &FalseFuncI32<>,
537  &FalseFuncU8<>);
538 }
539 
541  const TensorInfo& output,
542  const TensorInfo& weights,
543  const TensorInfo& biases,
544  const FullyConnectedDescriptor& descriptor,
545  Optional<std::string&> reasonIfUnsupported) const
546 {
548  reasonIfUnsupported,
549  input,
550  output,
551  weights,
552  biases,
553  descriptor,
554  nullptr);
555 }
556 
558  const TensorInfo& input1,
559  const TensorInfo& output,
560  const GatherDescriptor& descriptor,
561  Optional<std::string&> reasonIfUnsupported) const
562 {
564  reasonIfUnsupported,
565  input0,
566  input1,
567  output,
568  descriptor);
569 }
570 
572  Optional<std::string&> reasonIfUnsupported) const
573 {
574  return IsNeonBackendSupported(reasonIfUnsupported, input);
575 }
576 
578  const TensorInfo& output,
579  const InstanceNormalizationDescriptor& descriptor,
580  Optional<std::string&> reasonIfUnsupported) const
581 {
583  reasonIfUnsupported,
584  input,
585  output,
586  descriptor);
587 }
588 
590  const TensorInfo& output,
591  const L2NormalizationDescriptor& descriptor,
592  Optional<std::string&> reasonIfUnsupported) const
593 {
594  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonL2NormalizationWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
595 }
596 
598  const TensorInfo& input1,
599  const TensorInfo& output,
600  const LogicalBinaryDescriptor& descriptor,
601  Optional<std::string&> reasonIfUnsupported) const
602 {
603  switch(descriptor.m_Operation)
604  {
607  reasonIfUnsupported,
608  input0,
609  input1,
610  output);
613  reasonIfUnsupported,
614  input0,
615  input1,
616  output);
617  default:
618  return false;
619  }
620 }
621 
623  const TensorInfo& output,
624  const LogSoftmaxDescriptor& descriptor,
625  Optional<std::string&> reasonIfUnsupported) const
626 {
627  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonLogSoftmaxWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
628 }
629 
631  const TensorInfo& outputStateIn,
632  const TensorInfo& cellStateIn,
633  const TensorInfo& scratchBuffer,
634  const TensorInfo& outputStateOut,
635  const TensorInfo& cellStateOut,
636  const TensorInfo& output,
637  const LstmDescriptor& descriptor,
638  const LstmInputParamsInfo& paramsInfo,
639  Optional<std::string&> reasonIfUnsupported) const
640 {
642  reasonIfUnsupported,
643  input,
644  outputStateIn,
645  cellStateIn,
646  scratchBuffer,
647  outputStateOut,
648  cellStateOut,
649  output,
650  descriptor,
651  paramsInfo);
652 }
653 
655  const TensorInfo& input1,
656  const TensorInfo& output,
657  Optional<std::string&> reasonIfUnsupported) const
658 {
660  reasonIfUnsupported,
661  input0,
662  input1,
663  output);
664 }
665 
667  const TensorInfo& output,
668  const MeanDescriptor& descriptor,
669  Optional<std::string&> reasonIfUnsupported) const
670 {
672  reasonIfUnsupported,
673  input,
674  output,
675  descriptor);
676 }
677 
679  const TensorInfo& input1,
680  const TensorInfo& output,
681  Optional<std::string&> reasonIfUnsupported) const
682 {
684  reasonIfUnsupported,
685  input0,
686  input1,
687  output);
688 }
689 
691  const TensorInfo& input1,
692  const TensorInfo& output,
693  Optional<std::string&> reasonIfUnsupported) const
694 {
696  reasonIfUnsupported,
697  input0,
698  input1,
699  output,
700  nullptr);
701 }
702 
704  const TensorInfo& input1,
705  const TensorInfo& output,
706  Optional<std::string&> reasonIfUnsupported) const
707 {
709  reasonIfUnsupported,
710  input0,
711  input1,
712  output,
713  nullptr);
714 }
715 
717  const TensorInfo& output,
718  const NormalizationDescriptor& descriptor,
719  Optional<std::string&> reasonIfUnsupported) const
720 {
722  reasonIfUnsupported,
723  input,
724  output,
725  descriptor);
726 }
727 
729  Optional<std::string&> reasonIfUnsupported) const
730 {
731  return IsNeonBackendSupported(reasonIfUnsupported, output);
732 }
733 
735  const TensorInfo& output,
736  const PadDescriptor& descriptor,
737  Optional<std::string&> reasonIfUnsupported) const
738 {
740  reasonIfUnsupported,
741  input,
742  output,
743  descriptor);
744 }
745 
747  const TensorInfo& output,
748  const PermuteDescriptor& descriptor,
749  Optional<std::string&> reasonIfUnsupported) const
750 {
751  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonPermuteWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
752 }
753 
755  const TensorInfo& output,
756  const Pooling2dDescriptor& descriptor,
757  Optional<std::string&> reasonIfUnsupported) const
758 {
759  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonPooling2dWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
760 }
761 
763  const armnn::TensorInfo &alpha,
764  const armnn::TensorInfo &output,
765  armnn::Optional<std::string &> reasonIfUnsupported) const
766 {
767  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonPreluWorkloadValidate, reasonIfUnsupported, input, alpha, output);
768 }
769 
771  const TensorInfo& previousOutputIn,
772  const TensorInfo& previousCellStateIn,
773  const TensorInfo& outputStateOut,
774  const TensorInfo& cellStateOut,
775  const TensorInfo& output,
776  const QLstmDescriptor& descriptor,
777  const LstmInputParamsInfo& paramsInfo,
778  Optional<std::string&> reasonIfUnsupported) const
779 {
780  // Check required here in order to pass IsLayerSupported for datatypes tests
781  if (input.GetDataType() == armnn::DataType::QAsymmS8 &&
782  previousOutputIn.GetDataType() == armnn::DataType::QAsymmS8 &&
783  previousCellStateIn.GetDataType() == armnn::DataType::QSymmS16 &&
784  outputStateOut.GetDataType() == armnn::DataType::QAsymmS8 &&
785  cellStateOut.GetDataType() == armnn::DataType::QSymmS16 &&
787  {
789  reasonIfUnsupported,
790  input,
791  previousCellStateIn,
792  previousOutputIn,
793  cellStateOut,
794  outputStateOut,
795  output,
796  descriptor,
797  paramsInfo);
798  }
799  else
800  {
801  return false;
802  }
803 }
804 
806  const TensorInfo& output,
807  Optional<std::string&> reasonIfUnsupported) const
808 {
810  reasonIfUnsupported,
811  input,
812  output);
813 }
814 
816  const TensorInfo& cellStateIn,
817  const TensorInfo& outputStateIn,
818  const TensorInfo& cellStateOut,
819  const TensorInfo& outputStateOut,
820  const QuantizedLstmInputParamsInfo& paramsInfo,
821  Optional<std::string&> reasonIfUnsupported) const
822 {
824  reasonIfUnsupported,
825  input,
826  cellStateIn,
827  outputStateIn,
828  cellStateOut,
829  outputStateOut,
830  paramsInfo);
831 }
832 
834  const TensorInfo& output,
835  const ReduceDescriptor& descriptor,
836  Optional<std::string&> reasonIfUnsupported) const
837 {
839  reasonIfUnsupported,
840  input,
841  output,
842  descriptor);
843 }
844 
846  const TensorInfo& output,
847  const ReshapeDescriptor& descriptor,
848  Optional<std::string&> reasonIfUnsupported) const
849 {
850  armnn::IgnoreUnused(descriptor);
852  reasonIfUnsupported,
853  input,
854  output);
855 }
856 
858  const TensorInfo& output,
859  const ResizeDescriptor& descriptor,
860  Optional<std::string&> reasonIfUnsupported) const
861 {
863  reasonIfUnsupported,
864  input,
865  output,
866  descriptor);
867 }
868 
870  const TensorInfo& output,
871  const SliceDescriptor& descriptor,
872  Optional<std::string&> reasonIfUnsupported) const
873 {
875  reasonIfUnsupported,
876  input,
877  output,
878  descriptor);
879 }
880 
882  const TensorInfo& output,
883  const SoftmaxDescriptor& descriptor,
884  Optional<std::string&> reasonIfUnsupported) const
885 {
886  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonSoftmaxWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
887 }
888 
890  const TensorInfo& output,
891  const SpaceToBatchNdDescriptor& descriptor,
892  Optional<std::string&> reasonIfUnsupported) const
893 {
895  reasonIfUnsupported,
896  input,
897  output,
898  descriptor);
899 }
900 
902  const TensorInfo& output,
903  const SpaceToDepthDescriptor& descriptor,
904  Optional<std::string&> reasonIfUnsupported) const
905 {
907  reasonIfUnsupported,
908  input,
909  output,
910  descriptor);
911 }
912 
914  const std::vector<std::reference_wrapper<TensorInfo>>& outputs,
915  const ViewsDescriptor& descriptor,
916  Optional<std::string&> reasonIfUnsupported) const
917 {
918 #if defined(ARMCOMPUTENEON_ENABLED)
919  // Split along the last dimension, cannot use sub-tensors
920  // as width and height of the sub-tensors do not match
921  // the width and height of the parent tensor
922  // in case of input with more than 2D.
923  std::set<unsigned int> splitAxis = ComputeSplitAxis(descriptor, input.GetShape());
924  if (descriptor.GetNumDimensions() > 2 && splitAxis.size() == 1 &&
925  *splitAxis.begin() == descriptor.GetNumDimensions() - 1 )
926  {
928  reasonIfUnsupported,
929  input,
930  outputs,
931  *splitAxis.begin());
932  }
933 #endif
934  IgnoreUnused(descriptor);
935  for (auto output : outputs)
936  {
937  if (!input.IsTypeSpaceMatch(output)) // Cannot use sub-tensors if the types are not same space
938  {
939  SetValueChecked(reasonIfUnsupported, "Neon Splitter: Types and quantization parameters must match.");
940  return false;
941  }
942  }
943  return true;
944 }
945 
946 bool NeonLayerSupport::IsStackSupported(const std::vector<const TensorInfo*>& inputs,
947  const TensorInfo& output,
948  const StackDescriptor& descriptor,
949  Optional<std::string&> reasonIfUnsupported) const
950 {
952  reasonIfUnsupported,
953  inputs,
954  output,
955  descriptor);
956 }
957 
959  const TensorInfo& output,
960  const StridedSliceDescriptor& descriptor,
961  Optional<std::string&> reasonIfUnsupported) const
962 {
964  reasonIfUnsupported,
965  input,
966  output,
967  descriptor);
968 }
969 
971  const TensorInfo& input1,
972  const TensorInfo& output,
973  Optional<std::string&> reasonIfUnsupported) const
974 {
976  reasonIfUnsupported,
977  input0,
978  input1,
979  output,
980  nullptr);
981 }
982 
984  const TensorInfo& output,
985  const TransposeConvolution2dDescriptor& descriptor,
986  const TensorInfo& weights,
987  const Optional<TensorInfo>& biases,
988  Optional<std::string&> reasonIfUnsupported) const
989 {
991  reasonIfUnsupported,
992  input,
993  output,
994  descriptor,
995  weights,
996  biases);
997 }
998 
1000  const TensorInfo& output,
1001  const TransposeDescriptor& descriptor,
1002  Optional<std::string&> reasonIfUnsupported) const
1003 {
1004  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonTransposeWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
1005 }
1006 
1007 } // namespace armnn
arm_compute::Status NeonGatherWorkloadValidate(const TensorInfo &input, const TensorInfo &indices, const TensorInfo &output, const GatherDescriptor &descriptor)
arm_compute::Status NeonNegWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
UnaryOperation m_Operation
Specifies the elementwiseUnary operation to execute.
bool IsSliceSupported(const TensorInfo &input, const TensorInfo &output, const SliceDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsSoftmaxSupported(const TensorInfo &input, const TensorInfo &output, const SoftmaxDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsConvolution2dSupported(const TensorInfo &input, const TensorInfo &output, const Convolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
A ViewsDescriptor for the SplitterLayer.
bool IsTypeSpaceMatch(const TensorInfo &other) const
Check that the types are the same and, if quantize, that the quantization parameters are the same...
Definition: Tensor.cpp:434
bool IsConvertFp32ToFp16Supported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
arm_compute::Status NeonSpaceToDepthWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const SpaceToDepthDescriptor &descriptor)
arm_compute::Status NeonSplitterWorkloadValidate(const TensorInfo &input, const std::vector< std::reference_wrapper< TensorInfo >> &outputs, unsigned int splitAxis)
A TransposeConvolution2dDescriptor for the TransposeConvolution2dLayer.
const TensorShape & GetShape() const
Definition: Tensor.hpp:191
arm_compute::Status NeonLogSoftmaxWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const LogSoftmaxDescriptor &descriptor)
A ReshapeDescriptor for the ReshapeLayer.
bool IsGatherSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const GatherDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported) const override
arm_compute::Status NeonBatchNormalizationValidate(const TensorInfo &input, const TensorInfo &output, const TensorInfo &mean, const TensorInfo &var, const TensorInfo &beta, const TensorInfo &gamma, const BatchNormalizationDescriptor &descriptor, const ActivationDescriptor *activationDescriptor)
bool IsDilatedDepthwiseConvolutionSupported(const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string &> reason=EmptyOptional()) const override
bool IsQuantizedLstmSupported(const TensorInfo &input, const TensorInfo &cellStateIn, const TensorInfo &outputStateIn, const TensorInfo &cellStateOut, const TensorInfo &outputStateOut, const QuantizedLstmInputParamsInfo &paramsInfo, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
uint32_t GetNumDimensions() const
Get the number of dimensions.
A ComparisonDescriptor for the ComparisonLayer.
Definition: Descriptors.hpp:78
bool IsDepthToSpaceSupported(const TensorInfo &input, const TensorInfo &output, const DepthToSpaceDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsL2NormalizationSupported(const TensorInfo &input, const TensorInfo &output, const L2NormalizationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsCastSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsStridedSliceSupported(const TensorInfo &input, const TensorInfo &output, const StridedSliceDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsConvertFp32ToBf16Supported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsAdditionSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
A Convolution2dDescriptor for the Convolution2dLayer.
bool IsConcatSupported(const std::vector< const TensorInfo *> inputs, const TensorInfo &output, const ConcatDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
arm_compute::Status NeonDepthwiseConvolutionWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, const ActivationDescriptor *activationDescriptor)
arm_compute::Status NeonActivationWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const ActivationDescriptor &descriptor)
arm_compute::Status NeonMinimumWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output)
Validate function that checks the inputs and output for support.
arm_compute::Status NeonStridedSliceWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const StridedSliceDescriptor &descriptor)
arm_compute::Status NeonTransposeConvolution2dWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const TransposeConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases)
arm_compute::Status NeonLstmFloatWorkloadValidate(const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &scratchBuffer, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const LstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo)
arm_compute::Status NeonQLstmWorkloadValidate(const TensorInfo &input, const TensorInfo &cellStateIn, const TensorInfo &outputStateIn, const TensorInfo &cellStateOut, const TensorInfo &outputStateOut, const TensorInfo &output, const QLstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo)
arm_compute::Status NeonSliceWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const SliceDescriptor &descriptor)
A LogicalBinaryDescriptor for the LogicalBinaryLayer.
arm_compute::Status NeonFullyConnectedWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const TensorInfo &weights, const TensorInfo &biases, const FullyConnectedDescriptor &descriptor, const ActivationDescriptor *activationDescriptor)
bool IsConstantSupported(const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
Copyright (c) 2021 ARM Limited and Contributors.
arm_compute::Status NeonQuantizeWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
bool IsPadSupported(const TensorInfo &input, const TensorInfo &output, const PadDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
void IgnoreUnused(Ts &&...)
std::set< unsigned int > ComputeSplitAxis(const armnn::SplitterDescriptor &desc, const TensorShape &input)
arm_compute::Status NeonAdditionWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ActivationDescriptor *activationDescriptor)
A SpaceToDepthDescriptor for the SpaceToDepthLayer.
arm_compute::Status NeonLogWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
arm_compute::Status NeonLogicalAndWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output)
arm_compute::Status NeonInstanceNormalizationWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const InstanceNormalizationDescriptor &descriptor)
bool IsPooling2dSupported(const TensorInfo &input, const TensorInfo &output, const Pooling2dDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
arm_compute::Status NeonLogicalOrWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output)
LogicalBinaryOperation m_Operation
Specifies the logical operation to execute.
A BatchToSpaceNdDescriptor for the BatchToSpaceNdLayer.
bool IsActivationSupported(const TensorInfo &input, const TensorInfo &output, const ActivationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
The NeonBackendModelContext is used to pass in Neon specific backend ModelOptions.
bool IsMultiplicationSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsComparisonSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ComparisonDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
A ResizeBilinearDescriptor for the ResizeBilinearLayer.
arm_compute::Status NeonL2NormalizationWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const L2NormalizationDescriptor &descriptor)
arm_compute::Status NeonAbsWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
A StackDescriptor for the StackLayer.
arm_compute::Status NeonQuantizedLstmWorkloadValidate(const TensorInfo &input, const TensorInfo &cellStateIn, const TensorInfo &outputStateIn, const TensorInfo &cellStateOut, const TensorInfo &outputStateOut, const QuantizedLstmInputParamsInfo &paramsInfo)
arm_compute::Status NeonStackWorkloadValidate(const std::vector< const TensorInfo *> &inputs, const TensorInfo &output, const StackDescriptor &descriptor)
arm_compute::Status NeonMeanWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const MeanDescriptor &descriptor)
arm_compute::Status NeonReduceWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const ReduceDescriptor &descriptor)
arm_compute::Status NeonSpaceToBatchNdWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const SpaceToBatchNdDescriptor &descriptor)
arm_compute::Status NeonSubtractionWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ActivationDescriptor *activationDescriptor)
A PadDescriptor for the PadLayer.
bool IsLogicalBinarySupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const LogicalBinaryDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported) const override
bool IsConvolution3dSupported(const TensorInfo &input, const TensorInfo &output, const Convolution3dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
DataType
Definition: Types.hpp:35
bool IsFillSupported(const TensorInfo &input, const TensorInfo &output, const FillDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
An LstmDescriptor for the LstmLayer.
arm_compute::Status NeonExpWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
arm_compute::Status NeonReshapeWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
bool IsInputSupported(const TensorInfo &input, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
arm_compute::Status NeonBatchToSpaceNdWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const BatchToSpaceNdDescriptor &descriptor)
bool IsStackSupported(const std::vector< const TensorInfo *> &inputs, const TensorInfo &output, const StackDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
std::shared_ptr< IBackendModelContext > IBackendSpecificModelContextPtr
A L2NormalizationDescriptor for the L2NormalizationLayer.
bool IsMinimumSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsPreluSupported(const TensorInfo &input, const TensorInfo &alpha, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsOutputSupported(const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
An ArgMinMaxDescriptor for ArgMinMaxLayer.
Definition: Descriptors.hpp:56
bool IsInstanceNormalizationSupported(const TensorInfo &input, const TensorInfo &output, const InstanceNormalizationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
DataType GetDataType() const
Definition: Tensor.hpp:198
An OriginsDescriptor for the ConcatLayer.
A ReduceDescriptor for the REDUCE operators.
A FullyConnectedDescriptor for the FullyConnectedLayer.
arm_compute::Status NeonLogicalNotWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
bool IsNormalizationSupported(const TensorInfo &input, const TensorInfo &output, const NormalizationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsElementwiseUnarySupported(const TensorInfo &input, const TensorInfo &output, const ElementwiseUnaryDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
arm_compute::Status NeonCastValidate(const TensorInfo &input, const TensorInfo &output)
A GatherDescriptor for the GatherLayer.
Status
enumeration
Definition: Types.hpp:29
arm_compute::Status NeonChannelShuffleValidate(const TensorInfo &input, const TensorInfo &output, const ChannelShuffleDescriptor &descriptor)
arm_compute::Status NeonComparisonWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ComparisonDescriptor &descriptor)
bool IsDivisionSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
arm_compute::Status NeonConcatWorkloadValidate(const std::vector< const TensorInfo *> &inputs, const TensorInfo &output, const OriginsDescriptor &descriptor)
arm_compute::Status NeonPermuteWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const PermuteDescriptor &descriptor)
bool IsFloorSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
A QLstmDescriptor for the QLstmLayer.
arm_compute::Status NeonConvolution2dWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const Convolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, bool isFastMathEnabled, const ActivationDescriptor *activationDescriptor)
bool IsLogSoftmaxSupported(const TensorInfo &input, const TensorInfo &output, const LogSoftmaxDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
An ActivationDescriptor for the ActivationLayer.
Definition: Descriptors.hpp:25
bool IsResizeSupported(const TensorInfo &input, const TensorInfo &output, const ResizeDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsTransposeConvolution2dSupported(const TensorInfo &input, const TensorInfo &output, const TransposeConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
A SliceDescriptor for the SliceLayer.
A Convolution3dDescriptor for the Convolution3dLayer.
arm_compute::Status NeonDequantizeWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
bool IsSplitterSupported(const TensorInfo &input, const std::vector< std::reference_wrapper< TensorInfo >> &outputs, const ViewsDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsReshapeSupported(const TensorInfo &input, const TensorInfo &output, const ReshapeDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
arm_compute::Status NeonDivisionWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ActivationDescriptor *activationDescriptor)
bool IsBatchNormalizationSupported(const TensorInfo &input, const TensorInfo &output, const TensorInfo &mean, const TensorInfo &var, const TensorInfo &beta, const TensorInfo &gamma, const BatchNormalizationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
arm_compute::Status NeonConvolution3dWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const Convolution3dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, bool isFastMathEnabled, const ActivationDescriptor *activationDescriptor)
void SetValueChecked(Optional< T &> optionalRef, V &&val)
A SpaceToBatchNdDescriptor for the SpaceToBatchNdLayer.
arm_compute::Status NeonNormalizationWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const NormalizationDescriptor &descriptor)
bool IsQuantizeSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
arm_compute::Status NeonRsqrtWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
bool IsConvertFp16ToFp32Supported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
arm_compute::Status NeonPadWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const PadDescriptor &descriptor)
A ElementwiseUnaryDescriptor for the ElementwiseUnaryLayer.
Definition: Descriptors.hpp:98
bool IsSubtractionSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsLstmSupported(const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &scratchBuffer, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const LstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
arm_compute::Status NeonArgMinMaxWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const ArgMinMaxDescriptor &descriptor)
arm_compute::Status NeonSoftmaxWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const SoftmaxDescriptor &descriptor)
bool IsSpaceToBatchNdSupported(const TensorInfo &input, const TensorInfo &output, const SpaceToBatchNdDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
arm_compute::Status NeonTransposeWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const TransposeDescriptor &descriptor)
uint32_t GetNumDimensions() const
Get the number of dimensions.
A MeanDescriptor for the MeanLayer.
bool IsDepthwiseConvolutionSupported(const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
arm_compute::Status NeonMaximumWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output)
bool IsArgMinMaxSupported(const TensorInfo &input, const TensorInfo &output, const ArgMinMaxDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsSpaceToDepthSupported(const TensorInfo &input, const TensorInfo &output, const SpaceToDepthDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
A TransposeDescriptor for the TransposeLayer.
A StridedSliceDescriptor for the StridedSliceLayer.
bool IsTransposeSupported(const TensorInfo &input, const TensorInfo &output, const TransposeDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsMaximumSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
arm_compute::Status NeonConstantWorkloadValidate(const TensorInfo &output)
bool IsFullyConnectedSupported(const TensorInfo &input, const TensorInfo &output, const TensorInfo &weights, const TensorInfo &biases, const FullyConnectedDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsMeanSupported(const TensorInfo &input, const TensorInfo &output, const MeanDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsBatchToSpaceNdSupported(const TensorInfo &input, const TensorInfo &output, const BatchToSpaceNdDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
arm_compute::Status NeonPreluWorkloadValidate(const TensorInfo &input, const TensorInfo &alpha, const TensorInfo &output)
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported,...)
arm_compute::Status NeonDepthToSpaceWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const DepthToSpaceDescriptor &descriptor)
A Pooling2dDescriptor for the Pooling2dLayer.
arm_compute::Status NeonResizeWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const ResizeDescriptor &descriptor)
A NormalizationDescriptor for the NormalizationLayer.
bool IsQLstmSupported(const TensorInfo &input, const TensorInfo &previousOutputIn, const TensorInfo &previousCellStateIn, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const QLstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsConvertBf16ToFp32Supported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
An InstanceNormalizationDescriptor for InstanceNormalizationLayer.
unsigned int GetConcatAxis() const
Get the concatenation axis value.
A ChannelShuffleDescriptor for the ChannelShuffle operator.
bool IsSupportedForDataTypeGeneric(Optional< std::string &> reasonIfUnsupported, DataType dataType, Float16Func float16FuncPtr, Float32Func float32FuncPtr, Uint8Func uint8FuncPtr, Int32Func int32FuncPtr, BooleanFunc booleanFuncPtr, Params &&... params)
A SoftmaxDescriptor for the SoftmaxLayer.
bool IsReduceSupported(const TensorInfo &input, const TensorInfo &output, const ReduceDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
arm_compute::Status NeonPooling2dWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const Pooling2dDescriptor &descriptor)
arm_compute::Status NeonSinWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
bool IsDequantizeSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
A DepthwiseConvolution2dDescriptor for the DepthwiseConvolution2dLayer.
A FillDescriptor for the FillLayer.
A BatchNormalizationDescriptor for the BatchNormalizationLayer.
arm_compute::Status NeonMultiplicationWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ActivationDescriptor *activationDescriptor)
bool IsChannelShuffleSupported(const TensorInfo &input, const TensorInfo &output, const ChannelShuffleDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
A PermuteDescriptor for the PermuteLayer.
bool IsPermuteSupported(const TensorInfo &input, const TensorInfo &output, const PermuteDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override