ArmNN
 23.08
NeonLayerSupport.cpp
Go to the documentation of this file.
1 //
2 // Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
6 #include "NeonLayerSupport.hpp"
7 #include "NeonBackendId.hpp"
9 
10 #include <armnn/Exceptions.hpp>
11 #include <armnn/Tensor.hpp>
12 #include <armnn/Types.hpp>
14 
15 #include <InternalTypes.hpp>
16 #include <LayerSupportCommon.hpp>
19 
20 #if defined(ARMCOMPUTENEON_ENABLED)
89 #endif
90 
91 namespace armnn
92 {
93 
94 namespace
95 {
96 
97 const TensorInfo OverrideDataType(const TensorInfo& info, Optional<DataType> type)
98 {
99  if (!type)
100  {
101  return info;
102  }
103  return TensorInfo(info.GetShape(),
104  type.value(),
105  info.GetQuantizationScale(),
106  info.GetQuantizationOffset(),
107  info.IsConstant());
108 }
109 
110 template< typename ... Args>
111 bool IsNeonBackendSupported(Optional<std::string&> reasonIfUnsupported, Args... args)
112 {
113  IgnoreUnused(reasonIfUnsupported, (args)...);
114 #if defined(ARMCOMPUTENEON_ENABLED)
115  return true;
116 #else
117  SetValueChecked(reasonIfUnsupported, "The armnn library has been built without NEON support");
118  return false;
119 #endif
120 }
121 
122 template<typename FloatFunc, typename Uint8Func, typename ... Params>
123 bool IsSupportedForDataTypeNeon(Optional<std::string&> reasonIfUnsupported,
124  DataType dataType,
125  FloatFunc floatFuncPtr,
126  Uint8Func uint8FuncPtr,
127  Params&&... params)
128 {
129  return IsNeonBackendSupported(reasonIfUnsupported) &&
130  IsSupportedForDataTypeGeneric(reasonIfUnsupported,
131  dataType,
132  floatFuncPtr,
133  floatFuncPtr,
134  uint8FuncPtr,
135  &FalseFunc<>,
136  &FalseFunc<>,
137  std::forward<Params>(params)...);
138 }
139 
140 #if defined(ARMCOMPUTENEON_ENABLED)
141 template<class FuncType, class... Args>
142 inline bool IsWorkloadSupported(FuncType& func, Optional<std::string&> reasonIfUnsupported, Args&&... args)
143 {
144  arm_compute::Status aclStatus = func(std::forward<Args>(args)...);
145  const bool supported = (aclStatus.error_code() == arm_compute::ErrorCode::OK);
146  if (!supported && reasonIfUnsupported)
147  {
148  reasonIfUnsupported.value() = aclStatus.error_description();
149  }
150  return supported;
151 }
152 
// Forwards to an ACL validate function and returns its verdict from the
// *enclosing* IsXxxSupported method — note the macro expands to a 'return'
// statement, so it must be the last statement of the calling function.
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported, ...) \
    return IsWorkloadSupported(func, reasonIfUnsupported, __VA_ARGS__);
#else
// NEON compiled out: every validate query reports "unsupported" with a reason.
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported, ...) \
    return IsNeonBackendSupported(reasonIfUnsupported, __VA_ARGS__);
#endif
159 } // anonymous namespace
160 
162  : m_ModelContextPtr(modelContextPtr)
163 {
164 }
165 
167  : m_ModelContextPtr(nullptr)
168 {
169 }
170 
172  const std::vector<TensorInfo>& infos,
173  const BaseDescriptor& descriptor,
174  const Optional<LstmInputParamsInfo>& lstmParamsInfo,
175  const Optional<QuantizedLstmInputParamsInfo>& quantizedLstmParamsInfo,
176  Optional<std::string&> reasonIfUnsupported,
177  const NeonLayerSupport& support)
178 {
179  switch (type)
180  {
182  return support.IsActivationSupported(infos[0],
183  infos[1],
184  *(PolymorphicDowncast<const ActivationDescriptor*>(&descriptor)),
185  reasonIfUnsupported);
186  case LayerType::Addition:
187  return support.IsAdditionSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
189  return support.IsArgMinMaxSupported(infos[0],
190  infos[1],
191  *(PolymorphicDowncast<const ArgMinMaxDescriptor*>(&descriptor)),
192  reasonIfUnsupported);
194  return support.IsBatchMatMulSupported(infos[0],
195  infos[1],
196  infos[2],
197  *(PolymorphicDowncast<const BatchMatMulDescriptor*>(&descriptor)),
198  reasonIfUnsupported);
200  return support.IsBatchNormalizationSupported(infos[0],
201  infos[1],
202  infos[2],
203  infos[3],
204  infos[4],
205  infos[5],
206  *(PolymorphicDowncast<const
207  BatchNormalizationDescriptor*>(&descriptor)),
208  reasonIfUnsupported);
210  return support.IsBatchToSpaceNdSupported(infos[0],
211  infos[1],
212  *(PolymorphicDowncast<const
213  BatchToSpaceNdDescriptor*>(&descriptor)),
214  reasonIfUnsupported);
215  case LayerType::Cast:
216  return support.IsCastSupported(infos[0], infos[1], reasonIfUnsupported);
218  return support.IsChannelShuffleSupported(infos[0],
219  infos[1],
220  *(PolymorphicDowncast<const
221  ChannelShuffleDescriptor*>(&descriptor)),
222  reasonIfUnsupported);
224  return support.IsComparisonSupported(infos[0],
225  infos[1],
226  infos[2],
227  *(PolymorphicDowncast<const ComparisonDescriptor*>(&descriptor)),
228  reasonIfUnsupported);
229  case LayerType::Concat:
230  {
231  std::vector<const TensorInfo*> inputInfos;
232  for (uint32_t i = 0; i < (infos.size() - 1); i++)
233  {
234  inputInfos.push_back(&infos[i]);
235  }
236  return support.IsConcatSupported(inputInfos,
237  infos[infos.size() - 1],
238  *(PolymorphicDowncast<const OriginsDescriptor*>(&descriptor)),
239  reasonIfUnsupported);
240  }
241  case LayerType::Constant:
242  return support.IsConstantSupported(infos[0], reasonIfUnsupported);
244  return support.IsConvertFp16ToFp32Supported(infos[0], infos[1], reasonIfUnsupported);
246  return support.IsConvertFp32ToFp16Supported(infos[0], infos[1], reasonIfUnsupported);
248  {
249  if (infos.size() != 4)
250  {
251  throw InvalidArgumentException("Invalid number of TransposeConvolution2d TensorInfos. "
252  "TensorInfos should be of format: {input, output, weights, biases}.");
253  }
254 
255  auto desc = *(PolymorphicDowncast<const Convolution2dDescriptor*>(&descriptor));
256  if (infos[3] == TensorInfo())
257  {
258  return support.IsConvolution2dSupported(infos[0],
259  infos[1],
260  desc,
261  infos[2],
262  EmptyOptional(),
263  reasonIfUnsupported);
264  }
265  else
266  {
267  return support.IsConvolution2dSupported(infos[0],
268  infos[1],
269  desc,
270  infos[2],
271  infos[3],
272  reasonIfUnsupported);
273  }
274  }
276  {
277  if (infos.size() != 4)
278  {
279  throw InvalidArgumentException("Invalid number of Convolution3d TensorInfos. "
280  "TensorInfos should be of format: {input, output, weights, biases}.");
281  }
282 
283  auto desc = *(PolymorphicDowncast<const Convolution3dDescriptor*>(&descriptor));
284  if (infos[3] == TensorInfo())
285  {
286  return support.IsConvolution3dSupported(infos[0],
287  infos[1],
288  desc,
289  infos[2],
290  EmptyOptional(),
291  reasonIfUnsupported);
292  }
293  else
294  {
295  return support.IsConvolution3dSupported(infos[0],
296  infos[1],
297  desc,
298  infos[2],
299  infos[3],
300  reasonIfUnsupported);
301  }
302  }
304  return support.IsDepthToSpaceSupported(infos[0],
305  infos[1],
306  *(PolymorphicDowncast<const DepthToSpaceDescriptor*>(&descriptor)),
307  reasonIfUnsupported);
309  {
310  if (infos.size() != 4)
311  {
312  throw InvalidArgumentException("Invalid number of DepthwiseConvolution2d TensorInfos. "
313  "TensorInfos should be of format: {input, output, weights, biases}.");
314  }
315 
316  auto desc = *(PolymorphicDowncast<const DepthwiseConvolution2dDescriptor*>(&descriptor));
317  if (infos[3] == TensorInfo())
318  {
319  return support.IsDepthwiseConvolutionSupported(infos[0],
320  infos[1],
321  desc,
322  infos[2],
323  EmptyOptional(),
324  reasonIfUnsupported);
325  }
326  else
327  {
328  return support.IsDepthwiseConvolutionSupported(infos[0],
329  infos[1],
330  desc,
331  infos[2],
332  infos[3],
333  reasonIfUnsupported);
334  }
335  }
337  return support.IsDequantizeSupported(infos[0], infos[1], reasonIfUnsupported);
339  {
340  auto desc = *(PolymorphicDowncast<const DetectionPostProcessDescriptor*>(&descriptor));
341  return support.IsDetectionPostProcessSupported(infos[0],
342  infos[1],
343  infos[2],
344  infos[3],
345  infos[4],
346  infos[5],
347  infos[6],
348  desc,
349  reasonIfUnsupported);
350  }
351  case LayerType::Division:
352  return support.IsDivisionSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
354  {
355  auto desc = *(PolymorphicDowncast<const ElementwiseBinaryDescriptor *>(&descriptor));
356 
357  switch (desc.m_Operation)
358  {
361  reasonIfUnsupported,
362  infos[0],
363  infos[1],
364  infos[2],
365  nullptr);
368  reasonIfUnsupported,
369  infos[0],
370  infos[1],
371  infos[2],
372  nullptr);
375  reasonIfUnsupported,
376  infos[0],
377  infos[1],
378  infos[2]);
381  reasonIfUnsupported,
382  infos[0],
383  infos[1],
384  infos[2]);
387  reasonIfUnsupported,
388  infos[0],
389  infos[1],
390  infos[2],
391  nullptr);
395  reasonIfUnsupported,
396  infos[0],
397  infos[1],
398  infos[2],
399  desc,
400  nullptr);
403  reasonIfUnsupported,
404  infos[0],
405  infos[1],
406  infos[2],
407  nullptr);
408  default:
409  return false;
410  }
411  }
413  return support.IsElementwiseUnarySupported(infos[0],
414  infos[1],
415  *(PolymorphicDowncast<const
416  ElementwiseUnaryDescriptor*>(&descriptor)),
417  reasonIfUnsupported);
418  case LayerType::Fill:
419  return support.IsFillSupported(infos[0],
420  infos[1],
421  *(PolymorphicDowncast<const FillDescriptor*>(&descriptor)),
422  reasonIfUnsupported);
423  case LayerType::Floor:
424  return support.IsFloorSupported(infos[0], infos[1], reasonIfUnsupported);
426  return support.IsFullyConnectedSupported(infos[0],
427  infos[1],
428  infos[2],
429  infos[3],
430  *(PolymorphicDowncast<const
431  FullyConnectedDescriptor*>(&descriptor)),
432  reasonIfUnsupported);
433  case LayerType::Gather:
434  return support.IsGatherSupported(infos[0],
435  infos[1],
436  infos[2],
437  *(PolymorphicDowncast<const GatherDescriptor*>(&descriptor)),
438  reasonIfUnsupported);
439  case LayerType::GatherNd:
440  return support.IsGatherNdSupported(infos[0],
441  infos[1],
442  infos[2],
443  reasonIfUnsupported);
444  case LayerType::Input:
445  return support.IsInputSupported(infos[0], reasonIfUnsupported);
447  return support.IsInstanceNormalizationSupported(infos[0],
448  infos[1],
449  *(PolymorphicDowncast<const
450  InstanceNormalizationDescriptor*>(&descriptor)),
451  reasonIfUnsupported);
453  return support.IsL2NormalizationSupported(infos[0],
454  infos[1],
455  *(PolymorphicDowncast<const
456  L2NormalizationDescriptor*>(&descriptor)),
457  reasonIfUnsupported);
459  return support.IsLogicalBinarySupported(infos[0],
460  infos[1],
461  infos[2],
462  *(PolymorphicDowncast<const
463  LogicalBinaryDescriptor*>(&descriptor)),
464  reasonIfUnsupported);
466  return support.IsLogSoftmaxSupported(infos[0],
467  infos[1],
468  *(PolymorphicDowncast<const LogSoftmaxDescriptor*>(&descriptor)),
469  reasonIfUnsupported);
470  case LayerType::Lstm:
471  return support.IsLstmSupported(infos[0],
472  infos[1],
473  infos[2],
474  infos[3],
475  infos[4],
476  infos[5],
477  infos[6],
478  *(PolymorphicDowncast<const LstmDescriptor*>(&descriptor)),
479  lstmParamsInfo.value(),
480  reasonIfUnsupported);
481  case LayerType::Map:
482  return true;
483  case LayerType::Maximum:
484  return support.IsMaximumSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
485  case LayerType::Mean:
486  return support.IsMeanSupported(infos[0],
487  infos[1],
488  *(PolymorphicDowncast<const MeanDescriptor*>(&descriptor)),
489  reasonIfUnsupported);
490  case LayerType::MemCopy:
491  return support.IsMemCopySupported(infos[0], infos[1], reasonIfUnsupported);
493  return support.IsMemImportSupported(infos[0], infos[1], reasonIfUnsupported);
494  case LayerType::Merge:
495  return support.IsMergeSupported(infos[0],
496  infos[1],
497  infos[2],
498  reasonIfUnsupported);
499  case LayerType::Minimum:
500  return support.IsMinimumSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
502  return support.IsMultiplicationSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
504  return support.IsNormalizationSupported(infos[0],
505  infos[1],
506  *(PolymorphicDowncast<const
507  NormalizationDescriptor*>(&descriptor)),
508  reasonIfUnsupported);
509  case LayerType::Output:
510  return support.IsOutputSupported(infos[0], reasonIfUnsupported);
511  case LayerType::Pad:
512  return support.IsPadSupported(infos[0],
513  infos[1],
514  *(PolymorphicDowncast<const PadDescriptor*>(&descriptor)),
515  reasonIfUnsupported);
516  case LayerType::Permute:
517  return support.IsPermuteSupported(infos[0],
518  infos[1],
519  *(PolymorphicDowncast<const PermuteDescriptor*>(&descriptor)),
520  reasonIfUnsupported);
522  return support.IsPooling2dSupported(infos[0],
523  infos[1],
524  *(PolymorphicDowncast<const Pooling2dDescriptor*>(&descriptor)),
525  reasonIfUnsupported);
527  return support.IsPooling3dSupported(infos[0],
528  infos[1],
529  *(PolymorphicDowncast<const Pooling3dDescriptor*>(&descriptor)),
530  reasonIfUnsupported);
531  case LayerType::Prelu:
532  return support.IsPreluSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
533  case LayerType::QLstm:
534  return support.IsQLstmSupported(infos[0],
535  infos[1],
536  infos[2],
537  infos[3],
538  infos[4],
539  infos[5],
540  *(PolymorphicDowncast<const QLstmDescriptor*>(&descriptor)),
541  lstmParamsInfo.value(),
542  reasonIfUnsupported);
543  case LayerType::Quantize:
544  return support.IsQuantizeSupported(infos[0], infos[1], reasonIfUnsupported);
546  return support.IsQuantizedLstmSupported(infos[0],
547  infos[1],
548  infos[2],
549  infos[3],
550  infos[4],
551  quantizedLstmParamsInfo.value(),
552  reasonIfUnsupported);
553  case LayerType::Rank:
554  return true;
555  case LayerType::Reshape:
556  return support.IsReshapeSupported(infos[0],
557  infos[1],
558  *(PolymorphicDowncast<const ReshapeDescriptor*>(&descriptor)),
559  reasonIfUnsupported);
560  case LayerType::Resize:
561  return support.IsResizeSupported(infos[0],
562  infos[1],
563  *(PolymorphicDowncast<const ResizeDescriptor*>(&descriptor)),
564  reasonIfUnsupported);
565  case LayerType::Reduce:
566  return support.IsReduceSupported(infos[0],
567  infos[1],
568  *(PolymorphicDowncast<const ReduceDescriptor*>(&descriptor)),
569  reasonIfUnsupported);
570  case LayerType::Shape:
571  return support.IsShapeSupported(infos[0],
572  infos[1],
573  reasonIfUnsupported);
574  case LayerType::Slice:
575  return support.IsSliceSupported(infos[0],
576  infos[1],
577  *(PolymorphicDowncast<const SliceDescriptor*>(&descriptor)),
578  reasonIfUnsupported);
579  case LayerType::Softmax:
580  return support.IsSoftmaxSupported(infos[0],
581  infos[1],
582  *(PolymorphicDowncast<const SoftmaxDescriptor*>(&descriptor)),
583  reasonIfUnsupported);
585  return support.IsSpaceToBatchNdSupported(infos[0],
586  infos[1],
587  *(PolymorphicDowncast<const
588  SpaceToBatchNdDescriptor*>(&descriptor)),
589  reasonIfUnsupported);
591  return support.IsSpaceToDepthSupported(infos[0],
592  infos[1],
593  *(PolymorphicDowncast<const SpaceToDepthDescriptor*>(&descriptor)),
594  reasonIfUnsupported);
595  case LayerType::Splitter:
596  {
597  std::vector<TensorInfo> outputInfos;
598  for (uint32_t i = 1; i < infos.size(); i++)
599  {
600  outputInfos.push_back(infos[i]);
601  }
602  return support.IsSplitterSupported(infos[0],
603  {outputInfos.begin(), outputInfos.end()},
604  *(PolymorphicDowncast<const ViewsDescriptor*>(&descriptor)),
605  reasonIfUnsupported);
606  }
607  case LayerType::Stack:
608  {
609  std::vector<const TensorInfo*> inputInfos;
610  for (uint32_t i = 0; i < infos.size() - 1; i++)
611  {
612  inputInfos.push_back(&infos[i]);
613  }
614  return support.IsStackSupported(inputInfos,
615  infos[infos.size() - 1],
616  *(PolymorphicDowncast<const StackDescriptor*>(&descriptor)),
617  reasonIfUnsupported);
618  }
620  return support.IsStridedSliceSupported(infos[0],
621  infos[1],
622  *(PolymorphicDowncast<const StridedSliceDescriptor*>(&descriptor)),
623  reasonIfUnsupported);
625  return support.IsSubtractionSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
626  case LayerType::Tile:
627  return support.IsTileSupported(infos[0],
628  infos[1],
629  *(PolymorphicDowncast<const TileDescriptor*>(&descriptor)),
630  reasonIfUnsupported);
632  return support.IsTransposeSupported(infos[0],
633  infos[1],
634  *(PolymorphicDowncast<const TransposeDescriptor*>(&descriptor)),
635  reasonIfUnsupported);
637  {
638  if (infos.size() != 4)
639  {
640  throw InvalidArgumentException("Invalid number of TransposeConvolution2d TensorInfos. "
641  "TensorInfos should be of format: {input, output, weights, biases}.");
642  }
643 
644  auto desc = *(PolymorphicDowncast<const TransposeConvolution2dDescriptor*>(&descriptor));
645  if (infos[3] == TensorInfo())
646  {
647  return support.IsTransposeConvolution2dSupported(infos[0],
648  infos[1],
649  desc,
650  infos[2],
651  EmptyOptional(),
652  reasonIfUnsupported);
653  }
654  else
655  {
656  return support.IsTransposeConvolution2dSupported(infos[0],
657  infos[1],
658  desc,
659  infos[2],
660  infos[3],
661  reasonIfUnsupported);
662  }
663  }
665  {
666  auto desc = *(PolymorphicDowncast<const UnidirectionalSequenceLstmDescriptor*>(&descriptor));
667  return support.IsUnidirectionalSequenceLstmSupported(infos[0],
668  infos[1],
669  infos[2],
670  infos[3],
671  infos[4],
672  infos[5],
673  desc,
674  lstmParamsInfo.value(),
675  reasonIfUnsupported);
676  }
677  case LayerType::Unmap:
678  return true;
679  default:
680  // layers not supported in neon by default:
681  // debug, fakequantization, precompiled,
682  // standin, switch
683  return false;
684  }
685 }
686 
688  const std::vector<TensorInfo>& infos,
689  const BaseDescriptor& descriptor,
690  const Optional<LstmInputParamsInfo>& lstmParamsInfo,
691  const Optional<QuantizedLstmInputParamsInfo>& quantizedLstmParamsInfo,
692  Optional<std::string&> reasonIfUnsupported) const
693 {
694  bool isSupported = IsLayerTypeSupported(type,
695  infos,
696  descriptor,
697  lstmParamsInfo,
698  quantizedLstmParamsInfo,
699  reasonIfUnsupported,
700  *this);
701 
702  // For android-nn-driver and support library, to run FP16 operations on CpuAcc we need at least v8.2
703  // architecture. If the available architecture is older than v8.2, we can check if the operator is
704  // supported by changing operator inputs & outputs to be FP32.
705  // This does not change the operator datatype in the above parsers to be FP32. We are simply reporting
706  // to the parsers if the operator can supported in ArmNN. We will then re-enter ArmNN (Network.cpp)
707  // where we will recheck IsLayerSupported() on the FP16 datatype, update the operator to be FP32,
708  // and, insert convert layers around the FP32 operator.
709  if (reasonIfUnsupported.has_value())
710  {
711  std::string checkStr = "This CPU architecture does not support F16 data type, you need v8.2 or above";
712  if (!isSupported
713  && reasonIfUnsupported.value().find(checkStr) != std::string::npos)
714  {
715  std::vector<TensorInfo> newInfos;
716  for (auto info: infos)
717  {
718  newInfos.emplace_back(OverrideDataType(info, DataType::Float32));
719  }
720 
721  std::string tmpString;
722  return IsLayerTypeSupported(type,
723  newInfos,
724  descriptor,
725  lstmParamsInfo,
726  quantizedLstmParamsInfo,
727  tmpString,
728  *this);
729  }
730  }
731 
732  return isSupported;
733 }
734 
736  const TensorInfo& output,
737  const ActivationDescriptor& descriptor,
738  Optional<std::string&> reasonIfUnsupported) const
739 {
740  IgnoreUnused(descriptor);
742  reasonIfUnsupported,
743  input,
744  output,
745  descriptor);
746 }
747 
749  const TensorInfo& input1,
750  const TensorInfo& output,
751  Optional<std::string&> reasonIfUnsupported) const
752 {
754  reasonIfUnsupported,
755  input0,
756  input1,
757  output,
758  nullptr);
759 }
760 
762  const TensorInfo& output,
763  const ArgMinMaxDescriptor& descriptor,
764  Optional<std::string&> reasonIfUnsupported) const
765 {
767  reasonIfUnsupported,
768  input,
769  output,
770  descriptor);
771 }
772 
774  const TensorInfo& inputY,
775  const TensorInfo& output,
776  const BatchMatMulDescriptor& descriptor,
777  Optional<std::string&> reasonIfUnsupported) const
778 {
779  bool isFastMathEnabled = false;
780 #if defined(ARMCOMPUTENEON_ENABLED)
781  if (m_ModelContextPtr)
782  {
783  if (m_ModelContextPtr.get() != nullptr)
784  {
785  auto modelOptions = dynamic_cast<NeonBackendModelContext*>(m_ModelContextPtr.get());
786  if (modelOptions)
787  {
788  isFastMathEnabled = modelOptions->IsFastMathEnabled();
789  }
790  }
791  }
792 #endif
794  reasonIfUnsupported,
795  inputX,
796  inputY,
797  output,
798  descriptor,
799  isFastMathEnabled,
800  nullptr);
801 }
802 
804  const TensorInfo& output,
805  const TensorInfo& mean,
806  const TensorInfo& var,
807  const TensorInfo& beta,
808  const TensorInfo& gamma,
809  const BatchNormalizationDescriptor& descriptor,
810  Optional<std::string&> reasonIfUnsupported) const
811 {
813  reasonIfUnsupported,
814  input,
815  output,
816  mean,
817  var,
818  beta,
819  gamma,
820  descriptor,
821  nullptr);
822 }
823 
825  const TensorInfo& output,
826  const BatchToSpaceNdDescriptor& descriptor,
827  Optional<std::string&> reasonIfUnsupported) const
828 {
830  reasonIfUnsupported,
831  input,
832  output,
833  descriptor);
834 }
835 
837  const TensorInfo& output,
838  Optional<std::string&> reasonIfUnsupported) const
839 {
841  reasonIfUnsupported,
842  input,
843  output);
844 }
845 
847  const TensorInfo& output,
848  const ChannelShuffleDescriptor& descriptor,
849  Optional<std::string&> reasonIfUnsupported) const
850 {
852  reasonIfUnsupported,
853  input,
854  output,
855  descriptor);
856 }
857 
859  const TensorInfo& input1,
860  const TensorInfo& output,
861  const ComparisonDescriptor& descriptor,
862  Optional<std::string&> reasonIfUnsupported) const
863 {
864 
866  reasonIfUnsupported,
867  input0,
868  input1,
869  output,
870  descriptor);
871 }
872 
873 bool NeonLayerSupport::IsConcatSupported(const std::vector<const TensorInfo*> inputs,
874  const TensorInfo& output,
875  const OriginsDescriptor& descriptor,
876  Optional<std::string&> reasonIfUnsupported) const
877 {
878  if (descriptor.GetNumDimensions() <= descriptor.GetConcatAxis())
879  {
880  SetValueChecked(reasonIfUnsupported, "Neon Concat: Concat axis > Number of dimensions.");
881  return false;
882  }
883 
884  unsigned int concatInnerAxis = (descriptor.GetNumDimensions() - descriptor.GetConcatAxis()) - 1;
885  if(concatInnerAxis < 3) // Width, height, or channels
886  {
888  reasonIfUnsupported,
889  inputs,
890  output,
891  descriptor);
892  }
893  else if (concatInnerAxis == 3)
894  {
895  for (auto& input : inputs)
896  {
897  if (input && !output.IsTypeSpaceMatch(*input)) // Cannot use sub-tensors if the types are not same space
898  {
899  SetValueChecked(reasonIfUnsupported, "Neon Concat: Types and quantization parameters must match.");
900  return false;
901  }
902  }
903  return true; // Sub-tensors support concat along batch
904  }
905  else // > 4 dimensions not supported.
906  {
907  SetValueChecked(reasonIfUnsupported, "Neon Concat: Maximum of 4 dimensions supported.");
908  return false;
909  }
910 }
911 
913  Optional<std::string&> reasonIfUnsupported) const
914 {
916  reasonIfUnsupported,
917  output);
918 }
919 
921  const TensorInfo& output,
922  Optional<std::string&> reasonIfUnsupported) const
923 {
925  reasonIfUnsupported,
926  input,
927  output);
928 }
929 
931  const TensorInfo& output,
932  Optional<std::string&> reasonIfUnsupported) const
933 {
935  reasonIfUnsupported,
936  input,
937  output);
938 }
939 
941  const TensorInfo& output,
942  const Convolution2dDescriptor& descriptor,
943  const TensorInfo& weights,
944  const Optional<TensorInfo>& biases,
945  Optional<std::string&> reasonIfUnsupported) const
946 {
947  bool isFastMathEnabled = false;
948 #if defined(ARMCOMPUTENEON_ENABLED)
949  if (m_ModelContextPtr)
950  {
951  if (m_ModelContextPtr.get() != nullptr)
952  {
953  auto modelOptions = dynamic_cast<NeonBackendModelContext*>(m_ModelContextPtr.get());
954  if (modelOptions)
955  {
956  isFastMathEnabled = modelOptions->IsFastMathEnabled();
957  }
958  }
959  }
960 #endif
961 
963  reasonIfUnsupported,
964  input,
965  output,
966  descriptor,
967  weights,
968  biases,
969  isFastMathEnabled,
970  nullptr);
971 }
972 
974  const TensorInfo& output,
975  const Convolution3dDescriptor& descriptor,
976  const TensorInfo& weights,
977  const Optional<TensorInfo>& biases,
978  Optional<std::string&> reasonIfUnsupported) const
979 {
980  bool isFastMathEnabled = false;
981 #if defined(ARMCOMPUTENEON_ENABLED)
982  if (m_ModelContextPtr)
983  {
984  if (m_ModelContextPtr.get() != nullptr)
985  {
986  auto modelOptions = dynamic_cast<NeonBackendModelContext*>(m_ModelContextPtr.get());
987  if (modelOptions)
988  {
989  isFastMathEnabled = modelOptions->IsFastMathEnabled();
990  }
991  }
992  }
993 #endif
994 
996  reasonIfUnsupported,
997  input,
998  output,
999  descriptor,
1000  weights,
1001  biases,
1002  isFastMathEnabled,
1003  nullptr);
1004 }
1005 
1007  const TensorInfo& output,
1008  const DepthToSpaceDescriptor& descriptor,
1009  Optional<std::string&> reasonIfUnsupported) const
1010 {
1012  reasonIfUnsupported,
1013  input,
1014  output,
1015  descriptor);
1016 }
1017 
1019  const TensorInfo& output,
1020  const DepthwiseConvolution2dDescriptor& descriptor,
1021  const TensorInfo& weights,
1022  const Optional<TensorInfo>& biases,
1023  Optional<std::string&> reasonIfUnsupported) const
1024 {
1026  reasonIfUnsupported,
1027  input,
1028  output,
1029  descriptor,
1030  weights,
1031  biases,
1032  nullptr);
1033 }
1034 
1036  const TensorInfo& output,
1037  Optional<std::string&> reasonIfUnsupported) const
1038 {
1040  reasonIfUnsupported,
1041  input,
1042  output);
1043 }
1044 
1046  const TensorInfo& output,
1047  const DepthwiseConvolution2dDescriptor& descriptor,
1048  const TensorInfo& weights,
1049  const Optional<TensorInfo>& biases,
1050  Optional<std::string&> reasonIfUnsupported) const
1051 {
1053  reasonIfUnsupported,
1054  input,
1055  output,
1056  descriptor,
1057  weights,
1058  biases,
1059  nullptr);
1060 }
1061 
1063  const TensorInfo& output,
1064  const ElementwiseUnaryDescriptor& descriptor,
1065  Optional<std::string&> reasonIfUnsupported) const
1066 {
1067  switch(descriptor.m_Operation)
1068  {
1069  case UnaryOperation::Abs:
1071  reasonIfUnsupported,
1072  input,
1073  output);
1074  case UnaryOperation::Exp:
1076  reasonIfUnsupported,
1077  input,
1078  output);
1081  reasonIfUnsupported,
1082  input,
1083  output);
1084  case UnaryOperation::Log:
1086  reasonIfUnsupported,
1087  input,
1088  output);
1089  case UnaryOperation::Neg:
1091  reasonIfUnsupported,
1092  input,
1093  output);
1094  case UnaryOperation::Rsqrt:
1096  reasonIfUnsupported,
1097  input,
1098  output);
1099  case UnaryOperation::Sin:
1101  reasonIfUnsupported,
1102  input,
1103  output);
1104  case UnaryOperation::Sqrt:
1106  reasonIfUnsupported,
1107  input,
1108  output);
1109  default:
1110  return false;
1111  }
1112 }
1113 
1115  const TensorInfo& output,
1116  const FillDescriptor& descriptor,
1117  Optional<std::string&> reasonIfUnsupported) const
1118 {
1119  armnn::IgnoreUnused(input);
1120  armnn::IgnoreUnused(output);
1121  armnn::IgnoreUnused(descriptor);
1122 
1123  return IsNeonBackendSupported(reasonIfUnsupported);
1124 }
1125 
1127  const TensorInfo& output,
1128  Optional<std::string&> reasonIfUnsupported) const
1129 {
1130  armnn::IgnoreUnused(output);
1131  return IsNeonBackendSupported(reasonIfUnsupported) &&
1132  IsSupportedForDataTypeGeneric(reasonIfUnsupported,
1133  input.GetDataType(),
1134  &FalseFuncF16<>,
1135  &TrueFunc<>,
1136  &FalseFuncU8<>,
1137  &FalseFuncI32<>,
1138  &FalseFuncU8<>);
1139 }
1140 
1142  const TensorInfo& output,
1143  const TensorInfo& weights,
1144  const TensorInfo& biases,
1145  const FullyConnectedDescriptor& descriptor,
1146  Optional<std::string&> reasonIfUnsupported) const
1147 {
1149  reasonIfUnsupported,
1150  input,
1151  output,
1152  weights,
1153  biases,
1154  descriptor,
1155  nullptr);
1156 }
1157 
1159  const TensorInfo& input1,
1160  const TensorInfo& output,
1161  const GatherDescriptor& descriptor,
1162  Optional<std::string&> reasonIfUnsupported) const
1163 {
1165  reasonIfUnsupported,
1166  input0,
1167  input1,
1168  output,
1169  descriptor);
1170 }
1171 
1173  const TensorInfo& input1,
1174  const TensorInfo& output,
1175  Optional<std::string&> reasonIfUnsupported) const
1176 {
1178  reasonIfUnsupported,
1179  input0,
1180  input1,
1181  output);
1182 }
1183 
1185  Optional<std::string&> reasonIfUnsupported) const
1186 {
1187  return IsNeonBackendSupported(reasonIfUnsupported, input);
1188 }
1189 
1191  const TensorInfo& output,
1192  const InstanceNormalizationDescriptor& descriptor,
1193  Optional<std::string&> reasonIfUnsupported) const
1194 {
1196  reasonIfUnsupported,
1197  input,
1198  output,
1199  descriptor);
1200 }
1201 
1203  const TensorInfo& output,
1204  const L2NormalizationDescriptor& descriptor,
1205  Optional<std::string&> reasonIfUnsupported) const
1206 {
1207  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonL2NormalizationWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
1208 }
1209 
1211  const TensorInfo& input1,
1212  const TensorInfo& output,
1213  const LogicalBinaryDescriptor& descriptor,
1214  Optional<std::string&> reasonIfUnsupported) const
1215 {
1216  switch(descriptor.m_Operation)
1217  {
1220  reasonIfUnsupported,
1221  input0,
1222  input1,
1223  output);
1226  reasonIfUnsupported,
1227  input0,
1228  input1,
1229  output);
1230  default:
1231  return false;
1232  }
1233 }
1234 
1236  const TensorInfo& output,
1237  const LogSoftmaxDescriptor& descriptor,
1238  Optional<std::string&> reasonIfUnsupported) const
1239 {
1240  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonLogSoftmaxWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
1241 }
1242 
1244  const TensorInfo& outputStateIn,
1245  const TensorInfo& cellStateIn,
1246  const TensorInfo& scratchBuffer,
1247  const TensorInfo& outputStateOut,
1248  const TensorInfo& cellStateOut,
1249  const TensorInfo& output,
1250  const LstmDescriptor& descriptor,
1251  const LstmInputParamsInfo& paramsInfo,
1252  Optional<std::string&> reasonIfUnsupported) const
1253 {
1255  reasonIfUnsupported,
1256  input,
1257  outputStateIn,
1258  cellStateIn,
1259  scratchBuffer,
1260  outputStateOut,
1261  cellStateOut,
1262  output,
1263  descriptor,
1264  paramsInfo);
1265 }
1266 
1268  const TensorInfo& input1,
1269  const TensorInfo& output,
1270  Optional<std::string&> reasonIfUnsupported) const
1271 {
1273  reasonIfUnsupported,
1274  input0,
1275  input1,
1276  output);
1277 }
1278 
1280  const TensorInfo& output,
1281  const MeanDescriptor& descriptor,
1282  Optional<std::string&> reasonIfUnsupported) const
1283 {
1285  reasonIfUnsupported,
1286  input,
1287  output,
1288  descriptor);
1289 }
1290 
1292  const TensorInfo& input1,
1293  const TensorInfo& output,
1294  Optional<std::string&> reasonIfUnsupported) const
1295 {
1297  reasonIfUnsupported,
1298  input0,
1299  input1,
1300  output);
1301 }
1302 
1304  const TensorInfo& input1,
1305  const TensorInfo& output,
1306  Optional<std::string&> reasonIfUnsupported) const
1307 {
1309  reasonIfUnsupported,
1310  input0,
1311  input1,
1312  output,
1313  nullptr);
1314 }
1315 
1317  const TensorInfo& input1,
1318  const TensorInfo& output,
1319  Optional<std::string&> reasonIfUnsupported) const
1320 {
1322  reasonIfUnsupported,
1323  input0,
1324  input1,
1325  output,
1326  nullptr);
1327 }
1328 
1330  const TensorInfo& output,
1331  const NormalizationDescriptor& descriptor,
1332  Optional<std::string&> reasonIfUnsupported) const
1333 {
1335  reasonIfUnsupported,
1336  input,
1337  output,
1338  descriptor);
1339 }
1340 
1342  Optional<std::string&> reasonIfUnsupported) const
1343 {
1344  return IsNeonBackendSupported(reasonIfUnsupported, output);
1345 }
1346 
1348  const TensorInfo& output,
1349  const PadDescriptor& descriptor,
1350  Optional<std::string&> reasonIfUnsupported) const
1351 {
1353  reasonIfUnsupported,
1354  input,
1355  output,
1356  descriptor);
1357 }
1358 
1360  const TensorInfo& output,
1361  const PermuteDescriptor& descriptor,
1362  Optional<std::string&> reasonIfUnsupported) const
1363 {
1364  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonPermuteWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
1365 }
1366 
1368  const TensorInfo& output,
1369  const Pooling2dDescriptor& descriptor,
1370  Optional<std::string&> reasonIfUnsupported) const
1371 {
1372  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonPooling2dWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
1373 }
1374 
1376  const TensorInfo& output,
1377  const Pooling3dDescriptor& descriptor,
1378  Optional<std::string&> reasonIfUnsupported) const
1379 {
1380  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonPooling3dWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
1381 }
1382 
1384  const armnn::TensorInfo &alpha,
1385  const armnn::TensorInfo &output,
1386  armnn::Optional<std::string &> reasonIfUnsupported) const
1387 {
1388  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonPreluWorkloadValidate, reasonIfUnsupported, input, alpha, output);
1389 }
1390 
1392  const TensorInfo& previousOutputIn,
1393  const TensorInfo& previousCellStateIn,
1394  const TensorInfo& outputStateOut,
1395  const TensorInfo& cellStateOut,
1396  const TensorInfo& output,
1397  const QLstmDescriptor& descriptor,
1398  const LstmInputParamsInfo& paramsInfo,
1399  Optional<std::string&> reasonIfUnsupported) const
1400 {
1401  // Check required here in order to pass IsLayerSupported for datatypes tests
1402  if (input.GetDataType() == armnn::DataType::QAsymmS8 &&
1403  previousOutputIn.GetDataType() == armnn::DataType::QAsymmS8 &&
1404  previousCellStateIn.GetDataType() == armnn::DataType::QSymmS16 &&
1405  outputStateOut.GetDataType() == armnn::DataType::QAsymmS8 &&
1406  cellStateOut.GetDataType() == armnn::DataType::QSymmS16 &&
1408  {
1410  reasonIfUnsupported,
1411  input,
1412  previousCellStateIn,
1413  previousOutputIn,
1414  cellStateOut,
1415  outputStateOut,
1416  output,
1417  descriptor,
1418  paramsInfo);
1419  }
1420  else
1421  {
1422  return false;
1423  }
1424 }
1425 
1427  const TensorInfo& output,
1428  Optional<std::string&> reasonIfUnsupported) const
1429 {
1431  reasonIfUnsupported,
1432  input,
1433  output);
1434 }
1435 
1437  const TensorInfo& cellStateIn,
1438  const TensorInfo& outputStateIn,
1439  const TensorInfo& cellStateOut,
1440  const TensorInfo& outputStateOut,
1441  const QuantizedLstmInputParamsInfo& paramsInfo,
1442  Optional<std::string&> reasonIfUnsupported) const
1443 {
1445  reasonIfUnsupported,
1446  input,
1447  cellStateIn,
1448  outputStateIn,
1449  cellStateOut,
1450  outputStateOut,
1451  paramsInfo);
1452 }
1453 
1455  const TensorInfo& output,
1456  const ReduceDescriptor& descriptor,
1457  Optional<std::string&> reasonIfUnsupported) const
1458 {
1460  reasonIfUnsupported,
1461  input,
1462  output,
1463  descriptor);
1464 }
1465 
1467  const TensorInfo& output,
1468  const ReshapeDescriptor& descriptor,
1469  Optional<std::string&> reasonIfUnsupported) const
1470 {
1471  armnn::IgnoreUnused(descriptor);
1473  reasonIfUnsupported,
1474  input,
1475  output);
1476 }
1477 
1479  const TensorInfo& output,
1480  const ResizeDescriptor& descriptor,
1481  Optional<std::string&> reasonIfUnsupported) const
1482 {
1484  reasonIfUnsupported,
1485  input,
1486  output,
1487  descriptor);
1488 }
1489 
1491  const TensorInfo& output,
1492  const SliceDescriptor& descriptor,
1493  Optional<std::string&> reasonIfUnsupported) const
1494 {
1496  reasonIfUnsupported,
1497  input,
1498  output,
1499  descriptor);
1500 }
1501 
1503  const TensorInfo& output,
1504  const SoftmaxDescriptor& descriptor,
1505  Optional<std::string&> reasonIfUnsupported) const
1506 {
1507  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonSoftmaxWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
1508 }
1509 
1511  const TensorInfo& output,
1512  const SpaceToBatchNdDescriptor& descriptor,
1513  Optional<std::string&> reasonIfUnsupported) const
1514 {
1516  reasonIfUnsupported,
1517  input,
1518  output,
1519  descriptor);
1520 }
1521 
1523  const TensorInfo& output,
1524  const SpaceToDepthDescriptor& descriptor,
1525  Optional<std::string&> reasonIfUnsupported) const
1526 {
1528  reasonIfUnsupported,
1529  input,
1530  output,
1531  descriptor);
1532 }
1533 
1535  const std::vector<std::reference_wrapper<TensorInfo>>& outputs,
1536  const ViewsDescriptor& descriptor,
1537  Optional<std::string&> reasonIfUnsupported) const
1538 {
1539 #if defined(ARMCOMPUTENEON_ENABLED)
1540  // Split along the last dimension, cannot use sub-tensors
1541  // as width and height of the sub-tensors do not match
1542  // the width and height of the parent tensor
1543  // in case of input with more than 2D.
1544  std::set<unsigned int> splitAxis = ComputeSplitAxis(descriptor, input.GetShape());
1545  if (descriptor.GetNumDimensions() > 2 && splitAxis.size() == 1 &&
1546  *splitAxis.begin() == descriptor.GetNumDimensions() - 1 )
1547  {
1549  reasonIfUnsupported,
1550  input,
1551  outputs,
1552  *splitAxis.begin());
1553  }
1554 #endif
1555  IgnoreUnused(descriptor);
1556  for (auto output : outputs)
1557  {
1558  if (!input.IsTypeSpaceMatch(output)) // Cannot use sub-tensors if the types are not same space
1559  {
1560  SetValueChecked(reasonIfUnsupported, "Neon Splitter: Types and quantization parameters must match.");
1561  return false;
1562  }
1563  }
1564  return true;
1565 }
1566 
1567 bool NeonLayerSupport::IsStackSupported(const std::vector<const TensorInfo*>& inputs,
1568  const TensorInfo& output,
1569  const StackDescriptor& descriptor,
1570  Optional<std::string&> reasonIfUnsupported) const
1571 {
1573  reasonIfUnsupported,
1574  inputs,
1575  output,
1576  descriptor);
1577 }
1578 
1580  const TensorInfo& output,
1581  const StridedSliceDescriptor& descriptor,
1582  Optional<std::string&> reasonIfUnsupported) const
1583 {
1585  reasonIfUnsupported,
1586  input,
1587  output,
1588  descriptor);
1589 }
1590 
1592  const TensorInfo& input1,
1593  const TensorInfo& output,
1594  Optional<std::string&> reasonIfUnsupported) const
1595 {
1597  reasonIfUnsupported,
1598  input0,
1599  input1,
1600  output,
1601  nullptr);
1602 }
1603 
1605  const TensorInfo& output,
1606  const TileDescriptor& descriptor,
1607  Optional<std::string&> reasonIfUnsupported) const
1608 {
1610  reasonIfUnsupported,
1611  input,
1612  output,
1613  descriptor);
1614 }
1615 
1617  const TensorInfo& output,
1618  const TransposeConvolution2dDescriptor& descriptor,
1619  const TensorInfo& weights,
1620  const Optional<TensorInfo>& biases,
1621  Optional<std::string&> reasonIfUnsupported) const
1622 {
1624  reasonIfUnsupported,
1625  input,
1626  output,
1627  descriptor,
1628  weights,
1629  biases);
1630 }
1631 
1633  const TensorInfo& output,
1634  const TransposeDescriptor& descriptor,
1635  Optional<std::string&> reasonIfUnsupported) const
1636 {
1637  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonTransposeWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
1638 }
1639 
1641  const TensorInfo& outputStateIn,
1642  const TensorInfo& cellStateIn,
1643  const TensorInfo& outputStateOut,
1644  const TensorInfo& cellStateOut,
1645  const TensorInfo& output,
1646  const UnidirectionalSequenceLstmDescriptor& descriptor,
1647  const LstmInputParamsInfo& paramsInfo,
1648  Optional<std::string&> reasonIfUnsupported) const
1649 {
1650  if (input.GetDataType() == armnn::DataType::QAsymmS8 &&
1651  outputStateIn.GetDataType() == armnn::DataType::QAsymmS8 &&
1652  cellStateIn.GetDataType() == armnn::DataType::QSymmS16 &&
1653  outputStateOut.GetDataType() == armnn::DataType::QAsymmS8 &&
1654  cellStateOut.GetDataType() == armnn::DataType::QSymmS16 &&
1656  {
1658  reasonIfUnsupported,
1659  input,
1660  outputStateIn,
1661  cellStateIn,
1662  outputStateOut,
1663  cellStateOut,
1664  output,
1665  descriptor,
1666  paramsInfo);
1667  }
1668  else
1669  {
1671  reasonIfUnsupported,
1672  input,
1673  outputStateIn,
1674  cellStateIn,
1675  outputStateOut,
1676  cellStateOut,
1677  output,
1678  descriptor,
1679  paramsInfo);
1680  }
1681 }
1682 
1683 } // namespace armnn
armnn::BatchNormalizationDescriptor
A BatchNormalizationDescriptor for the BatchNormalizationLayer.
Definition: Descriptors.hpp:828
armnn::NeonLayerSupport::IsAdditionSupported
bool IsAdditionSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:748
armnn::LayerType::SpaceToDepth
@ SpaceToDepth
NeonConcatWorkload.hpp
armnn::NeonMinimumWorkloadValidate
arm_compute::Status NeonMinimumWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output)
Validate function for validating the inputs and output.
Definition: NeonMinimumWorkload.cpp:15
NeonComparisonWorkload.hpp
armnn::NeonFullyConnectedWorkloadValidate
arm_compute::Status NeonFullyConnectedWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const TensorInfo &weights, const Optional< TensorInfo > &biases, const FullyConnectedDescriptor &descriptor, const ActivationDescriptor *activationDescriptor)
Definition: NeonFullyConnectedWorkload.cpp:24
armnn::NeonSpaceToBatchNdWorkloadValidate
arm_compute::Status NeonSpaceToBatchNdWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const SpaceToBatchNdDescriptor &descriptor)
Definition: NeonSpaceToBatchNdWorkload.cpp:15
armnn::NeonLayerSupport::IsDequantizeSupported
bool IsDequantizeSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1035
NeonConvertFp16ToFp32Workload.hpp
armnn::OriginsDescriptor::GetConcatAxis
unsigned int GetConcatAxis() const
Get the concatenation axis value.
Definition: Descriptors.cpp:162
armnn::BinaryOperation::Mul
@ Mul
NeonAbsWorkload.hpp
NeonNegWorkload.hpp
armnn::ViewsDescriptor
A ViewsDescriptor for the SplitterLayer.
Definition: Descriptors.hpp:244
armnn::NeonTileWorkloadValidate
arm_compute::Status NeonTileWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const TileDescriptor &descriptor)
Definition: NeonTileWorkload.cpp:14
armnn::LayerType::Permute
@ Permute
armnn::ActivationDescriptor
An ActivationDescriptor for the ActivationLayer.
Definition: Descriptors.hpp:36
armnn::NeonConvertFp32ToFp16WorkloadValidate
arm_compute::Status NeonConvertFp32ToFp16WorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: NeonConvertFp32ToFp16Workload.cpp:21
armnn::FullyConnectedDescriptor
A FullyConnectedDescriptor for the FullyConnectedLayer.
Definition: Descriptors.hpp:507
armnn::BinaryOperation::Add
@ Add
armnn::NeonAdditionWorkloadValidate
arm_compute::Status NeonAdditionWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ActivationDescriptor *activationDescriptor)
Definition: NeonAdditionWorkload.cpp:20
NeonSoftmaxWorkload.hpp
armnn::NeonLayerSupport::IsSpaceToBatchNdSupported
bool IsSpaceToBatchNdSupported(const TensorInfo &input, const TensorInfo &output, const SpaceToBatchNdDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1510
NeonExpWorkload.hpp
armnn::LayerType::Splitter
@ Splitter
armnn::LayerType::BatchNormalization
@ BatchNormalization
armnn::QLstmDescriptor
A QLstmDescriptor for the QLstmLayer.
Definition: Descriptors.hpp:1359
armnn::Optional
Definition: Optional.hpp:270
armnn::NeonAbsWorkloadValidate
arm_compute::Status NeonAbsWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: NeonAbsWorkload.cpp:17
armnn::NeonMultiplicationWorkloadValidate
arm_compute::Status NeonMultiplicationWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ActivationDescriptor *activationDescriptor)
Definition: NeonMultiplicationWorkload.cpp:19
NeonStridedSliceWorkload.hpp
armnn::NeonLayerSupport::IsPermuteSupported
bool IsPermuteSupported(const TensorInfo &input, const TensorInfo &output, const PermuteDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1359
NeonNormalizationFloatWorkload.hpp
armnn::NeonBackendModelContext::IsFastMathEnabled
bool IsFastMathEnabled() const
Definition: NeonBackendModelContext.cpp:53
armnn::NeonStackWorkloadValidate
arm_compute::Status NeonStackWorkloadValidate(const std::vector< const TensorInfo * > &inputs, const TensorInfo &output, const StackDescriptor &descriptor)
Definition: NeonStackWorkload.cpp:27
armnn::NeonPooling3dWorkloadValidate
arm_compute::Status NeonPooling3dWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const Pooling3dDescriptor &descriptor)
Definition: NeonPooling3dWorkload.cpp:15
armnn::NeonLayerSupport::IsStackSupported
bool IsStackSupported(const std::vector< const TensorInfo * > &inputs, const TensorInfo &output, const StackDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1567
NeonAdditionWorkload.hpp
NeonMeanWorkload.hpp
armnn::Pooling3dDescriptor
A Pooling3dDescriptor for the Pooling3dLayer.
Definition: Descriptors.hpp:431
armnn::ResizeDescriptor
A ResizeDescriptor for the ResizeLayer.
Definition: Descriptors.hpp:964
armnn::NeonNegWorkloadValidate
arm_compute::Status NeonNegWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: NeonNegWorkload.cpp:17
armnn::ArgMinMaxDescriptor
An ArgMinMaxDescriptor for ArgMinMaxLayer.
Definition: Descriptors.hpp:67
armnn::NeonQLstmWorkloadValidate
arm_compute::Status NeonQLstmWorkloadValidate(const TensorInfo &input, const TensorInfo &cellStateIn, const TensorInfo &outputStateIn, const TensorInfo &cellStateOut, const TensorInfo &outputStateOut, const TensorInfo &output, const QLstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo)
Definition: NeonQLstmWorkload.cpp:243
armnn::NeonGatherWorkloadValidate
arm_compute::Status NeonGatherWorkloadValidate(const TensorInfo &input, const TensorInfo &indices, const TensorInfo &output, const GatherDescriptor &descriptor)
Definition: NeonGatherWorkload.cpp:13
armnn::NeonConvolution3dWorkloadValidate
arm_compute::Status NeonConvolution3dWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const Convolution3dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, bool isFastMathEnabled, const ActivationDescriptor *activationDescriptor)
Definition: NeonConvolution3dWorkload.cpp:24
armnn::NeonSubtractionWorkloadValidate
arm_compute::Status NeonSubtractionWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ActivationDescriptor *activationDescriptor)
Definition: NeonSubtractionWorkload.cpp:22
armnn::InstanceNormalizationDescriptor
An InstanceNormalizationDescriptor for InstanceNormalizationLayer.
Definition: Descriptors.hpp:847
armnn::NeonInstanceNormalizationWorkloadValidate
arm_compute::Status NeonInstanceNormalizationWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const InstanceNormalizationDescriptor &descriptor)
Definition: NeonInstanceNormalizationWorkload.cpp:19
NeonUnidirectionalSequenceLstmFloatWorkload.hpp
armnn::GatherDescriptor
A GatherDescriptor for the GatherLayer.
Definition: Descriptors.hpp:944
armnn::NeonBackendModelContext
The NeonBackendModelContext is used to pass in Neon specific backend ModelOptions.
Definition: NeonBackendModelContext.hpp:19
armnn::LayerType::InstanceNormalization
@ InstanceNormalization
armnn::LayerType::ConvertFp16ToFp32
@ ConvertFp16ToFp32
armnn::NeonMeanWorkloadValidate
arm_compute::Status NeonMeanWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const MeanDescriptor &descriptor)
Definition: NeonMeanWorkload.cpp:18
armnn::NeonLayerSupport
Definition: NeonLayerSupport.hpp:14
armnn::NeonLayerSupport::IsBatchMatMulSupported
bool IsBatchMatMulSupported(const TensorInfo &inputX, const TensorInfo &inputY, const TensorInfo &output, const BatchMatMulDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:773
armnn::NeonLayerSupport::IsPooling3dSupported
bool IsPooling3dSupported(const TensorInfo &input, const TensorInfo &output, const Pooling3dDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1375
armnn::NeonLayerSupport::IsConvertFp32ToFp16Supported
bool IsConvertFp32ToFp16Supported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:930
armnn::NeonUnidirectionalSequenceLstmFloatWorkloadValidate
arm_compute::Status NeonUnidirectionalSequenceLstmFloatWorkloadValidate(const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const UnidirectionalSequenceLstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo)
Definition: NeonUnidirectionalSequenceLstmFloatWorkload.cpp:510
NeonDivisionWorkload.hpp
armnn::TensorInfo
Definition: Tensor.hpp:152
armnn::LayerType::Floor
@ Floor
armnn::NeonLayerSupport::IsPreluSupported
bool IsPreluSupported(const TensorInfo &input, const TensorInfo &alpha, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1383
armnn::L2NormalizationDescriptor
A L2NormalizationDescriptor for the L2NormalizationLayer.
Definition: Descriptors.hpp:809
armnn::NeonMaximumWorkloadValidate
arm_compute::Status NeonMaximumWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output)
Definition: NeonMaximumWorkload.cpp:14
NeonPermuteWorkload.hpp
armnn::OriginsDescriptor::GetNumDimensions
uint32_t GetNumDimensions() const
Get the number of dimensions.
Definition: Descriptors.cpp:192
armnn::BinaryOperation::Sub
@ Sub
armnn::LayerType::Transpose
@ Transpose
armnn::NormalizationDescriptor
A NormalizationDescriptor for the NormalizationLayer.
Definition: Descriptors.hpp:769
NeonBatchNormalizationWorkload.hpp
armnn::LayerType::Comparison
@ Comparison
armnn::LayerType::StridedSlice
@ StridedSlice
armnn::ChannelShuffleDescriptor
A ChannelShuffleDescriptor for the ChannelShuffle operator.
Definition: Descriptors.hpp:1541
armnn::DataType::Float32
@ Float32
NeonPadWorkload.hpp
armnn::NeonLayerSupport::IsMultiplicationSupported
bool IsMultiplicationSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1303
NeonQuantizeWorkload.hpp
NeonConvolution3dWorkload.hpp
armnn::LogicalBinaryOperation::LogicalOr
@ LogicalOr
NeonChannelShuffleWorkload.hpp
armnn::NeonResizeWorkloadValidate
arm_compute::Status NeonResizeWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const ResizeDescriptor &descriptor)
Definition: NeonResizeWorkload.cpp:22
armnn::LayerType::Tile
@ Tile
armnn::NeonExpWorkloadValidate
arm_compute::Status NeonExpWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: NeonExpWorkload.cpp:17
NeonLogicalAndWorkload.hpp
NeonConvolution2dWorkload.hpp
NeonPooling2dWorkload.hpp
armnn::NeonPermuteWorkloadValidate
arm_compute::Status NeonPermuteWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const PermuteDescriptor &descriptor)
Definition: NeonPermuteWorkload.cpp:15
armnn::LayerType::Stack
@ Stack
armnn::NeonLogicalNotWorkloadValidate
arm_compute::Status NeonLogicalNotWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: NeonLogicalNotWorkload.cpp:19
BackendRegistry.hpp
armnn::StackDescriptor
A StackDescriptor for the StackLayer.
Definition: Descriptors.hpp:1230
armnn::NeonLayerSupport::IsConstantSupported
bool IsConstantSupported(const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:912
IgnoreUnused.hpp
armnn::LayerType::Normalization
@ Normalization
armnn::NeonLayerSupport::IsLogicalBinarySupported
bool IsLogicalBinarySupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const LogicalBinaryDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported) const
Definition: NeonLayerSupport.cpp:1210
armnn::NeonBatchMatMulValidate
arm_compute::Status NeonBatchMatMulValidate(const TensorInfo &inputInfoX, const TensorInfo &inputInfoY, const TensorInfo &outputInfo, const BatchMatMulDescriptor &descriptor, const bool isFastMathEnabled, const ActivationDescriptor *activationDescriptor)
Definition: NeonBatchMatMulWorkload.cpp:19
armnn::NeonLayerSupport::IsBatchToSpaceNdSupported
bool IsBatchToSpaceNdSupported(const TensorInfo &input, const TensorInfo &output, const BatchToSpaceNdDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:824
armnn::LayerType::QuantizedLstm
@ QuantizedLstm
armnn::UnaryOperation::Neg
@ Neg
armnn::LayerType::Reduce
@ Reduce
NeonSpaceToDepthWorkload.hpp
armnn::LayerType::ElementwiseUnary
@ ElementwiseUnary
armnn::ComputeSplitAxis
std::set< unsigned int > ComputeSplitAxis(const armnn::SplitterDescriptor &desc, const TensorShape &input)
Definition: ArmComputeUtils.hpp:244
armnn::DataType::QSymmS16
@ QSymmS16
armnn::NeonConvertFp16ToFp32WorkloadValidate
arm_compute::Status NeonConvertFp16ToFp32WorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: NeonConvertFp16ToFp32Workload.cpp:19
armnn::LayerType::GatherNd
@ GatherNd
armnn::LayerType::ElementwiseBinary
@ ElementwiseBinary
NeonReshapeWorkload.hpp
armnn::NeonLayerSupport::IsReshapeSupported
bool IsReshapeSupported(const TensorInfo &input, const TensorInfo &output, const ReshapeDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1466
armnn::QuantizedLstmInputParamsInfo
Definition: QuantizedLstmParams.hpp:119
armnn::NeonLayerSupport::IsBatchNormalizationSupported
bool IsBatchNormalizationSupported(const TensorInfo &input, const TensorInfo &output, const TensorInfo &mean, const TensorInfo &var, const TensorInfo &beta, const TensorInfo &gamma, const BatchNormalizationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:803
armnn::LayerType::ConvertFp32ToFp16
@ ConvertFp32ToFp16
armnn::NeonLogWorkloadValidate
arm_compute::Status NeonLogWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: NeonLogWorkload.cpp:17
armnn::NeonArgMinMaxWorkloadValidate
arm_compute::Status NeonArgMinMaxWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const ArgMinMaxDescriptor &descriptor)
Definition: NeonArgMinMaxWorkload.cpp:31
armnn::NeonLayerSupport::IsGatherNdSupported
bool IsGatherNdSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported) const
Definition: NeonLayerSupport.cpp:1172
NeonArgMinMaxWorkload.hpp
armnn::NeonLayerSupport::IsSoftmaxSupported
bool IsSoftmaxSupported(const TensorInfo &input, const TensorInfo &output, const SoftmaxDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1502
armnn::NeonLogSoftmaxWorkloadValidate
arm_compute::Status NeonLogSoftmaxWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const LogSoftmaxDescriptor &descriptor)
Definition: NeonLogSoftmaxWorkload.cpp:19
armnn::NeonNormalizationWorkloadValidate
arm_compute::Status NeonNormalizationWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const NormalizationDescriptor &descriptor)
Definition: NeonNormalizationFloatWorkload.cpp:49
LayerSupportCommon.hpp
armnn::NeonLayerSupport::IsSpaceToDepthSupported
bool IsSpaceToDepthSupported(const TensorInfo &input, const TensorInfo &output, const SpaceToDepthDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1522
armnn::NeonLayerSupport::IsLayerSupported
bool IsLayerSupported(const LayerType &type, const std::vector< TensorInfo > &infos, const BaseDescriptor &descriptor, const Optional< LstmInputParamsInfo > &lstmParamsInfo, const Optional< QuantizedLstmInputParamsInfo > &quantizedLstmParamsInfo, Optional< std::string & > reasonIfUnsupported) const override
Default implementation of the ILayerSupport interface, Backends should implement this as a switch sta...
Definition: NeonLayerSupport.cpp:687
armnn::NeonConstantWorkloadValidate
arm_compute::Status NeonConstantWorkloadValidate(const TensorInfo &output)
Definition: NeonConstantWorkload.cpp:20
armnn::LayerType::Slice
@ Slice
NeonLogSoftmaxWorkload.hpp
armnn::NeonReduceWorkloadValidate
arm_compute::Status NeonReduceWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const ReduceDescriptor &descriptor)
Definition: NeonReduceWorkload.cpp:19
armnn::BinaryOperation::Maximum
@ Maximum
armnn::NeonLayerSupport::IsResizeSupported
bool IsResizeSupported(const TensorInfo &input, const TensorInfo &output, const ResizeDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1478
armnn::NeonPadWorkloadValidate
arm_compute::Status NeonPadWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const PadDescriptor &descriptor)
Definition: NeonPadWorkload.cpp:59
NeonGatherWorkload.hpp
armnn::LayerType::ChannelShuffle
@ ChannelShuffle
armnn::NeonGatherNdWorkloadValidate
arm_compute::Status NeonGatherNdWorkloadValidate(const TensorInfo &paramsInfo, const TensorInfo &indicesInfo, const TensorInfo &outputInfo)
Definition: NeonGatherNdWorkload.cpp:14
armnn::NeonConcatWorkloadValidate
arm_compute::Status NeonConcatWorkloadValidate(const std::vector< const TensorInfo * > &inputs, const TensorInfo &output, const OriginsDescriptor &descriptor)
Definition: NeonConcatWorkload.cpp:27
armnn::BinaryOperation::SqDiff
@ SqDiff
armnn::NeonLstmFloatWorkloadValidate
arm_compute::Status NeonLstmFloatWorkloadValidate(const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &scratchBuffer, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const LstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo)
Definition: NeonLstmFloatWorkload.cpp:253
armnn::NeonLayerSupport::IsReduceSupported
bool IsReduceSupported(const TensorInfo &input, const TensorInfo &output, const ReduceDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1454
armnn::UnaryOperation::Rsqrt
@ Rsqrt
armnn::NeonDivisionWorkloadValidate
arm_compute::Status NeonDivisionWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ActivationDescriptor *activationDescriptor)
Definition: NeonDivisionWorkload.cpp:18
armnn::NeonLayerSupport::IsNormalizationSupported
bool IsNormalizationSupported(const TensorInfo &input, const TensorInfo &output, const NormalizationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1329
NeonPooling3dWorkload.hpp
armnn::NeonLayerSupport::IsSliceSupported
bool IsSliceSupported(const TensorInfo &input, const TensorInfo &output, const SliceDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1490
armnn::UnaryOperation::Sqrt
@ Sqrt
armnn::UnaryOperation::LogicalNot
@ LogicalNot
armnn::LayerType::Subtraction
@ Subtraction
armnn::LayerType::Prelu
@ Prelu
armnn::NeonSqrtWorkloadValidate
arm_compute::Status NeonSqrtWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: NeonSqrtWorkload.cpp:18
armnn::NeonDepthToSpaceWorkloadValidate
arm_compute::Status NeonDepthToSpaceWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const DepthToSpaceDescriptor &descriptor)
Definition: NeonDepthToSpaceWorkload.cpp:19
armnn::NeonLayerSupport::IsSubtractionSupported
bool IsSubtractionSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1591
armnn::LayerType::LogicalBinary
@ LogicalBinary
armnn::LogicalBinaryDescriptor::m_Operation
LogicalBinaryOperation m_Operation
Specifies the logical operation to execute.
Definition: Descriptors.hpp:1513
NeonReduceWorkload.hpp
armnn::LayerType::Concat
@ Concat
armnn::PadDescriptor
A PadDescriptor for the PadLayer.
Definition: Descriptors.hpp:1175
armnn::NeonLayerSupport::IsTileSupported
bool IsTileSupported(const TensorInfo &input0, const TensorInfo &output, const TileDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1604
armnn::UnaryOperation::Exp
@ Exp
armnn::TransposeDescriptor
A TransposeDescriptor for the TransposeLayer.
Definition: Descriptors.hpp:1469
armnn::LayerSupportBase::IsDetectionPostProcessSupported
bool IsDetectionPostProcessSupported(const TensorInfo &boxEncodings, const TensorInfo &scores, const TensorInfo &anchors, const TensorInfo &detectionBoxes, const TensorInfo &detectionClasses, const TensorInfo &detectionScores, const TensorInfo &numDetections, const DetectionPostProcessDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: LayerSupportBase.cpp:85
NeonLogicalOrWorkload.hpp
armnn::NeonLayerSupport::IsQuantizeSupported
bool IsQuantizeSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1426
armnn::LayerType::TransposeConvolution2d
@ TransposeConvolution2d
armnn::LayerType::Merge
@ Merge
PolymorphicDowncast.hpp
armnn::EmptyOptional
EmptyOptional is used to initialize the Optional class in case we want to have default value for an O...
Definition: Optional.hpp:32
armnn::SliceDescriptor
A SliceDescriptor for the SliceLayer.
Definition: Descriptors.hpp:1207
armnn::DataType
DataType
Definition: Types.hpp:48
armnn::LayerType::Softmax
@ Softmax
armnn::NeonTransposeWorkloadValidate
arm_compute::Status NeonTransposeWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const TransposeDescriptor &descriptor)
Definition: NeonTransposeWorkload.cpp:15
armnn::NeonSoftmaxWorkloadValidate
arm_compute::Status NeonSoftmaxWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const SoftmaxDescriptor &descriptor)
Definition: NeonSoftmaxWorkload.cpp:19
armnn::NeonLayerSupport::IsOutputSupported
bool IsOutputSupported(const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1341
armnn::PolymorphicDowncast
DestType PolymorphicDowncast(SourceType *value)
Polymorphic downcast for build in pointers only.
Definition: PolymorphicDowncast.hpp:74
NeonDepthToSpaceWorkload.hpp
armnn::NeonSpaceToDepthWorkloadValidate
arm_compute::Status NeonSpaceToDepthWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const SpaceToDepthDescriptor &descriptor)
Definition: NeonSpaceToDepthWorkload.cpp:19
armnn::TensorInfo::IsTypeSpaceMatch
bool IsTypeSpaceMatch(const TensorInfo &other) const
Check that the types are the same and, if quantize, that the quantization parameters are the same.
Definition: Tensor.cpp:432
armnn::NeonLayerSupport::IsMinimumSupported
bool IsMinimumSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1291
armnn::ReshapeDescriptor
A ReshapeDescriptor for the ReshapeLayer.
Definition: Descriptors.hpp:1002
NeonDepthwiseConvolutionWorkload.hpp
armnn::LayerSupportBase::IsMergeSupported
bool IsMergeSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: LayerSupportBase.cpp:112
armnn::InvalidArgumentException
Definition: Exceptions.hpp:80
armnn::UnaryOperation::Sin
@ Sin
armnn::LayerType::Quantize
@ Quantize
armnn::NeonLayerSupport::IsPadSupported
bool IsPadSupported(const TensorInfo &input, const TensorInfo &output, const PadDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1347
NeonTransposeWorkload.hpp
armnn::LayerSupportBase::IsMemImportSupported
bool IsMemImportSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: LayerSupportBase.cpp:105
armnn::NeonLayerSupport::IsDepthToSpaceSupported
bool IsDepthToSpaceSupported(const TensorInfo &input, const TensorInfo &output, const DepthToSpaceDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1006
armnn::NeonLayerSupport::IsTransposeSupported
bool IsTransposeSupported(const TensorInfo &input, const TensorInfo &output, const TransposeDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1632
armnn::LayerType::Multiplication
@ Multiplication
armnn::PermuteDescriptor
A PermuteDescriptor for the PermuteLayer.
Definition: Descriptors.hpp:149
armnn::BatchMatMulDescriptor
A BatchMatMulDescriptor for the BatchMatMul operator.
Definition: Descriptors.hpp:1563
NeonBackendId.hpp
armnn::LayerType::Addition
@ Addition
armnn::NeonPreluWorkloadValidate
arm_compute::Status NeonPreluWorkloadValidate(const TensorInfo &input, const TensorInfo &alpha, const TensorInfo &output)
Definition: NeonPreluWorkload.cpp:17
NeonBatchToSpaceNdWorkload.hpp
armnn::NeonDequantizeWorkloadValidate
arm_compute::Status NeonDequantizeWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: NeonDequantizeWorkload.cpp:22
ArmComputeUtils.hpp
armnn::SpaceToBatchNdDescriptor
A SpaceToBatchNdDescriptor for the SpaceToBatchNdLayer.
Definition: Descriptors.hpp:1022
armnn::NeonLayerSupport::IsQuantizedLstmSupported
bool IsQuantizedLstmSupported(const TensorInfo &input, const TensorInfo &cellStateIn, const TensorInfo &outputStateIn, const TensorInfo &cellStateOut, const TensorInfo &outputStateOut, const QuantizedLstmInputParamsInfo &paramsInfo, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1436
armnn::Convolution3dDescriptor
A Convolution3dDescriptor for the Convolution3dLayer.
Definition: Descriptors.hpp:588
armnn::LayerType::DepthToSpace
@ DepthToSpace
NeonQLstmWorkload.hpp
armnn::BaseDescriptor
Base class for all descriptors.
Definition: Descriptors.hpp:22
NeonRsqrtWorkload.hpp
armnn::NeonLayerSupport::IsConcatSupported
bool IsConcatSupported(const std::vector< const TensorInfo * > inputs, const TensorInfo &output, const OriginsDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:873
armnn::NeonLogicalAndWorkloadValidate
arm_compute::Status NeonLogicalAndWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output)
Definition: NeonLogicalAndWorkload.cpp:18
NeonSubtractionWorkload.hpp
NeonCastWorkload.hpp
armnn::NeonDepthwiseConvolutionWorkloadValidate
arm_compute::Status NeonDepthwiseConvolutionWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, const ActivationDescriptor *activationDescriptor)
Definition: NeonDepthwiseConvolutionWorkload.cpp:29
armnn::BoostLogSeverityMapping::info
@ info
armnn::BinaryOperation::Power
@ Power
armnn::LayerType::DetectionPostProcess
@ DetectionPostProcess
armnn::LayerType::MemImport
@ MemImport
armnn::LayerType::Pooling2d
@ Pooling2d
armnn::NeonLayerSupport::IsFullyConnectedSupported
bool IsFullyConnectedSupported(const TensorInfo &input, const TensorInfo &output, const TensorInfo &weights, const TensorInfo &biases, const FullyConnectedDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1141
armnn::NeonLayerSupport::IsElementwiseUnarySupported
bool IsElementwiseUnarySupported(const TensorInfo &input, const TensorInfo &output, const ElementwiseUnaryDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1062
armnn::TensorInfo::GetDataType
DataType GetDataType() const
Definition: Tensor.hpp:198
armnn::NeonLayerSupport::IsDepthwiseConvolutionSupported
bool IsDepthwiseConvolutionSupported(const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1018
armnn::LayerType::Division
@ Division
armnn::NeonQuantizedLstmWorkloadValidate
arm_compute::Status NeonQuantizedLstmWorkloadValidate(const TensorInfo &input, const TensorInfo &cellStateIn, const TensorInfo &outputStateIn, const TensorInfo &cellStateOut, const TensorInfo &outputStateOut, const QuantizedLstmInputParamsInfo &paramsInfo)
Definition: NeonQuantizedLstmWorkload.cpp:131
armnn::NeonActivationWorkloadValidate
arm_compute::Status NeonActivationWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const ActivationDescriptor &descriptor)
Definition: NeonActivationWorkload.cpp:17
armnn::LayerType::Shape
@ Shape
armnn::NeonLayerSupport::IsFillSupported
bool IsFillSupported(const TensorInfo &input, const TensorInfo &output, const FillDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1114
NeonLogWorkload.hpp
NeonResizeWorkload.hpp
armnn::BatchToSpaceNdDescriptor
A BatchToSpaceNdDescriptor for the BatchToSpaceNdLayer.
Definition: Descriptors.hpp:875
armnn::Convolution2dDescriptor
A Convolution2dDescriptor for the Convolution2dLayer.
Definition: Descriptors.hpp:534
armnn::ComparisonDescriptor
A ComparisonDescriptor for the ComparisonLayer.
Definition: Descriptors.hpp:89
armnn::FillDescriptor
A FillDescriptor for the FillLayer.
Definition: Descriptors.hpp:925
armnn::DataType::QAsymmS8
@ QAsymmS8
armnn::NeonQuantizeWorkloadValidate
arm_compute::Status NeonQuantizeWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: NeonQuantizeWorkload.cpp:18
armnn::ElementwiseUnaryDescriptor::m_Operation
UnaryOperation m_Operation
Specifies the elementwiseUnary operation to execute.
Definition: Descriptors.hpp:145
NeonTransposeConvolution2dWorkload.hpp
armnn::LayerType::FullyConnected
@ FullyConnected
armnn::LayerType::Gather
@ Gather
NeonFullyConnectedWorkload.hpp
NeonSinWorkload.hpp
armnn::LayerType::Pooling3d
@ Pooling3d
armnn::NeonBatchToSpaceNdWorkloadValidate
arm_compute::Status NeonBatchToSpaceNdWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const BatchToSpaceNdDescriptor &descriptor)
Definition: NeonBatchToSpaceNdWorkload.cpp:15
armnn::NeonCastValidate
arm_compute::Status NeonCastValidate(const TensorInfo &input, const TensorInfo &output)
Definition: NeonCastWorkload.cpp:19
armnn::NeonLayerSupport::IsChannelShuffleSupported
bool IsChannelShuffleSupported(const TensorInfo &input, const TensorInfo &output, const ChannelShuffleDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:846
armnn::UnaryOperation::Log
@ Log
armnn::LayerType::LogSoftmax
@ LogSoftmax
armnn::LayerType::BatchMatMul
@ BatchMatMul
armnn::LogicalBinaryOperation::LogicalAnd
@ LogicalAnd
armnn::LayerType::DepthwiseConvolution2d
@ DepthwiseConvolution2d
armnn::LayerType::Cast
@ Cast
armnn::LayerSupportBase::IsShapeSupported
bool IsShapeSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: LayerSupportBase.cpp:131
armnn::LayerType::BatchToSpaceNd
@ BatchToSpaceNd
NeonInstanceNormalizationWorkload.hpp
FORWARD_WORKLOAD_VALIDATE_FUNC
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported,...)
Definition: NeonLayerSupport.cpp:153
armnn::LstmDescriptor
An LstmDescriptor for the LstmLayer.
Definition: Descriptors.hpp:1081
armnn::StridedSliceDescriptor
A StridedSliceDescriptor for the StridedSliceLayer.
Definition: Descriptors.hpp:1282
armnn::NeonLayerSupport::IsL2NormalizationSupported
bool IsL2NormalizationSupported(const TensorInfo &input, const TensorInfo &output, const L2NormalizationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1202
Tensor.hpp
armnn::Status
Status
Definition: Types.hpp:42
armnn::NeonLayerSupport::IsArgMinMaxSupported
bool IsArgMinMaxSupported(const TensorInfo &input, const TensorInfo &output, const ArgMinMaxDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:761
armnn::NeonLayerSupport::IsInstanceNormalizationSupported
bool IsInstanceNormalizationSupported(const TensorInfo &input, const TensorInfo &output, const InstanceNormalizationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1190
armnn::LogicalBinaryDescriptor
A LogicalBinaryDescriptor for the LogicalBinaryLayer.
Definition: Descriptors.hpp:1497
armnn::LayerType::Reshape
@ Reshape
armnn::TensorInfo::GetShape
const TensorShape & GetShape() const
Definition: Tensor.hpp:191
armnn::NeonLayerSupport::IsTransposeConvolution2dSupported
bool IsTransposeConvolution2dSupported(const TensorInfo &input, const TensorInfo &output, const TransposeConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1616
armnn::NeonSplitterWorkloadValidate
arm_compute::Status NeonSplitterWorkloadValidate(const TensorInfo &input, const std::vector< std::reference_wrapper< TensorInfo >> &outputs, unsigned int splitAxis)
Definition: NeonSplitterWorkload.cpp:32
NeonSpaceToBatchNdWorkload.hpp
NeonConstantWorkload.hpp
armnn::LayerType::SpaceToBatchNd
@ SpaceToBatchNd
armnn::LayerType::Fill
@ Fill
armnn::NeonRsqrtWorkloadValidate
arm_compute::Status NeonRsqrtWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: NeonRsqrtWorkload.cpp:18
armnn::NeonUnidirectionalSequenceLstmWorkloadValidate
arm_compute::Status NeonUnidirectionalSequenceLstmWorkloadValidate(const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const UnidirectionalSequenceLstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo)
Definition: NeonUnidirectionalSequenceLstmWorkload.cpp:491
armnn::NeonReshapeWorkloadValidate
arm_compute::Status NeonReshapeWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: NeonReshapeWorkload.cpp:17
armnn::LayerType::L2Normalization
@ L2Normalization
NeonLstmFloatWorkload.hpp
armnn::NeonConvolution2dWorkloadValidate
arm_compute::Status NeonConvolution2dWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const Convolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, bool isFastMathEnabled, const ActivationDescriptor *activationDescriptor)
Definition: NeonConvolution2dWorkload.cpp:24
NeonMaximumWorkload.hpp
armnn::IgnoreUnused
void IgnoreUnused(Ts &&...)
Definition: IgnoreUnused.hpp:14
armnn::NeonBatchNormalizationValidate
arm_compute::Status NeonBatchNormalizationValidate(const TensorInfo &input, const TensorInfo &output, const TensorInfo &mean, const TensorInfo &var, const TensorInfo &beta, const TensorInfo &gamma, const BatchNormalizationDescriptor &descriptor, const ActivationDescriptor *activationDescriptor)
Definition: NeonBatchNormalizationWorkload.cpp:24
armnn::NeonLayerSupport::IsCastSupported
bool IsCastSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:836
NeonQuantizedLstmWorkload.hpp
armnn::ViewsDescriptor::GetNumDimensions
uint32_t GetNumDimensions() const
Get the number of dimensions.
Definition: Descriptors.cpp:300
armnn::LayerType::Minimum
@ Minimum
armnn::NeonSinWorkloadValidate
arm_compute::Status NeonSinWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: NeonSinWorkload.cpp:17
armnn::NeonLayerSupport::IsFloorSupported
bool IsFloorSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1126
armnn::NeonLayerSupport::IsConvolution3dSupported
bool IsConvolution3dSupported(const TensorInfo &input, const TensorInfo &output, const Convolution3dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:973
NeonBatchMatMulWorkload.hpp
NeonMinimumWorkload.hpp
armnn::NeonLayerSupport::IsInputSupported
bool IsInputSupported(const TensorInfo &input, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1184
armnn::IsSupportedForDataTypeGeneric
bool IsSupportedForDataTypeGeneric(Optional< std::string & > reasonIfUnsupported, DataType dataType, Float16Func float16FuncPtr, Float32Func float32FuncPtr, Uint8Func uint8FuncPtr, Int32Func int32FuncPtr, BooleanFunc booleanFuncPtr, Params &&... params)
Definition: LayerSupportCommon.hpp:27
armnn::LayerType::UnidirectionalSequenceLstm
@ UnidirectionalSequenceLstm
armnn::NeonLayerSupport::IsMeanSupported
bool IsMeanSupported(const TensorInfo &input, const TensorInfo &output, const MeanDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1279
armnn::NeonLayerSupport::IsGatherSupported
bool IsGatherSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const GatherDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported) const
Definition: NeonLayerSupport.cpp:1158
NeonLogicalNotWorkload.hpp
armnn::BinaryOperation::Minimum
@ Minimum
armnn::LayerType::Map
@ Map
armnn::OriginsDescriptor
An OriginsDescriptor for the ConcatLayer.
Definition: Descriptors.hpp:201
armnn::NeonStridedSliceWorkloadValidate
arm_compute::Status NeonStridedSliceWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const StridedSliceDescriptor &descriptor)
Definition: NeonStridedSliceWorkload.cpp:19
armnn::LayerType::MemCopy
@ MemCopy
Exceptions.hpp
armnn
Copyright (c) 2021 ARM Limited and Contributors.
Definition: 01_00_quick_start.dox:6
armnn::ElementwiseUnaryDescriptor
A ElementwiseUnaryDescriptor for the ElementwiseUnaryLayer.
Definition: Descriptors.hpp:129
armnn::TransposeConvolution2dDescriptor
A TransposeConvolution2dDescriptor for the TransposeConvolution2dLayer.
Definition: Descriptors.hpp:1419
Types.hpp
armnn::LayerType::ArgMinMax
@ ArgMinMax
armnn::LayerType::Pad
@ Pad
armnn::NeonLayerSupport::NeonLayerSupport
NeonLayerSupport()
Definition: NeonLayerSupport.cpp:166
armnn::LstmInputParamsInfo
Definition: LstmParams.hpp:63
NeonSqrtWorkload.hpp
armnn::LayerType::Rank
@ Rank
armnn::LayerType::Mean
@ Mean
ArmComputeTensorUtils.hpp
armnn::UnaryOperation::Abs
@ Abs
InternalTypes.hpp
armnn::NeonLayerSupport::IsConvertFp16ToFp32Supported
bool IsConvertFp16ToFp32Supported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:920
NeonStackWorkload.hpp
NeonBackendModelContext.hpp
armnn::NeonSliceWorkloadValidate
arm_compute::Status NeonSliceWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const SliceDescriptor &descriptor)
Definition: NeonSliceWorkload.cpp:21
NeonPreluWorkload.hpp
armnn::NeonLayerSupport::IsDivisionSupported
bool IsDivisionSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1316
armnn::LayerType::Input
@ Input
armnn::NeonLayerSupport::IsDilatedDepthwiseConvolutionSupported
bool IsDilatedDepthwiseConvolutionSupported(const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reason=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1045
armnn::LayerType::Resize
@ Resize
NeonElementwiseBinaryWorkload.hpp
NeonSliceWorkload.hpp
armnn::NeonLayerSupport::IsMaximumSupported
bool IsMaximumSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1267
NeonGatherNdWorkload.hpp
NeonTileWorkload.hpp
armnn::NeonElementwiseBinaryWorkloadValidate
arm_compute::Status NeonElementwiseBinaryWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ElementwiseBinaryDescriptor &descriptor, const ActivationDescriptor *activationDescriptor)
Definition: NeonElementwiseBinaryWorkload.cpp:20
armnn::NeonPooling2dWorkloadValidate
arm_compute::Status NeonPooling2dWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const Pooling2dDescriptor &descriptor)
Definition: NeonPooling2dWorkload.cpp:22
armnn::IsLayerTypeSupported
bool IsLayerTypeSupported(const LayerType &type, const std::vector< TensorInfo > &infos, const BaseDescriptor &descriptor, const Optional< LstmInputParamsInfo > &lstmParamsInfo, const Optional< QuantizedLstmInputParamsInfo > &quantizedLstmParamsInfo, Optional< std::string & > reasonIfUnsupported, const NeonLayerSupport &support)
Definition: NeonLayerSupport.cpp:171
armnn::SetValueChecked
void SetValueChecked(Optional< T & > optionalRef, V &&val)
Definition: LayerSupportCommon.hpp:17
armnn::NeonLayerSupport::IsComparisonSupported
bool IsComparisonSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ComparisonDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:858
armnn::NeonChannelShuffleValidate
arm_compute::Status NeonChannelShuffleValidate(const TensorInfo &input, const TensorInfo &output, const ChannelShuffleDescriptor &descriptor)
Definition: NeonChannelShuffleWorkload.cpp:17
armnn::NeonL2NormalizationWorkloadValidate
arm_compute::Status NeonL2NormalizationWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const L2NormalizationDescriptor &descriptor)
Definition: NeonL2NormalizationFloatWorkload.cpp:19
armnn::NeonComparisonWorkloadValidate
arm_compute::Status NeonComparisonWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ComparisonDescriptor &descriptor)
Definition: NeonComparisonWorkload.cpp:16
armnn::BinaryOperation::Div
@ Div
NeonMultiplicationWorkload.hpp
armnn::LayerType::Convolution2d
@ Convolution2d
NeonUnidirectionalSequenceLstmWorkload.hpp
armnn::Pooling2dDescriptor
A Pooling2dDescriptor for the Pooling2dLayer.
Definition: Descriptors.hpp:371
NeonL2NormalizationFloatWorkload.hpp
armnn::LayerType::Maximum
@ Maximum
armnn::LayerType::Activation
@ Activation
armnn::DepthwiseConvolution2dDescriptor
A DepthwiseConvolution2dDescriptor for the DepthwiseConvolution2dLayer.
Definition: Descriptors.hpp:659
armnn::LayerType::Lstm
@ Lstm
armnn::LayerSupportBase::IsMemCopySupported
bool IsMemCopySupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: LayerSupportBase.cpp:98
armnn::LayerType::Dequantize
@ Dequantize
armnn::NeonLayerSupport::IsUnidirectionalSequenceLstmSupported
bool IsUnidirectionalSequenceLstmSupported(const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const UnidirectionalSequenceLstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string & > reasonIfUnsupported) const
Definition: NeonLayerSupport.cpp:1640
armnn::LayerType::Convolution3d
@ Convolution3d
armnn::ReduceDescriptor
A ReduceDescriptor for the REDUCE operators.
Definition: Descriptors.hpp:1517
armnn::NeonLayerSupport::IsQLstmSupported
bool IsQLstmSupported(const TensorInfo &input, const TensorInfo &previousOutputIn, const TensorInfo &previousCellStateIn, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const QLstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1391
NeonConvertFp32ToFp16Workload.hpp
armnn::NeonLayerSupport::IsActivationSupported
bool IsActivationSupported(const TensorInfo &input, const TensorInfo &output, const ActivationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:735
armnn::NeonLayerSupport::IsSplitterSupported
bool IsSplitterSupported(const TensorInfo &input, const std::vector< std::reference_wrapper< TensorInfo >> &outputs, const ViewsDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1534
armnn::LayerType
LayerType
When adding a new layer, adapt also the LastLayer enum value in the enum class LayerType below.
Definition: Types.hpp:483
armnn::LayerType::Unmap
@ Unmap
armnn::NeonLayerSupport::IsLogSoftmaxSupported
bool IsLogSoftmaxSupported(const TensorInfo &input, const TensorInfo &output, const LogSoftmaxDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1235
armnn::MeanDescriptor
A MeanDescriptor for the MeanLayer.
Definition: Descriptors.hpp:1151
armnn::LayerType::QLstm
@ QLstm
NeonDequantizeWorkload.hpp
armnn::OptionalReferenceSwitch< std::is_reference< T >::value, T >::value
const T & value() const
Definition: Optional.hpp:146
armnn::TileDescriptor
Definition: Descriptors.hpp:1619
NeonSplitterWorkload.hpp
armnn::SoftmaxDescriptor
A SoftmaxDescriptor for the SoftmaxLayer.
Definition: Descriptors.hpp:177
armnn::NeonLayerSupport::IsStridedSliceSupported
bool IsStridedSliceSupported(const TensorInfo &input, const TensorInfo &output, const StridedSliceDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1579
armnn::NeonLayerSupport::IsLstmSupported
bool IsLstmSupported(const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &scratchBuffer, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const LstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1243
armnn::IBackendInternal::IBackendSpecificModelContextPtr
std::shared_ptr< IBackendModelContext > IBackendSpecificModelContextPtr
Definition: IBackendInternal.hpp:96
NeonLayerSupport.hpp
armnn::SpaceToDepthDescriptor
A SpaceToDepthDescriptor for the SpaceToDepthLayer.
Definition: Descriptors.hpp:1054
armnn::OptionalBase::has_value
bool has_value() const noexcept
Definition: Optional.hpp:53
NeonActivationWorkload.hpp
armnn::NeonLayerSupport::IsPooling2dSupported
bool IsPooling2dSupported(const TensorInfo &input, const TensorInfo &output, const Pooling2dDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1367
armnn::NeonTransposeConvolution2dWorkloadValidate
arm_compute::Status NeonTransposeConvolution2dWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const TransposeConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases)
Definition: NeonTransposeConvolution2dWorkload.cpp:25
armnn::LayerType::Output
@ Output
armnn::LayerType::Constant
@ Constant
armnn::NeonLayerSupport::IsConvolution2dSupported
bool IsConvolution2dSupported(const TensorInfo &input, const TensorInfo &output, const Convolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:940
armnn::NeonLogicalOrWorkloadValidate
arm_compute::Status NeonLogicalOrWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output)
Definition: NeonLogicalOrWorkload.cpp:18