ArmNN
 24.02
NeonLayerSupport.cpp
1 //
2 // Copyright © 2017-2024 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
6 #include "NeonLayerSupport.hpp"
8 
9 #include <armnn/Exceptions.hpp>
10 #include <armnn/Tensor.hpp>
11 #include <armnn/Types.hpp>
13 
14 #include <LayerSupportCommon.hpp>
18 
19 #if defined(ARMCOMPUTENEON_ENABLED)
90 #endif
91 
92 namespace armnn
93 {
94 
95 namespace
96 {
97 
98 const TensorInfo OverrideDataType(const TensorInfo& info, Optional<DataType> type)
99 {
100  if (!type)
101  {
102  return info;
103  }
104  return TensorInfo(info.GetShape(),
105  type.value(),
106  info.GetQuantizationScale(),
107  info.GetQuantizationOffset(),
108  info.IsConstant());
109 }
110 
111 template< typename ... Args>
112 bool IsNeonBackendSupported(Optional<std::string&> reasonIfUnsupported, Args... args)
113 {
114  IgnoreUnused(reasonIfUnsupported, (args)...);
115 #if defined(ARMCOMPUTENEON_ENABLED)
116  return true;
117 #else
118  SetValueChecked(reasonIfUnsupported, "The armnn library has been built without NEON support");
119  return false;
120 #endif
121 }
122 
123 template<typename FloatFunc, typename Uint8Func, typename ... Params>
124 bool IsSupportedForDataTypeNeon(Optional<std::string&> reasonIfUnsupported,
125  DataType dataType,
126  FloatFunc floatFuncPtr,
127  Uint8Func uint8FuncPtr,
128  Params&&... params)
129 {
130  return IsNeonBackendSupported(reasonIfUnsupported) &&
131  IsSupportedForDataTypeGeneric(reasonIfUnsupported,
132  dataType,
133  floatFuncPtr,
134  floatFuncPtr,
135  uint8FuncPtr,
136  &FalseFunc<>,
137  &FalseFunc<>,
138  std::forward<Params>(params)...);
139 }
140 
141 #if defined(ARMCOMPUTENEON_ENABLED)
142 template<class FuncType, class... Args>
143 inline bool IsWorkloadSupported(FuncType& func, Optional<std::string&> reasonIfUnsupported, Args&&... args)
144 {
145  arm_compute::Status aclStatus = func(std::forward<Args>(args)...);
146  const bool supported = (aclStatus.error_code() == arm_compute::ErrorCode::OK);
147  if (!supported && reasonIfUnsupported)
148  {
149  reasonIfUnsupported.value() = aclStatus.error_description();
150  }
151  return supported;
152 }
153 
154 #define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported, ...) \
155  return IsWorkloadSupported(func, reasonIfUnsupported, __VA_ARGS__);
156 #else
157 #define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported, ...) \
158  return IsNeonBackendSupported(reasonIfUnsupported, __VA_ARGS__);
159 #endif
160 } // anonymous namespace
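For illustration, the helpers above reduce to one pattern: call the Arm Compute Library validate function for a workload and, on failure, copy its error description into reasonIfUnsupported. Below is a minimal, self-contained sketch of that pattern; Status, ErrorCode and DummyWorkloadValidate are hypothetical stand-ins for arm_compute::Status, arm_compute::ErrorCode and the NeonXxxWorkloadValidate functions, and this snippet is not part of NeonLayerSupport.cpp.

// Minimal sketch (C++17) of the validate-and-report pattern used by
// IsWorkloadSupported and FORWARD_WORKLOAD_VALIDATE_FUNC above.
// All types here are hypothetical stand-ins, not Arm NN or Compute Library types.
#include <iostream>
#include <optional>
#include <string>
#include <utility>

enum class ErrorCode { OK, RUNTIME_ERROR };

struct Status
{
    ErrorCode code = ErrorCode::OK;
    std::string description;
};

// Stand-in for a NeonXxxWorkloadValidate function.
Status DummyWorkloadValidate(int inputSize, int outputSize)
{
    if (inputSize != outputSize)
    {
        return { ErrorCode::RUNTIME_ERROR, "input and output sizes do not match" };
    }
    return {};
}

// Same shape as IsWorkloadSupported above: run the validate function and
// surface the reason when validation fails.
template <class FuncType, class... Args>
bool IsWorkloadSupportedSketch(FuncType& func, std::optional<std::string>& reasonIfUnsupported, Args&&... args)
{
    Status status = func(std::forward<Args>(args)...);
    const bool supported = (status.code == ErrorCode::OK);
    if (!supported)
    {
        reasonIfUnsupported = status.description;
    }
    return supported;
}

int main()
{
    std::optional<std::string> reason;
    const bool supported = IsWorkloadSupportedSketch(DummyWorkloadValidate, reason, 4, 8);
    std::cout << "supported: " << std::boolalpha << supported;
    if (reason)
    {
        std::cout << " (" << *reason << ")";
    }
    std::cout << "\n";
    return 0;
}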
161 
162 NeonLayerSupport::NeonLayerSupport(const IBackendInternal::IBackendSpecificModelContextPtr& modelContextPtr)
163  : m_ModelContextPtr(modelContextPtr)
164 {
165 }
166 
167 NeonLayerSupport::NeonLayerSupport()
168  : m_ModelContextPtr(nullptr)
169 {
170 }
171 
172 bool IsLayerTypeSupported(const LayerType& type,
173  const std::vector<TensorInfo>& infos,
174  const BaseDescriptor& descriptor,
175  const Optional<LstmInputParamsInfo>& lstmParamsInfo,
176  const Optional<QuantizedLstmInputParamsInfo>& quantizedLstmParamsInfo,
177  Optional<std::string&> reasonIfUnsupported,
178  const NeonLayerSupport& support)
179 {
180  switch (type)
181  {
182  case LayerType::Activation:
183  return support.IsActivationSupported(infos[0],
184  infos[1],
185  *(PolymorphicDowncast<const ActivationDescriptor*>(&descriptor)),
186  reasonIfUnsupported);
187  case LayerType::Addition:
188  return support.IsAdditionSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
189  case LayerType::ArgMinMax:
190  return support.IsArgMinMaxSupported(infos[0],
191  infos[1],
192  *(PolymorphicDowncast<const ArgMinMaxDescriptor*>(&descriptor)),
193  reasonIfUnsupported);
194  case LayerType::BatchMatMul:
195  return support.IsBatchMatMulSupported(infos[0],
196  infos[1],
197  infos[2],
198  *(PolymorphicDowncast<const BatchMatMulDescriptor*>(&descriptor)),
199  reasonIfUnsupported);
200  case LayerType::BatchNormalization:
201  return support.IsBatchNormalizationSupported(infos[0],
202  infos[1],
203  infos[2],
204  infos[3],
205  infos[4],
206  infos[5],
207  *(PolymorphicDowncast<const
208  BatchNormalizationDescriptor*>(&descriptor)),
209  reasonIfUnsupported);
210  case LayerType::BatchToSpaceNd:
211  return support.IsBatchToSpaceNdSupported(infos[0],
212  infos[1],
213  *(PolymorphicDowncast<const
214  BatchToSpaceNdDescriptor*>(&descriptor)),
215  reasonIfUnsupported);
216  case LayerType::Cast:
217  return support.IsCastSupported(infos[0], infos[1], reasonIfUnsupported);
218  case LayerType::ChannelShuffle:
219  return support.IsChannelShuffleSupported(infos[0],
220  infos[1],
221  *(PolymorphicDowncast<const
222  ChannelShuffleDescriptor*>(&descriptor)),
223  reasonIfUnsupported);
224  case LayerType::Comparison:
225  return support.IsComparisonSupported(infos[0],
226  infos[1],
227  infos[2],
228  *(PolymorphicDowncast<const ComparisonDescriptor*>(&descriptor)),
229  reasonIfUnsupported);
230  case LayerType::Concat:
231  {
232  std::vector<const TensorInfo*> inputInfos;
233  for (uint32_t i = 0; i < (infos.size() - 1); i++)
234  {
235  inputInfos.push_back(&infos[i]);
236  }
237  return support.IsConcatSupported(inputInfos,
238  infos[infos.size() - 1],
239  *(PolymorphicDowncast<const OriginsDescriptor*>(&descriptor)),
240  reasonIfUnsupported);
241  }
242  case LayerType::Constant:
243  return support.IsConstantSupported(infos[0], reasonIfUnsupported);
244  case LayerType::ConvertFp16ToFp32:
245  return support.IsConvertFp16ToFp32Supported(infos[0], infos[1], reasonIfUnsupported);
246  case LayerType::ConvertFp32ToFp16:
247  return support.IsConvertFp32ToFp16Supported(infos[0], infos[1], reasonIfUnsupported);
248  case LayerType::Convolution2d:
249  {
250  if (infos.size() != 4)
251  {
252  throw InvalidArgumentException("Invalid number of TransposeConvolution2d TensorInfos. "
253  "TensorInfos should be of format: {input, output, weights, biases}.");
254  }
255 
256  auto desc = *(PolymorphicDowncast<const Convolution2dDescriptor*>(&descriptor));
257  if (infos[3] == TensorInfo())
258  {
259  return support.IsConvolution2dSupported(infos[0],
260  infos[1],
261  desc,
262  infos[2],
263  EmptyOptional(),
264  reasonIfUnsupported);
265  }
266  else
267  {
268  return support.IsConvolution2dSupported(infos[0],
269  infos[1],
270  desc,
271  infos[2],
272  infos[3],
273  reasonIfUnsupported);
274  }
275  }
276  case LayerType::Convolution3d:
277  {
278  if (infos.size() != 4)
279  {
280  throw InvalidArgumentException("Invalid number of Convolution3d TensorInfos. "
281  "TensorInfos should be of format: {input, output, weights, biases}.");
282  }
283 
284  auto desc = *(PolymorphicDowncast<const Convolution3dDescriptor*>(&descriptor));
285  if (infos[3] == TensorInfo())
286  {
287  return support.IsConvolution3dSupported(infos[0],
288  infos[1],
289  desc,
290  infos[2],
291  EmptyOptional(),
292  reasonIfUnsupported);
293  }
294  else
295  {
296  return support.IsConvolution3dSupported(infos[0],
297  infos[1],
298  desc,
299  infos[2],
300  infos[3],
301  reasonIfUnsupported);
302  }
303  }
304  case LayerType::DepthToSpace:
305  return support.IsDepthToSpaceSupported(infos[0],
306  infos[1],
307  *(PolymorphicDowncast<const DepthToSpaceDescriptor*>(&descriptor)),
308  reasonIfUnsupported);
309  case LayerType::DepthwiseConvolution2d:
310  {
311  if (infos.size() != 4)
312  {
313  throw InvalidArgumentException("Invalid number of DepthwiseConvolution2d TensorInfos. "
314  "TensorInfos should be of format: {input, output, weights, biases}.");
315  }
316 
317  auto desc = *(PolymorphicDowncast<const DepthwiseConvolution2dDescriptor*>(&descriptor));
318  if (infos[3] == TensorInfo())
319  {
320  return support.IsDepthwiseConvolutionSupported(infos[0],
321  infos[1],
322  desc,
323  infos[2],
324  EmptyOptional(),
325  reasonIfUnsupported);
326  }
327  else
328  {
329  return support.IsDepthwiseConvolutionSupported(infos[0],
330  infos[1],
331  desc,
332  infos[2],
333  infos[3],
334  reasonIfUnsupported);
335  }
336  }
337  case LayerType::Dequantize:
338  return support.IsDequantizeSupported(infos[0], infos[1], reasonIfUnsupported);
339  case LayerType::DetectionPostProcess:
340  {
341  auto desc = *(PolymorphicDowncast<const DetectionPostProcessDescriptor*>(&descriptor));
342  return support.IsDetectionPostProcessSupported(infos[0],
343  infos[1],
344  infos[2],
345  infos[3],
346  infos[4],
347  infos[5],
348  infos[6],
349  desc,
350  reasonIfUnsupported);
351  }
352  case LayerType::Division:
353  return support.IsDivisionSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
354  case LayerType::ElementwiseBinary:
355  {
356  auto desc = *(PolymorphicDowncast<const ElementwiseBinaryDescriptor *>(&descriptor));
357 
358  switch (desc.m_Operation)
359  {
360  case BinaryOperation::Add:
361  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonAdditionWorkloadValidate,
362  reasonIfUnsupported,
363  infos[0],
364  infos[1],
365  infos[2],
366  nullptr);
367  case BinaryOperation::Div:
368  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonDivisionWorkloadValidate,
369  reasonIfUnsupported,
370  infos[0],
371  infos[1],
372  infos[2],
373  nullptr);
374  case BinaryOperation::Maximum:
375  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonMaximumWorkloadValidate,
376  reasonIfUnsupported,
377  infos[0],
378  infos[1],
379  infos[2]);
380  case BinaryOperation::Minimum:
381  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonMinimumWorkloadValidate,
382  reasonIfUnsupported,
383  infos[0],
384  infos[1],
385  infos[2]);
386  case BinaryOperation::Mul:
387  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonMultiplicationWorkloadValidate,
388  reasonIfUnsupported,
389  infos[0],
390  infos[1],
391  infos[2],
392  nullptr);
393  case BinaryOperation::Power:
394  case BinaryOperation::SqDiff:
395  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonElementwiseBinaryWorkloadValidate,
396  reasonIfUnsupported,
397  infos[0],
398  infos[1],
399  infos[2],
400  desc,
401  nullptr);
402  case BinaryOperation::Sub:
403  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonSubtractionWorkloadValidate,
404  reasonIfUnsupported,
405  infos[0],
406  infos[1],
407  infos[2],
408  nullptr);
409  default:
410  return false;
411  }
412  }
413  case LayerType::ElementwiseUnary:
414  return support.IsElementwiseUnarySupported(infos[0],
415  infos[1],
416  *(PolymorphicDowncast<const
417  ElementwiseUnaryDescriptor*>(&descriptor)),
418  reasonIfUnsupported);
419  case LayerType::Fill:
420  return support.IsFillSupported(infos[0],
421  infos[1],
422  *(PolymorphicDowncast<const FillDescriptor*>(&descriptor)),
423  reasonIfUnsupported);
424  case LayerType::Floor:
425  return support.IsFloorSupported(infos[0], infos[1], reasonIfUnsupported);
426  case LayerType::FullyConnected:
427  return support.IsFullyConnectedSupported(infos[0],
428  infos[1],
429  infos[2],
430  infos[3],
431  *(PolymorphicDowncast<const
432  FullyConnectedDescriptor*>(&descriptor)),
433  reasonIfUnsupported);
434  case LayerType::Fused:
435  {
436  auto fusedDescriptor = *(PolymorphicDowncast<const FusedDescriptor*>(&descriptor));
437  if (fusedDescriptor.m_NumInputSlots + fusedDescriptor.m_NumOutputSlots != infos.size())
438  {
439  throw InvalidArgumentException("Invalid number of FusedLayer TensorInfos.");
440  }
441 
442  auto it = infos.begin() + numeric_cast<TensorInfo::DifferenceType>(fusedDescriptor.m_NumInputSlots);
443  std::vector<TensorInfo> inputInfos(infos.begin(), it);
444  std::vector<TensorInfo> outputInfos(it, infos.end());
445 
446  return support.IsFusedSupported({inputInfos.begin(), inputInfos.end()},
447  {outputInfos.begin(), outputInfos.end()},
448  fusedDescriptor,
449  reasonIfUnsupported);
450  }
451  case LayerType::Gather:
452  return support.IsGatherSupported(infos[0],
453  infos[1],
454  infos[2],
455  *(PolymorphicDowncast<const GatherDescriptor*>(&descriptor)),
456  reasonIfUnsupported);
457  case LayerType::GatherNd:
458  return support.IsGatherNdSupported(infos[0],
459  infos[1],
460  infos[2],
461  reasonIfUnsupported);
462  case LayerType::Input:
463  return support.IsInputSupported(infos[0], reasonIfUnsupported);
464  case LayerType::InstanceNormalization:
465  return support.IsInstanceNormalizationSupported(infos[0],
466  infos[1],
467  *(PolymorphicDowncast<const
468  InstanceNormalizationDescriptor*>(&descriptor)),
469  reasonIfUnsupported);
470  case LayerType::L2Normalization:
471  return support.IsL2NormalizationSupported(infos[0],
472  infos[1],
473  *(PolymorphicDowncast<const
474  L2NormalizationDescriptor*>(&descriptor)),
475  reasonIfUnsupported);
476  case LayerType::LogicalBinary:
477  return support.IsLogicalBinarySupported(infos[0],
478  infos[1],
479  infos[2],
480  *(PolymorphicDowncast<const
481  LogicalBinaryDescriptor*>(&descriptor)),
482  reasonIfUnsupported);
483  case LayerType::LogSoftmax:
484  return support.IsLogSoftmaxSupported(infos[0],
485  infos[1],
486  *(PolymorphicDowncast<const LogSoftmaxDescriptor*>(&descriptor)),
487  reasonIfUnsupported);
488  case LayerType::Lstm:
489  return support.IsLstmSupported(infos[0],
490  infos[1],
491  infos[2],
492  infos[3],
493  infos[4],
494  infos[5],
495  infos[6],
496  *(PolymorphicDowncast<const LstmDescriptor*>(&descriptor)),
497  lstmParamsInfo.value(),
498  reasonIfUnsupported);
499  case LayerType::Map:
500  return true;
501  case LayerType::Maximum:
502  return support.IsMaximumSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
503  case LayerType::Mean:
504  return support.IsMeanSupported(infos[0],
505  infos[1],
506  *(PolymorphicDowncast<const MeanDescriptor*>(&descriptor)),
507  reasonIfUnsupported);
508  case LayerType::MemCopy:
509  return support.IsMemCopySupported(infos[0], infos[1], reasonIfUnsupported);
510  case LayerType::MemImport:
511  return support.IsMemImportSupported(infos[0], infos[1], reasonIfUnsupported);
512  case LayerType::Merge:
513  return support.IsMergeSupported(infos[0],
514  infos[1],
515  infos[2],
516  reasonIfUnsupported);
517  case LayerType::Minimum:
518  return support.IsMinimumSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
519  case LayerType::Multiplication:
520  return support.IsMultiplicationSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
521  case LayerType::Normalization:
522  return support.IsNormalizationSupported(infos[0],
523  infos[1],
524  *(PolymorphicDowncast<const
525  NormalizationDescriptor*>(&descriptor)),
526  reasonIfUnsupported);
527  case LayerType::Output:
528  return support.IsOutputSupported(infos[0], reasonIfUnsupported);
529  case LayerType::Pad:
530  return support.IsPadSupported(infos[0],
531  infos[1],
532  *(PolymorphicDowncast<const PadDescriptor*>(&descriptor)),
533  reasonIfUnsupported);
534  case LayerType::Permute:
535  return support.IsPermuteSupported(infos[0],
536  infos[1],
537  *(PolymorphicDowncast<const PermuteDescriptor*>(&descriptor)),
538  reasonIfUnsupported);
539  case LayerType::Pooling2d:
540  return support.IsPooling2dSupported(infos[0],
541  infos[1],
542  *(PolymorphicDowncast<const Pooling2dDescriptor*>(&descriptor)),
543  reasonIfUnsupported);
544  case LayerType::Pooling3d:
545  return support.IsPooling3dSupported(infos[0],
546  infos[1],
547  *(PolymorphicDowncast<const Pooling3dDescriptor*>(&descriptor)),
548  reasonIfUnsupported);
549  case LayerType::Prelu:
550  return support.IsPreluSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
551  case LayerType::QLstm:
552  return support.IsQLstmSupported(infos[0],
553  infos[1],
554  infos[2],
555  infos[3],
556  infos[4],
557  infos[5],
558  *(PolymorphicDowncast<const QLstmDescriptor*>(&descriptor)),
559  lstmParamsInfo.value(),
560  reasonIfUnsupported);
561  case LayerType::Quantize:
562  return support.IsQuantizeSupported(infos[0], infos[1], reasonIfUnsupported);
563  case LayerType::QuantizedLstm:
564  return support.IsQuantizedLstmSupported(infos[0],
565  infos[1],
566  infos[2],
567  infos[3],
568  infos[4],
569  quantizedLstmParamsInfo.value(),
570  reasonIfUnsupported);
571  case LayerType::Rank:
572  return true;
573  case LayerType::Reshape:
574  return support.IsReshapeSupported(infos[0],
575  infos[1],
576  *(PolymorphicDowncast<const ReshapeDescriptor*>(&descriptor)),
577  reasonIfUnsupported);
578  case LayerType::Resize:
579  return support.IsResizeSupported(infos[0],
580  infos[1],
581  *(PolymorphicDowncast<const ResizeDescriptor*>(&descriptor)),
582  reasonIfUnsupported);
583  case LayerType::Reduce:
584  return support.IsReduceSupported(infos[0],
585  infos[1],
586  *(PolymorphicDowncast<const ReduceDescriptor*>(&descriptor)),
587  reasonIfUnsupported);
588  case LayerType::ReverseV2:
589  return support.IsReverseV2Supported(infos[0],
590  infos[1],
591  infos[2],
592  reasonIfUnsupported);
593  case LayerType::Shape:
594  return support.IsShapeSupported(infos[0],
595  infos[1],
596  reasonIfUnsupported);
597  case LayerType::Slice:
598  return support.IsSliceSupported(infos[0],
599  infos[1],
600  *(PolymorphicDowncast<const SliceDescriptor*>(&descriptor)),
601  reasonIfUnsupported);
602  case LayerType::Softmax:
603  return support.IsSoftmaxSupported(infos[0],
604  infos[1],
605  *(PolymorphicDowncast<const SoftmaxDescriptor*>(&descriptor)),
606  reasonIfUnsupported);
607  case LayerType::SpaceToBatchNd:
608  return support.IsSpaceToBatchNdSupported(infos[0],
609  infos[1],
610  *(PolymorphicDowncast<const
611  SpaceToBatchNdDescriptor*>(&descriptor)),
612  reasonIfUnsupported);
613  case LayerType::SpaceToDepth:
614  return support.IsSpaceToDepthSupported(infos[0],
615  infos[1],
616  *(PolymorphicDowncast<const SpaceToDepthDescriptor*>(&descriptor)),
617  reasonIfUnsupported);
618  case LayerType::Splitter:
619  {
620  std::vector<TensorInfo> outputInfos;
621  for (uint32_t i = 1; i < infos.size(); i++)
622  {
623  outputInfos.push_back(infos[i]);
624  }
625  return support.IsSplitterSupported(infos[0],
626  {outputInfos.begin(), outputInfos.end()},
627  *(PolymorphicDowncast<const ViewsDescriptor*>(&descriptor)),
628  reasonIfUnsupported);
629  }
630  case LayerType::Stack:
631  {
632  std::vector<const TensorInfo*> inputInfos;
633  for (uint32_t i = 0; i < infos.size() - 1; i++)
634  {
635  inputInfos.push_back(&infos[i]);
636  }
637  return support.IsStackSupported(inputInfos,
638  infos[infos.size() - 1],
639  *(PolymorphicDowncast<const StackDescriptor*>(&descriptor)),
640  reasonIfUnsupported);
641  }
642  case LayerType::StridedSlice:
643  return support.IsStridedSliceSupported(infos[0],
644  infos[1],
645  *(PolymorphicDowncast<const StridedSliceDescriptor*>(&descriptor)),
646  reasonIfUnsupported);
647  case LayerType::Subtraction:
648  return support.IsSubtractionSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
649  case LayerType::Tile:
650  return support.IsTileSupported(infos[0],
651  infos[1],
652  *(PolymorphicDowncast<const TileDescriptor*>(&descriptor)),
653  reasonIfUnsupported);
654  case LayerType::Transpose:
655  return support.IsTransposeSupported(infos[0],
656  infos[1],
657  *(PolymorphicDowncast<const TransposeDescriptor*>(&descriptor)),
658  reasonIfUnsupported);
659  case LayerType::TransposeConvolution2d:
660  {
661  if (infos.size() != 4)
662  {
663  throw InvalidArgumentException("Invalid number of TransposeConvolution2d TensorInfos. "
664  "TensorInfos should be of format: {input, output, weights, biases}.");
665  }
666 
667  auto desc = *(PolymorphicDowncast<const TransposeConvolution2dDescriptor*>(&descriptor));
668  if (infos[3] == TensorInfo())
669  {
670  return support.IsTransposeConvolution2dSupported(infos[0],
671  infos[1],
672  desc,
673  infos[2],
674  EmptyOptional(),
675  reasonIfUnsupported);
676  }
677  else
678  {
679  return support.IsTransposeConvolution2dSupported(infos[0],
680  infos[1],
681  desc,
682  infos[2],
683  infos[3],
684  reasonIfUnsupported);
685  }
686  }
687  case LayerType::UnidirectionalSequenceLstm:
688  {
689  auto desc = *(PolymorphicDowncast<const UnidirectionalSequenceLstmDescriptor*>(&descriptor));
690  return support.IsUnidirectionalSequenceLstmSupported(infos[0],
691  infos[1],
692  infos[2],
693  infos[3],
694  infos[4],
695  infos[5],
696  desc,
697  lstmParamsInfo.value(),
698  reasonIfUnsupported);
699  }
700  case LayerType::Unmap:
701  return true;
702  default:
703  // layers not supported in neon by default:
704  // debug, fakequantization, precompiled,
705  // standin, switch
706  return false;
707  }
708 }
709 
710 bool NeonLayerSupport::IsLayerSupported(const LayerType& type,
711  const std::vector<TensorInfo>& infos,
712  const BaseDescriptor& descriptor,
713  const Optional<LstmInputParamsInfo>& lstmParamsInfo,
714  const Optional<QuantizedLstmInputParamsInfo>& quantizedLstmParamsInfo,
715  Optional<std::string&> reasonIfUnsupported) const
716 {
717  bool isSupported = IsLayerTypeSupported(type,
718  infos,
719  descriptor,
720  lstmParamsInfo,
721  quantizedLstmParamsInfo,
722  reasonIfUnsupported,
723  *this);
724 
725  // For android-nn-driver and support library, to run FP16 operations on CpuAcc we need at least v8.2
726  // architecture. If the available architecture is older than v8.2, we can check if the operator is
727  // supported by changing operator inputs & outputs to be FP32.
728  // This does not change the operator datatype in the above parsers to be FP32. We are simply reporting
729  // to the parsers whether the operator can be supported in ArmNN. We will then re-enter ArmNN (Network.cpp)
730  // where we will recheck IsLayerSupported() on the FP16 datatype, update the operator to be FP32,
731  // and insert convert layers around the FP32 operator.
732  if (reasonIfUnsupported.has_value())
733  {
734  std::string checkStr = "This CPU architecture does not support F16 data type, you need v8.2 or above";
735  if (!isSupported
736  && reasonIfUnsupported.value().find(checkStr) != std::string::npos)
737  {
738  std::vector<TensorInfo> newInfos;
739  for (auto info: infos)
740  {
741  newInfos.emplace_back(OverrideDataType(info, DataType::Float32));
742  }
743 
744  std::string tmpString;
745  return IsLayerTypeSupported(type,
746  newInfos,
747  descriptor,
748  lstmParamsInfo,
749  quantizedLstmParamsInfo,
750  tmpString,
751  *this);
752  }
753  }
754 
755  return isSupported;
756 }
757 
759  const TensorInfo& output,
760  const ActivationDescriptor& descriptor,
761  Optional<std::string&> reasonIfUnsupported) const
762 {
763  IgnoreUnused(descriptor);
765  reasonIfUnsupported,
766  input,
767  output,
768  descriptor);
769 }
770 
772  const TensorInfo& input1,
773  const TensorInfo& output,
774  Optional<std::string&> reasonIfUnsupported) const
775 {
777  reasonIfUnsupported,
778  input0,
779  input1,
780  output,
781  nullptr);
782 }
783 
785  const TensorInfo& output,
786  const ArgMinMaxDescriptor& descriptor,
787  Optional<std::string&> reasonIfUnsupported) const
788 {
790  reasonIfUnsupported,
791  input,
792  output,
793  descriptor);
794 }
795 
797  const TensorInfo& inputY,
798  const TensorInfo& output,
799  const BatchMatMulDescriptor& descriptor,
800  Optional<std::string&> reasonIfUnsupported) const
801 {
802  bool isFastMathEnabled = false;
803 #if defined(ARMCOMPUTENEON_ENABLED)
804  if (m_ModelContextPtr)
805  {
806  if (m_ModelContextPtr.get() != nullptr)
807  {
808  auto modelOptions = dynamic_cast<NeonBackendModelContext*>(m_ModelContextPtr.get());
809  if (modelOptions)
810  {
811  isFastMathEnabled = modelOptions->IsFastMathEnabled();
812  }
813  }
814  }
815 #endif
817  reasonIfUnsupported,
818  inputX,
819  inputY,
820  output,
821  descriptor,
822  isFastMathEnabled,
823  nullptr);
824 }
825 
827  const TensorInfo& output,
828  const TensorInfo& mean,
829  const TensorInfo& var,
830  const TensorInfo& beta,
831  const TensorInfo& gamma,
832  const BatchNormalizationDescriptor& descriptor,
833  Optional<std::string&> reasonIfUnsupported) const
834 {
836  reasonIfUnsupported,
837  input,
838  output,
839  mean,
840  var,
841  beta,
842  gamma,
843  descriptor,
844  nullptr);
845 }
846 
848  const TensorInfo& output,
849  const BatchToSpaceNdDescriptor& descriptor,
850  Optional<std::string&> reasonIfUnsupported) const
851 {
853  reasonIfUnsupported,
854  input,
855  output,
856  descriptor);
857 }
858 
860  const TensorInfo& output,
861  Optional<std::string&> reasonIfUnsupported) const
862 {
864  reasonIfUnsupported,
865  input,
866  output);
867 }
868 
870  const TensorInfo& output,
871  const ChannelShuffleDescriptor& descriptor,
872  Optional<std::string&> reasonIfUnsupported) const
873 {
875  reasonIfUnsupported,
876  input,
877  output,
878  descriptor);
879 }
880 
882  const TensorInfo& input1,
883  const TensorInfo& output,
884  const ComparisonDescriptor& descriptor,
885  Optional<std::string&> reasonIfUnsupported) const
886 {
887 
889  reasonIfUnsupported,
890  input0,
891  input1,
892  output,
893  descriptor);
894 }
895 
896 bool NeonLayerSupport::IsConcatSupported(const std::vector<const TensorInfo*> inputs,
897  const TensorInfo& output,
898  const OriginsDescriptor& descriptor,
899  Optional<std::string&> reasonIfUnsupported) const
900 {
901  if (descriptor.GetNumDimensions() <= descriptor.GetConcatAxis())
902  {
903  SetValueChecked(reasonIfUnsupported, "Neon Concat: Concat axis > Number of dimensions.");
904  return false;
905  }
906 
907  unsigned int concatInnerAxis = (descriptor.GetNumDimensions() - descriptor.GetConcatAxis()) - 1;
908  if(concatInnerAxis < 3) // Width, height, or channels
909  {
910  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonConcatWorkloadValidate,
911  reasonIfUnsupported,
912  inputs,
913  output,
914  descriptor);
915  }
916  else if (concatInnerAxis == 3)
917  {
918  for (auto& input : inputs)
919  {
920  if (input && !output.IsTypeSpaceMatch(*input)) // Cannot use sub-tensors if the types are not same space
921  {
922  SetValueChecked(reasonIfUnsupported, "Neon Concat: Types and quantization parameters must match.");
923  return false;
924  }
925  }
926  return true; // Sub-tensors support concat along batch
927  }
928  else // > 4 dimensions not supported.
929  {
930  SetValueChecked(reasonIfUnsupported, "Neon Concat: Maximum of 4 dimensions supported.");
931  return false;
932  }
933 }
934 
936  Optional<std::string&> reasonIfUnsupported) const
937 {
939  reasonIfUnsupported,
940  output);
941 }
942 
944  const TensorInfo& output,
945  Optional<std::string&> reasonIfUnsupported) const
946 {
948  reasonIfUnsupported,
949  input,
950  output);
951 }
952 
954  const TensorInfo& output,
955  Optional<std::string&> reasonIfUnsupported) const
956 {
958  reasonIfUnsupported,
959  input,
960  output);
961 }
962 
964  const TensorInfo& output,
965  const Convolution2dDescriptor& descriptor,
966  const TensorInfo& weights,
967  const Optional<TensorInfo>& biases,
968  Optional<std::string&> reasonIfUnsupported) const
969 {
970  bool isFastMathEnabled = false;
971 #if defined(ARMCOMPUTENEON_ENABLED)
972  if (m_ModelContextPtr)
973  {
974  if (m_ModelContextPtr.get() != nullptr)
975  {
976  auto modelOptions = dynamic_cast<NeonBackendModelContext*>(m_ModelContextPtr.get());
977  if (modelOptions)
978  {
979  isFastMathEnabled = modelOptions->IsFastMathEnabled();
980  }
981  }
982  }
983 #endif
984 
986  reasonIfUnsupported,
987  input,
988  output,
989  descriptor,
990  weights,
991  biases,
992  isFastMathEnabled,
993  nullptr);
994 }
995 
997  const TensorInfo& output,
998  const Convolution3dDescriptor& descriptor,
999  const TensorInfo& weights,
1000  const Optional<TensorInfo>& biases,
1001  Optional<std::string&> reasonIfUnsupported) const
1002 {
1003  bool isFastMathEnabled = false;
1004 #if defined(ARMCOMPUTENEON_ENABLED)
1005  if (m_ModelContextPtr)
1006  {
1007  if (m_ModelContextPtr.get() != nullptr)
1008  {
1009  auto modelOptions = dynamic_cast<NeonBackendModelContext*>(m_ModelContextPtr.get());
1010  if (modelOptions)
1011  {
1012  isFastMathEnabled = modelOptions->IsFastMathEnabled();
1013  }
1014  }
1015  }
1016 #endif
1017 
1019  reasonIfUnsupported,
1020  input,
1021  output,
1022  descriptor,
1023  weights,
1024  biases,
1025  isFastMathEnabled,
1026  nullptr);
1027 }
1028 
1030  const TensorInfo& output,
1031  const DepthToSpaceDescriptor& descriptor,
1032  Optional<std::string&> reasonIfUnsupported) const
1033 {
1035  reasonIfUnsupported,
1036  input,
1037  output,
1038  descriptor);
1039 }
1040 
1042  const TensorInfo& output,
1043  const DepthwiseConvolution2dDescriptor& descriptor,
1044  const TensorInfo& weights,
1045  const Optional<TensorInfo>& biases,
1046  Optional<std::string&> reasonIfUnsupported) const
1047 {
1049  reasonIfUnsupported,
1050  input,
1051  output,
1052  descriptor,
1053  weights,
1054  biases,
1055  nullptr);
1056 }
1057 
1059  const TensorInfo& output,
1060  Optional<std::string&> reasonIfUnsupported) const
1061 {
1063  reasonIfUnsupported,
1064  input,
1065  output);
1066 }
1067 
1069  const TensorInfo& output,
1070  const DepthwiseConvolution2dDescriptor& descriptor,
1071  const TensorInfo& weights,
1072  const Optional<TensorInfo>& biases,
1073  Optional<std::string&> reasonIfUnsupported) const
1074 {
1076  reasonIfUnsupported,
1077  input,
1078  output,
1079  descriptor,
1080  weights,
1081  biases,
1082  nullptr);
1083 }
1084 
1086  const TensorInfo& output,
1087  const ElementwiseUnaryDescriptor& descriptor,
1088  Optional<std::string&> reasonIfUnsupported) const
1089 {
1090  switch(descriptor.m_Operation)
1091  {
1092  case UnaryOperation::Abs:
1094  reasonIfUnsupported,
1095  input,
1096  output);
1097  case UnaryOperation::Exp:
1099  reasonIfUnsupported,
1100  input,
1101  output);
1104  reasonIfUnsupported,
1105  input,
1106  output);
1107  case UnaryOperation::Log:
1109  reasonIfUnsupported,
1110  input,
1111  output);
1112  case UnaryOperation::Neg:
1114  reasonIfUnsupported,
1115  input,
1116  output);
1117  case UnaryOperation::Rsqrt:
1119  reasonIfUnsupported,
1120  input,
1121  output);
1122  case UnaryOperation::Sin:
1124  reasonIfUnsupported,
1125  input,
1126  output);
1127  case UnaryOperation::Sqrt:
1129  reasonIfUnsupported,
1130  input,
1131  output);
1132  default:
1133  return false;
1134  }
1135 }
1136 
1138  const TensorInfo& output,
1139  const FillDescriptor& descriptor,
1140  Optional<std::string&> reasonIfUnsupported) const
1141 {
1142  armnn::IgnoreUnused(input);
1143  armnn::IgnoreUnused(output);
1144  armnn::IgnoreUnused(descriptor);
1145 
1146  return IsNeonBackendSupported(reasonIfUnsupported);
1147 }
1148 
1150  const TensorInfo& output,
1151  Optional<std::string&> reasonIfUnsupported) const
1152 {
1153  armnn::IgnoreUnused(output);
1154  return IsNeonBackendSupported(reasonIfUnsupported) &&
1155  IsSupportedForDataTypeGeneric(reasonIfUnsupported,
1156  input.GetDataType(),
1157  &FalseFuncF16<>,
1158  &TrueFunc<>,
1159  &FalseFuncU8<>,
1160  &FalseFuncI32<>,
1161  &FalseFuncU8<>);
1162 }
1163 
1165  const TensorInfo& output,
1166  const TensorInfo& weights,
1167  const TensorInfo& biases,
1168  const FullyConnectedDescriptor& descriptor,
1169  Optional<std::string&> reasonIfUnsupported) const
1170 {
1172  reasonIfUnsupported,
1173  input,
1174  output,
1175  weights,
1176  biases,
1177  descriptor,
1178  nullptr);
1179 }
1180 
1181 bool NeonLayerSupport::IsFusedSupported(const std::vector<std::reference_wrapper<TensorInfo>>& inputs,
1182  const std::vector<std::reference_wrapper<TensorInfo>>& outputs,
1183  const FusedDescriptor& descriptor,
1184  Optional<std::string&> reasonIfUnsupported) const
1185 {
1187  reasonIfUnsupported,
1188  inputs,
1189  outputs,
1190  descriptor,
1191  nullptr);
1192 }
1193 
1195  const TensorInfo& input1,
1196  const TensorInfo& output,
1197  const GatherDescriptor& descriptor,
1198  Optional<std::string&> reasonIfUnsupported) const
1199 {
1201  reasonIfUnsupported,
1202  input0,
1203  input1,
1204  output,
1205  descriptor);
1206 }
1207 
1209  const TensorInfo& input1,
1210  const TensorInfo& output,
1211  Optional<std::string&> reasonIfUnsupported) const
1212 {
1214  reasonIfUnsupported,
1215  input0,
1216  input1,
1217  output);
1218 }
1219 
1221  Optional<std::string&> reasonIfUnsupported) const
1222 {
1223  return IsNeonBackendSupported(reasonIfUnsupported, input);
1224 }
1225 
1227  const TensorInfo& output,
1228  const InstanceNormalizationDescriptor& descriptor,
1229  Optional<std::string&> reasonIfUnsupported) const
1230 {
1232  reasonIfUnsupported,
1233  input,
1234  output,
1235  descriptor);
1236 }
1237 
1239  const TensorInfo& output,
1240  const L2NormalizationDescriptor& descriptor,
1241  Optional<std::string&> reasonIfUnsupported) const
1242 {
1243  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonL2NormalizationWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
1244 }
1245 
1247  const TensorInfo& input1,
1248  const TensorInfo& output,
1249  const LogicalBinaryDescriptor& descriptor,
1250  Optional<std::string&> reasonIfUnsupported) const
1251 {
1252  switch(descriptor.m_Operation)
1253  {
1256  reasonIfUnsupported,
1257  input0,
1258  input1,
1259  output);
1262  reasonIfUnsupported,
1263  input0,
1264  input1,
1265  output);
1266  default:
1267  return false;
1268  }
1269 }
1270 
1272  const TensorInfo& output,
1273  const LogSoftmaxDescriptor& descriptor,
1274  Optional<std::string&> reasonIfUnsupported) const
1275 {
1276  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonLogSoftmaxWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
1277 }
1278 
1280  const TensorInfo& outputStateIn,
1281  const TensorInfo& cellStateIn,
1282  const TensorInfo& scratchBuffer,
1283  const TensorInfo& outputStateOut,
1284  const TensorInfo& cellStateOut,
1285  const TensorInfo& output,
1286  const LstmDescriptor& descriptor,
1287  const LstmInputParamsInfo& paramsInfo,
1288  Optional<std::string&> reasonIfUnsupported) const
1289 {
1291  reasonIfUnsupported,
1292  input,
1293  outputStateIn,
1294  cellStateIn,
1295  scratchBuffer,
1296  outputStateOut,
1297  cellStateOut,
1298  output,
1299  descriptor,
1300  paramsInfo);
1301 }
1302 
1304  const TensorInfo& input1,
1305  const TensorInfo& output,
1306  Optional<std::string&> reasonIfUnsupported) const
1307 {
1309  reasonIfUnsupported,
1310  input0,
1311  input1,
1312  output);
1313 }
1314 
1316  const TensorInfo& output,
1317  const MeanDescriptor& descriptor,
1318  Optional<std::string&> reasonIfUnsupported) const
1319 {
1321  reasonIfUnsupported,
1322  input,
1323  output,
1324  descriptor);
1325 }
1326 
1328  const TensorInfo& input1,
1329  const TensorInfo& output,
1330  Optional<std::string&> reasonIfUnsupported) const
1331 {
1333  reasonIfUnsupported,
1334  input0,
1335  input1,
1336  output);
1337 }
1338 
1340  const TensorInfo& input1,
1341  const TensorInfo& output,
1342  Optional<std::string&> reasonIfUnsupported) const
1343 {
1345  reasonIfUnsupported,
1346  input0,
1347  input1,
1348  output,
1349  nullptr);
1350 }
1351 
1353  const TensorInfo& input1,
1354  const TensorInfo& output,
1355  Optional<std::string&> reasonIfUnsupported) const
1356 {
1358  reasonIfUnsupported,
1359  input0,
1360  input1,
1361  output,
1362  nullptr);
1363 }
1364 
1366  const TensorInfo& output,
1367  const NormalizationDescriptor& descriptor,
1368  Optional<std::string&> reasonIfUnsupported) const
1369 {
1371  reasonIfUnsupported,
1372  input,
1373  output,
1374  descriptor);
1375 }
1376 
1378  Optional<std::string&> reasonIfUnsupported) const
1379 {
1380  return IsNeonBackendSupported(reasonIfUnsupported, output);
1381 }
1382 
1384  const TensorInfo& output,
1385  const PadDescriptor& descriptor,
1386  Optional<std::string&> reasonIfUnsupported) const
1387 {
1389  reasonIfUnsupported,
1390  input,
1391  output,
1392  descriptor);
1393 }
1394 
1396  const TensorInfo& output,
1397  const PermuteDescriptor& descriptor,
1398  Optional<std::string&> reasonIfUnsupported) const
1399 {
1400  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonPermuteWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
1401 }
1402 
1404  const TensorInfo& output,
1405  const Pooling2dDescriptor& descriptor,
1406  Optional<std::string&> reasonIfUnsupported) const
1407 {
1408  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonPooling2dWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
1409 }
1410 
1412  const TensorInfo& output,
1413  const Pooling3dDescriptor& descriptor,
1414  Optional<std::string&> reasonIfUnsupported) const
1415 {
1416  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonPooling3dWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
1417 }
1418 
1420  const armnn::TensorInfo &alpha,
1421  const armnn::TensorInfo &output,
1422  armnn::Optional<std::string &> reasonIfUnsupported) const
1423 {
1424  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonPreluWorkloadValidate, reasonIfUnsupported, input, alpha, output);
1425 }
1426 
1428  const TensorInfo& previousOutputIn,
1429  const TensorInfo& previousCellStateIn,
1430  const TensorInfo& outputStateOut,
1431  const TensorInfo& cellStateOut,
1432  const TensorInfo& output,
1433  const QLstmDescriptor& descriptor,
1434  const LstmInputParamsInfo& paramsInfo,
1435  Optional<std::string&> reasonIfUnsupported) const
1436 {
1437  // Check required here in order to pass IsLayerSupported for datatypes tests
1438  if (input.GetDataType() == armnn::DataType::QAsymmS8 &&
1439  previousOutputIn.GetDataType() == armnn::DataType::QAsymmS8 &&
1440  previousCellStateIn.GetDataType() == armnn::DataType::QSymmS16 &&
1441  outputStateOut.GetDataType() == armnn::DataType::QAsymmS8 &&
1442  cellStateOut.GetDataType() == armnn::DataType::QSymmS16 &&
1443  output.GetDataType() == armnn::DataType::QAsymmS8)
1444  {
1445  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonQLstmWorkloadValidate,
1446  reasonIfUnsupported,
1447  input,
1448  previousCellStateIn,
1449  previousOutputIn,
1450  cellStateOut,
1451  outputStateOut,
1452  output,
1453  descriptor,
1454  paramsInfo);
1455  }
1456  else
1457  {
1458  return false;
1459  }
1460 }
1461 
1463  const TensorInfo& output,
1464  Optional<std::string&> reasonIfUnsupported) const
1465 {
1467  reasonIfUnsupported,
1468  input,
1469  output);
1470 }
1471 
1473  const TensorInfo& cellStateIn,
1474  const TensorInfo& outputStateIn,
1475  const TensorInfo& cellStateOut,
1476  const TensorInfo& outputStateOut,
1477  const QuantizedLstmInputParamsInfo& paramsInfo,
1478  Optional<std::string&> reasonIfUnsupported) const
1479 {
1481  reasonIfUnsupported,
1482  input,
1483  cellStateIn,
1484  outputStateIn,
1485  cellStateOut,
1486  outputStateOut,
1487  paramsInfo);
1488 }
1489 
1491  const TensorInfo& output,
1492  const ReduceDescriptor& descriptor,
1493  Optional<std::string&> reasonIfUnsupported) const
1494 {
1496  reasonIfUnsupported,
1497  input,
1498  output,
1499  descriptor);
1500 }
1501 
1503  const TensorInfo& output,
1504  const ReshapeDescriptor& descriptor,
1505  Optional<std::string&> reasonIfUnsupported) const
1506 {
1507  armnn::IgnoreUnused(descriptor);
1509  reasonIfUnsupported,
1510  input,
1511  output);
1512 }
1513 
1515  const TensorInfo& output,
1516  const ResizeDescriptor& descriptor,
1517  Optional<std::string&> reasonIfUnsupported) const
1518 {
1520  reasonIfUnsupported,
1521  input,
1522  output,
1523  descriptor);
1524 }
1525 
1527  const armnn::TensorInfo &axis,
1528  const armnn::TensorInfo &output,
1529  Optional<std::string &> reasonIfUnsupported) const
1530 {
1532  reasonIfUnsupported,
1533  input,
1534  axis,
1535  output);
1536 }
1537 
1539  const TensorInfo& output,
1540  const SliceDescriptor& descriptor,
1541  Optional<std::string&> reasonIfUnsupported) const
1542 {
1544  reasonIfUnsupported,
1545  input,
1546  output,
1547  descriptor);
1548 }
1549 
1551  const TensorInfo& output,
1552  const SoftmaxDescriptor& descriptor,
1553  Optional<std::string&> reasonIfUnsupported) const
1554 {
1555  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonSoftmaxWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
1556 }
1557 
1559  const TensorInfo& output,
1560  const SpaceToBatchNdDescriptor& descriptor,
1561  Optional<std::string&> reasonIfUnsupported) const
1562 {
1564  reasonIfUnsupported,
1565  input,
1566  output,
1567  descriptor);
1568 }
1569 
1571  const TensorInfo& output,
1572  const SpaceToDepthDescriptor& descriptor,
1573  Optional<std::string&> reasonIfUnsupported) const
1574 {
1576  reasonIfUnsupported,
1577  input,
1578  output,
1579  descriptor);
1580 }
1581 
1583  const std::vector<std::reference_wrapper<TensorInfo>>& outputs,
1584  const ViewsDescriptor& descriptor,
1585  Optional<std::string&> reasonIfUnsupported) const
1586 {
1587 #if defined(ARMCOMPUTENEON_ENABLED)
1588  // Split along the last dimension, cannot use sub-tensors
1589  // as width and height of the sub-tensors do not match
1590  // the width and height of the parent tensor
1591  // in case of input with more than 2D.
1592  std::set<unsigned int> splitAxis = ComputeSplitAxis(descriptor, input.GetShape());
1593  if (descriptor.GetNumDimensions() > 2 && splitAxis.size() == 1 &&
1594  *splitAxis.begin() == descriptor.GetNumDimensions() - 1 )
1595  {
1597  reasonIfUnsupported,
1598  input,
1599  outputs,
1600  *splitAxis.begin());
1601  }
1602 #endif
1603  IgnoreUnused(descriptor);
1604  for (auto output : outputs)
1605  {
1606  if (!input.IsTypeSpaceMatch(output)) // Cannot use sub-tensors if the types are not same space
1607  {
1608  SetValueChecked(reasonIfUnsupported, "Neon Splitter: Types and quantization parameters must match.");
1609  return false;
1610  }
1611  }
1612  return true;
1613 }
1614 
1615 bool NeonLayerSupport::IsStackSupported(const std::vector<const TensorInfo*>& inputs,
1616  const TensorInfo& output,
1617  const StackDescriptor& descriptor,
1618  Optional<std::string&> reasonIfUnsupported) const
1619 {
1621  reasonIfUnsupported,
1622  inputs,
1623  output,
1624  descriptor);
1625 }
1626 
1628  const TensorInfo& output,
1629  const StridedSliceDescriptor& descriptor,
1630  Optional<std::string&> reasonIfUnsupported) const
1631 {
1633  reasonIfUnsupported,
1634  input,
1635  output,
1636  descriptor);
1637 }
1638 
1640  const TensorInfo& input1,
1641  const TensorInfo& output,
1642  Optional<std::string&> reasonIfUnsupported) const
1643 {
1645  reasonIfUnsupported,
1646  input0,
1647  input1,
1648  output,
1649  nullptr);
1650 }
1651 
1653  const TensorInfo& output,
1654  const TileDescriptor& descriptor,
1655  Optional<std::string&> reasonIfUnsupported) const
1656 {
1658  reasonIfUnsupported,
1659  input,
1660  output,
1661  descriptor);
1662 }
1663 
1665  const TensorInfo& output,
1666  const TransposeConvolution2dDescriptor& descriptor,
1667  const TensorInfo& weights,
1668  const Optional<TensorInfo>& biases,
1669  Optional<std::string&> reasonIfUnsupported) const
1670 {
1672  reasonIfUnsupported,
1673  input,
1674  output,
1675  descriptor,
1676  weights,
1677  biases);
1678 }
1679 
1681  const TensorInfo& output,
1682  const TransposeDescriptor& descriptor,
1683  Optional<std::string&> reasonIfUnsupported) const
1684 {
1685  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonTransposeWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
1686 }
1687 
1689  const TensorInfo& outputStateIn,
1690  const TensorInfo& cellStateIn,
1691  const TensorInfo& outputStateOut,
1692  const TensorInfo& cellStateOut,
1693  const TensorInfo& output,
1694  const UnidirectionalSequenceLstmDescriptor& descriptor,
1695  const LstmInputParamsInfo& paramsInfo,
1696  Optional<std::string&> reasonIfUnsupported) const
1697 {
1698  if (input.GetDataType() == armnn::DataType::QAsymmS8 &&
1699  outputStateIn.GetDataType() == armnn::DataType::QAsymmS8 &&
1700  cellStateIn.GetDataType() == armnn::DataType::QSymmS16 &&
1701  outputStateOut.GetDataType() == armnn::DataType::QAsymmS8 &&
1702  cellStateOut.GetDataType() == armnn::DataType::QSymmS16 &&
1703  output.GetDataType() == armnn::DataType::QAsymmS8)
1704  {
1705  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonUnidirectionalSequenceLstmWorkloadValidate,
1706  reasonIfUnsupported,
1707  input,
1708  outputStateIn,
1709  cellStateIn,
1710  outputStateOut,
1711  cellStateOut,
1712  output,
1713  descriptor,
1714  paramsInfo);
1715  }
1716  else
1717  {
1718  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonUnidirectionalSequenceLstmFloatWorkloadValidate,
1719  reasonIfUnsupported,
1720  input,
1721  outputStateIn,
1722  cellStateIn,
1723  outputStateOut,
1724  cellStateOut,
1725  output,
1726  descriptor,
1727  paramsInfo);
1728  }
1729 }
1730 
1731 } // namespace armnn
armnn::BatchNormalizationDescriptor
A BatchNormalizationDescriptor for the BatchNormalizationLayer.
Definition: Descriptors.hpp:828
armnn::NeonLayerSupport::IsAdditionSupported
bool IsAdditionSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:771
armnn::LayerType::SpaceToDepth
@ SpaceToDepth
NeonConcatWorkload.hpp
armnn::NeonMinimumWorkloadValidate
arm_compute::Status NeonMinimumWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output)
Validate function for validating the inputs and output.
Definition: NeonMinimumWorkload.cpp:15
NeonComparisonWorkload.hpp
armnn::NeonFullyConnectedWorkloadValidate
arm_compute::Status NeonFullyConnectedWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const TensorInfo &weights, const Optional< TensorInfo > &biases, const FullyConnectedDescriptor &descriptor, const ActivationDescriptor *activationDescriptor)
Definition: NeonFullyConnectedWorkload.cpp:24
armnn::NeonSpaceToBatchNdWorkloadValidate
arm_compute::Status NeonSpaceToBatchNdWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const SpaceToBatchNdDescriptor &descriptor)
Definition: NeonSpaceToBatchNdWorkload.cpp:15
armnn::NeonLayerSupport::IsDequantizeSupported
bool IsDequantizeSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1058
NeonConvertFp16ToFp32Workload.hpp
armnn::OriginsDescriptor::GetConcatAxis
unsigned int GetConcatAxis() const
Get the concatenation axis value.
Definition: Descriptors.cpp:162
armnn::BinaryOperation::Mul
@ Mul
armnn::NeonReverseV2WorkloadValidate
arm_compute::Status NeonReverseV2WorkloadValidate(const TensorInfo &input, const TensorInfo &axis, const TensorInfo &output)
Definition: NeonReverseV2Workload.cpp:14
NeonAbsWorkload.hpp
NeonNegWorkload.hpp
armnn::ViewsDescriptor
A ViewsDescriptor for the SplitterLayer.
Definition: Descriptors.hpp:244
armnn::NeonTileWorkloadValidate
arm_compute::Status NeonTileWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const TileDescriptor &descriptor)
Definition: NeonTileWorkload.cpp:14
armnn::LayerType::Permute
@ Permute
armnn::ActivationDescriptor
An ActivationDescriptor for the ActivationLayer.
Definition: Descriptors.hpp:36
armnn::NeonConvertFp32ToFp16WorkloadValidate
arm_compute::Status NeonConvertFp32ToFp16WorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: NeonConvertFp32ToFp16Workload.cpp:21
armnn::FullyConnectedDescriptor
A FullyConnectedDescriptor for the FullyConnectedLayer.
Definition: Descriptors.hpp:507
armnn::BinaryOperation::Add
@ Add
armnn::NeonAdditionWorkloadValidate
arm_compute::Status NeonAdditionWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ActivationDescriptor *activationDescriptor)
Definition: NeonAdditionWorkload.cpp:20
NeonSoftmaxWorkload.hpp
armnn::NeonLayerSupport::IsSpaceToBatchNdSupported
bool IsSpaceToBatchNdSupported(const TensorInfo &input, const TensorInfo &output, const SpaceToBatchNdDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1558
NeonExpWorkload.hpp
armnn::LayerType::Splitter
@ Splitter
armnn::LayerType::BatchNormalization
@ BatchNormalization
armnn::QLstmDescriptor
A QLstmDescriptor for the QLstmLayer.
Definition: Descriptors.hpp:1380
armnn::Optional
Definition: Optional.hpp:270
armnn::NeonAbsWorkloadValidate
arm_compute::Status NeonAbsWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: NeonAbsWorkload.cpp:17
armnn::NeonMultiplicationWorkloadValidate
arm_compute::Status NeonMultiplicationWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ActivationDescriptor *activationDescriptor)
Definition: NeonMultiplicationWorkload.cpp:19
NeonStridedSliceWorkload.hpp
armnn::NeonLayerSupport::IsPermuteSupported
bool IsPermuteSupported(const TensorInfo &input, const TensorInfo &output, const PermuteDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1395
NeonNormalizationFloatWorkload.hpp
armnn::NeonBackendModelContext::IsFastMathEnabled
bool IsFastMathEnabled() const
Definition: NeonBackendModelContext.cpp:53
armnn::NeonStackWorkloadValidate
arm_compute::Status NeonStackWorkloadValidate(const std::vector< const TensorInfo * > &inputs, const TensorInfo &output, const StackDescriptor &descriptor)
Definition: NeonStackWorkload.cpp:27
NeonReverseV2Workload.hpp
armnn::NeonPooling3dWorkloadValidate
arm_compute::Status NeonPooling3dWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const Pooling3dDescriptor &descriptor)
Definition: NeonPooling3dWorkload.cpp:15
armnn::NeonLayerSupport::IsStackSupported
bool IsStackSupported(const std::vector< const TensorInfo * > &inputs, const TensorInfo &output, const StackDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1615
NeonFusedWorkload.hpp
NeonAdditionWorkload.hpp
NeonMeanWorkload.hpp
armnn::Pooling3dDescriptor
A Pooling3dDescriptor for the Pooling3dLayer.
Definition: Descriptors.hpp:431
armnn::ResizeDescriptor
A ResizeDescriptor for the ResizeLayer.
Definition: Descriptors.hpp:985
armnn::NeonNegWorkloadValidate
arm_compute::Status NeonNegWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: NeonNegWorkload.cpp:17
armnn::ArgMinMaxDescriptor
An ArgMinMaxDescriptor for ArgMinMaxLayer.
Definition: Descriptors.hpp:67
armnn::NeonQLstmWorkloadValidate
arm_compute::Status NeonQLstmWorkloadValidate(const TensorInfo &input, const TensorInfo &cellStateIn, const TensorInfo &outputStateIn, const TensorInfo &cellStateOut, const TensorInfo &outputStateOut, const TensorInfo &output, const QLstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo)
Definition: NeonQLstmWorkload.cpp:243
armnn::NeonGatherWorkloadValidate
arm_compute::Status NeonGatherWorkloadValidate(const TensorInfo &input, const TensorInfo &indices, const TensorInfo &output, const GatherDescriptor &descriptor)
Definition: NeonGatherWorkload.cpp:13
armnn::NeonConvolution3dWorkloadValidate
arm_compute::Status NeonConvolution3dWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const Convolution3dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, bool isFastMathEnabled, const ActivationDescriptor *activationDescriptor)
Definition: NeonConvolution3dWorkload.cpp:24
armnn::NeonSubtractionWorkloadValidate
arm_compute::Status NeonSubtractionWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ActivationDescriptor *activationDescriptor)
Definition: NeonSubtractionWorkload.cpp:22
armnn::InstanceNormalizationDescriptor
An InstanceNormalizationDescriptor for InstanceNormalizationLayer.
Definition: Descriptors.hpp:847
armnn::NeonInstanceNormalizationWorkloadValidate
arm_compute::Status NeonInstanceNormalizationWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const InstanceNormalizationDescriptor &descriptor)
Definition: NeonInstanceNormalizationWorkload.cpp:19
NeonUnidirectionalSequenceLstmFloatWorkload.hpp
armnn::GatherDescriptor
A GatherDescriptor for the GatherLayer.
Definition: Descriptors.hpp:965
armnn::NeonBackendModelContext
The NeonBackendModelContext is used to pass in Neon specific backend ModelOptions.
Definition: NeonBackendModelContext.hpp:19
armnn::LayerType::InstanceNormalization
@ InstanceNormalization
armnn::LayerType::ConvertFp16ToFp32
@ ConvertFp16ToFp32
armnn::NeonMeanWorkloadValidate
arm_compute::Status NeonMeanWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const MeanDescriptor &descriptor)
Definition: NeonMeanWorkload.cpp:18
armnn::NeonLayerSupport
Definition: NeonLayerSupport.hpp:14
armnn::NeonLayerSupport::IsBatchMatMulSupported
bool IsBatchMatMulSupported(const TensorInfo &inputX, const TensorInfo &inputY, const TensorInfo &output, const BatchMatMulDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:796
armnn::NeonLayerSupport::IsPooling3dSupported
bool IsPooling3dSupported(const TensorInfo &input, const TensorInfo &output, const Pooling3dDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1411
armnn::NeonLayerSupport::IsConvertFp32ToFp16Supported
bool IsConvertFp32ToFp16Supported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:953
armnn::NeonUnidirectionalSequenceLstmFloatWorkloadValidate
arm_compute::Status NeonUnidirectionalSequenceLstmFloatWorkloadValidate(const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const UnidirectionalSequenceLstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo)
Definition: NeonUnidirectionalSequenceLstmFloatWorkload.cpp:510
NeonDivisionWorkload.hpp
armnn::TensorInfo
Definition: Tensor.hpp:152
armnn::LayerType::Floor
@ Floor
armnn::NeonLayerSupport::IsPreluSupported
bool IsPreluSupported(const TensorInfo &input, const TensorInfo &alpha, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1419
armnn::L2NormalizationDescriptor
A L2NormalizationDescriptor for the L2NormalizationLayer.
Definition: Descriptors.hpp:809
armnn::NeonMaximumWorkloadValidate
arm_compute::Status NeonMaximumWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output)
Definition: NeonMaximumWorkload.cpp:14
NeonPermuteWorkload.hpp
armnn::OriginsDescriptor::GetNumDimensions
uint32_t GetNumDimensions() const
Get the number of dimensions.
Definition: Descriptors.cpp:192
armnn::BinaryOperation::Sub
@ Sub
armnn::LayerType::Transpose
@ Transpose
armnn::NormalizationDescriptor
A NormalizationDescriptor for the NormalizationLayer.
Definition: Descriptors.hpp:769
NeonBatchNormalizationWorkload.hpp
armnn::LayerType::Comparison
@ Comparison
armnn::LayerType::StridedSlice
@ StridedSlice
armnn::ChannelShuffleDescriptor
A ChannelShuffleDescriptor for the ChannelShuffle operator.
Definition: Descriptors.hpp:1562
armnn::DataType::Float32
@ Float32
NeonPadWorkload.hpp
armnn::NeonLayerSupport::IsMultiplicationSupported
bool IsMultiplicationSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1339
NeonQuantizeWorkload.hpp
NeonConvolution3dWorkload.hpp
armnn::LogicalBinaryOperation::LogicalOr
@ LogicalOr
NeonChannelShuffleWorkload.hpp
armnn::NeonResizeWorkloadValidate
arm_compute::Status NeonResizeWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const ResizeDescriptor &descriptor)
Definition: NeonResizeWorkload.cpp:22
armnn::LayerType::Tile
@ Tile
armnn::NeonExpWorkloadValidate
arm_compute::Status NeonExpWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: NeonExpWorkload.cpp:17
NeonLogicalAndWorkload.hpp
NeonConvolution2dWorkload.hpp
NeonPooling2dWorkload.hpp
armnn::NeonLayerSupport::IsFusedSupported
bool IsFusedSupported(const std::vector< std::reference_wrapper< TensorInfo >> &inputs, const std::vector< std::reference_wrapper< TensorInfo >> &outputs, const FusedDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1181
armnn::NeonPermuteWorkloadValidate
arm_compute::Status NeonPermuteWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const PermuteDescriptor &descriptor)
Definition: NeonPermuteWorkload.cpp:15
armnn::LayerType::Stack
@ Stack
armnn::NeonLogicalNotWorkloadValidate
arm_compute::Status NeonLogicalNotWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: NeonLogicalNotWorkload.cpp:19
BackendRegistry.hpp
armnn::StackDescriptor
A StackDescriptor for the StackLayer.
Definition: Descriptors.hpp:1251
armnn::NeonLayerSupport::IsConstantSupported
bool IsConstantSupported(const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:935
IgnoreUnused.hpp
armnn::LayerType::Normalization
@ Normalization
armnn::NeonLayerSupport::IsLogicalBinarySupported
bool IsLogicalBinarySupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const LogicalBinaryDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported) const
Definition: NeonLayerSupport.cpp:1246
armnn::NeonBatchMatMulValidate
arm_compute::Status NeonBatchMatMulValidate(const TensorInfo &inputInfoX, const TensorInfo &inputInfoY, const TensorInfo &outputInfo, const BatchMatMulDescriptor &descriptor, const bool isFastMathEnabled, const ActivationDescriptor *activationDescriptor)
Definition: NeonBatchMatMulWorkload.cpp:19
armnn::NeonLayerSupport::IsBatchToSpaceNdSupported
bool IsBatchToSpaceNdSupported(const TensorInfo &input, const TensorInfo &output, const BatchToSpaceNdDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:847
armnn::LayerType::QuantizedLstm
@ QuantizedLstm
armnn::UnaryOperation::Neg
@ Neg
armnn::LayerType::Reduce
@ Reduce
NeonSpaceToDepthWorkload.hpp
armnn::LayerType::ElementwiseUnary
@ ElementwiseUnary
armnn::ComputeSplitAxis
std::set< unsigned int > ComputeSplitAxis(const armnn::SplitterDescriptor &desc, const TensorShape &input)
Definition: ArmComputeUtils.hpp:246
armnn::DataType::QSymmS16
@ QSymmS16
armnn::NeonConvertFp16ToFp32WorkloadValidate
arm_compute::Status NeonConvertFp16ToFp32WorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: NeonConvertFp16ToFp32Workload.cpp:19
armnn::LayerType::GatherNd
@ GatherNd
armnn::LayerType::ElementwiseBinary
@ ElementwiseBinary
NumericCast.hpp
NeonReshapeWorkload.hpp
armnn::NeonLayerSupport::IsReshapeSupported
bool IsReshapeSupported(const TensorInfo &input, const TensorInfo &output, const ReshapeDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1502
armnn::QuantizedLstmInputParamsInfo
Definition: QuantizedLstmParams.hpp:119
armnn::NeonLayerSupport::IsBatchNormalizationSupported
bool IsBatchNormalizationSupported(const TensorInfo &input, const TensorInfo &output, const TensorInfo &mean, const TensorInfo &var, const TensorInfo &beta, const TensorInfo &gamma, const BatchNormalizationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:826
armnn::LayerType::ConvertFp32ToFp16
@ ConvertFp32ToFp16
armnn::NeonLogWorkloadValidate
arm_compute::Status NeonLogWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: NeonLogWorkload.cpp:17
armnn::NeonArgMinMaxWorkloadValidate
arm_compute::Status NeonArgMinMaxWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const ArgMinMaxDescriptor &descriptor)
Definition: NeonArgMinMaxWorkload.cpp:31
armnn::NeonLayerSupport::IsGatherNdSupported
bool IsGatherNdSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported) const
Definition: NeonLayerSupport.cpp:1208
NeonArgMinMaxWorkload.hpp
armnn::NeonLayerSupport::IsSoftmaxSupported
bool IsSoftmaxSupported(const TensorInfo &input, const TensorInfo &output, const SoftmaxDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1550
armnn::NeonLogSoftmaxWorkloadValidate
arm_compute::Status NeonLogSoftmaxWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const LogSoftmaxDescriptor &descriptor)
Definition: NeonLogSoftmaxWorkload.cpp:19
armnn::NeonNormalizationWorkloadValidate
arm_compute::Status NeonNormalizationWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const NormalizationDescriptor &descriptor)
Definition: NeonNormalizationFloatWorkload.cpp:49
LayerSupportCommon.hpp
armnn::NeonLayerSupport::IsSpaceToDepthSupported
bool IsSpaceToDepthSupported(const TensorInfo &input, const TensorInfo &output, const SpaceToDepthDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1570
armnn::NeonLayerSupport::IsLayerSupported
bool IsLayerSupported(const LayerType &type, const std::vector< TensorInfo > &infos, const BaseDescriptor &descriptor, const Optional< LstmInputParamsInfo > &lstmParamsInfo, const Optional< QuantizedLstmInputParamsInfo > &quantizedLstmParamsInfo, Optional< std::string & > reasonIfUnsupported) const override
Default implementation of the ILayerSupport interface; backends should implement this as a switch statement...
Definition: NeonLayerSupport.cpp:710
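
For orientation, a minimal sketch of how a caller might query this entry point. The tensor shapes, the ReLU settings, and the choice of an Activation layer are purely illustrative, and the backend-internal include path for NeonLayerSupport.hpp is assumed.

#include "NeonLayerSupport.hpp"      // backend-internal header; include path assumed
#include <armnn/Descriptors.hpp>
#include <armnn/Optional.hpp>
#include <armnn/Tensor.hpp>
#include <armnn/Types.hpp>
#include <string>
#include <vector>

bool QueryReluSupport()
{
    armnn::NeonLayerSupport support;

    // Hypothetical 1x2x2x3 Float32 tensors for input and output.
    armnn::TensorInfo input({1, 2, 2, 3}, armnn::DataType::Float32);
    armnn::TensorInfo output({1, 2, 2, 3}, armnn::DataType::Float32);

    armnn::ActivationDescriptor activation;
    activation.m_Function = armnn::ActivationFunction::ReLu;

    std::string reason;
    return support.IsLayerSupported(armnn::LayerType::Activation,
                                    {input, output},          // infos[0] = input, infos[1] = output
                                    activation,
                                    armnn::EmptyOptional(),   // no LstmInputParamsInfo
                                    armnn::EmptyOptional(),   // no QuantizedLstmInputParamsInfo
                                    armnn::Optional<std::string&>(reason));
}
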
armnn::NeonConstantWorkloadValidate
arm_compute::Status NeonConstantWorkloadValidate(const TensorInfo &output)
Definition: NeonConstantWorkload.cpp:20
armnn::LayerType::Slice
@ Slice
NeonLogSoftmaxWorkload.hpp
armnn::NeonReduceWorkloadValidate
arm_compute::Status NeonReduceWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const ReduceDescriptor &descriptor)
Definition: NeonReduceWorkload.cpp:19
armnn::BinaryOperation::Maximum
@ Maximum
armnn::FusedDescriptor
A FusedDescriptor for the FusedLayer.
Definition: Descriptors.hpp:944
armnn::NeonLayerSupport::IsResizeSupported
bool IsResizeSupported(const TensorInfo &input, const TensorInfo &output, const ResizeDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1514
armnn::NeonPadWorkloadValidate
arm_compute::Status NeonPadWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const PadDescriptor &descriptor)
Definition: NeonPadWorkload.cpp:59
NeonGatherWorkload.hpp
armnn::LayerType::ChannelShuffle
@ ChannelShuffle
armnn::NeonGatherNdWorkloadValidate
arm_compute::Status NeonGatherNdWorkloadValidate(const TensorInfo &paramsInfo, const TensorInfo &indicesInfo, const TensorInfo &outputInfo)
Definition: NeonGatherNdWorkload.cpp:14
armnn::NeonConcatWorkloadValidate
arm_compute::Status NeonConcatWorkloadValidate(const std::vector< const TensorInfo * > &inputs, const TensorInfo &output, const OriginsDescriptor &descriptor)
Definition: NeonConcatWorkload.cpp:27
armnn::BinaryOperation::SqDiff
@ SqDiff
armnn::NeonLstmFloatWorkloadValidate
arm_compute::Status NeonLstmFloatWorkloadValidate(const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &scratchBuffer, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const LstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo)
Definition: NeonLstmFloatWorkload.cpp:253
armnn::NeonLayerSupport::IsReduceSupported
bool IsReduceSupported(const TensorInfo &input, const TensorInfo &output, const ReduceDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1490
armnn::UnaryOperation::Rsqrt
@ Rsqrt
armnn::NeonDivisionWorkloadValidate
arm_compute::Status NeonDivisionWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ActivationDescriptor *activationDescriptor)
Definition: NeonDivisionWorkload.cpp:18
armnn::NeonLayerSupport::IsNormalizationSupported
bool IsNormalizationSupported(const TensorInfo &input, const TensorInfo &output, const NormalizationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1365
NeonPooling3dWorkload.hpp
armnn::NeonLayerSupport::IsSliceSupported
bool IsSliceSupported(const TensorInfo &input, const TensorInfo &output, const SliceDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1538
armnn::UnaryOperation::Sqrt
@ Sqrt
armnn::UnaryOperation::LogicalNot
@ LogicalNot
armnn::LayerType::Subtraction
@ Subtraction
armnn::LayerType::Prelu
@ Prelu
armnn::NeonSqrtWorkloadValidate
arm_compute::Status NeonSqrtWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: NeonSqrtWorkload.cpp:18
armnn::NeonDepthToSpaceWorkloadValidate
arm_compute::Status NeonDepthToSpaceWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const DepthToSpaceDescriptor &descriptor)
Definition: NeonDepthToSpaceWorkload.cpp:19
armnn::NeonLayerSupport::IsSubtractionSupported
bool IsSubtractionSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1639
armnn::LayerType::LogicalBinary
@ LogicalBinary
armnn::LogicalBinaryDescriptor::m_Operation
LogicalBinaryOperation m_Operation
Specifies the logical operation to execute.
Definition: Descriptors.hpp:1534
NeonReduceWorkload.hpp
armnn::LayerType::Concat
@ Concat
armnn::PadDescriptor
A PadDescriptor for the PadLayer.
Definition: Descriptors.hpp:1196
armnn::NeonLayerSupport::IsTileSupported
bool IsTileSupported(const TensorInfo &input0, const TensorInfo &output, const TileDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1652
armnn::UnaryOperation::Exp
@ Exp
armnn::TransposeDescriptor
A TransposeDescriptor for the TransposeLayer.
Definition: Descriptors.hpp:1490
armnn::LayerSupportBase::IsDetectionPostProcessSupported
bool IsDetectionPostProcessSupported(const TensorInfo &boxEncodings, const TensorInfo &scores, const TensorInfo &anchors, const TensorInfo &detectionBoxes, const TensorInfo &detectionClasses, const TensorInfo &detectionScores, const TensorInfo &numDetections, const DetectionPostProcessDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: LayerSupportBase.cpp:85
NeonLogicalOrWorkload.hpp
armnn::NeonLayerSupport::IsQuantizeSupported
bool IsQuantizeSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1462
armnn::LayerType::TransposeConvolution2d
@ TransposeConvolution2d
armnn::LayerType::Merge
@ Merge
PolymorphicDowncast.hpp
armnn::EmptyOptional
EmptyOptional is used to initialize the Optional class in case we want to have a default value for an O...
Definition: Optional.hpp:32
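
As a small illustration of the default-argument pattern above: most IsXxxSupported overloads default reasonIfUnsupported to EmptyOptional(), and a caller can instead bind a local std::string to capture the failure reason. The tensor shape below is made up, and the snippet assumes the includes from the earlier NeonLayerSupport sketch.

armnn::NeonLayerSupport support;
armnn::TensorInfo info({1, 4}, armnn::DataType::Float32);   // hypothetical tensor

// 1) Rely on the EmptyOptional() default: no reason is reported on failure.
bool inputOk = support.IsInputSupported(info);

// 2) Bind an Optional<std::string&> to a local string to capture the reason.
std::string reason;
bool constantOk = support.IsConstantSupported(info, armnn::Optional<std::string&>(reason));
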
armnn::SliceDescriptor
A SliceDescriptor for the SliceLayer.
Definition: Descriptors.hpp:1228
armnn::DataType
DataType
Definition: Types.hpp:48
armnn::LayerType::Softmax
@ Softmax
armnn::NeonTransposeWorkloadValidate
arm_compute::Status NeonTransposeWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const TransposeDescriptor &descriptor)
Definition: NeonTransposeWorkload.cpp:15
armnn::NeonSoftmaxWorkloadValidate
arm_compute::Status NeonSoftmaxWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const SoftmaxDescriptor &descriptor)
Definition: NeonSoftmaxWorkload.cpp:19
armnn::NeonLayerSupport::IsOutputSupported
bool IsOutputSupported(const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1377
armnn::PolymorphicDowncast
DestType PolymorphicDowncast(SourceType *value)
Polymorphic downcast for built-in pointers only.
Definition: PolymorphicDowncast.hpp:74
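
A short sketch of the downcast pattern used for each case in IsLayerSupported, where the concrete descriptor is recovered from the BaseDescriptor reference. The wrapper function here is hypothetical and assumes the includes from the earlier sketch.

// Hypothetical helper: only valid when the descriptor really is an ActivationDescriptor.
bool IsRelu(const armnn::BaseDescriptor& descriptor)
{
    const auto* activation =
        armnn::PolymorphicDowncast<const armnn::ActivationDescriptor*>(&descriptor);
    return activation->m_Function == armnn::ActivationFunction::ReLu;
}
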
NeonDepthToSpaceWorkload.hpp
armnn::NeonSpaceToDepthWorkloadValidate
arm_compute::Status NeonSpaceToDepthWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const SpaceToDepthDescriptor &descriptor)
Definition: NeonSpaceToDepthWorkload.cpp:19
armnn::TensorInfo::IsTypeSpaceMatch
bool IsTypeSpaceMatch(const TensorInfo &other) const
Check that the types are the same and, if quantized, that the quantization parameters are the same.
Definition: Tensor.cpp:432
armnn::NeonLayerSupport::IsMinimumSupported
bool IsMinimumSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1327
armnn::ReshapeDescriptor
A ReshapeDescriptor for the ReshapeLayer.
Definition: Descriptors.hpp:1023
NeonDepthwiseConvolutionWorkload.hpp
armnn::LayerSupportBase::IsMergeSupported
bool IsMergeSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: LayerSupportBase.cpp:112
armnn::InvalidArgumentException
Definition: Exceptions.hpp:80
armnn::UnaryOperation::Sin
@ Sin
armnn::LayerType::Quantize
@ Quantize
armnn::NeonLayerSupport::IsPadSupported
bool IsPadSupported(const TensorInfo &input, const TensorInfo &output, const PadDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1383
NeonTransposeWorkload.hpp
armnn::LayerSupportBase::IsMemImportSupported
bool IsMemImportSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: LayerSupportBase.cpp:105
armnn::NeonLayerSupport::IsDepthToSpaceSupported
bool IsDepthToSpaceSupported(const TensorInfo &input, const TensorInfo &output, const DepthToSpaceDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1029
armnn::NeonLayerSupport::IsTransposeSupported
bool IsTransposeSupported(const TensorInfo &input, const TensorInfo &output, const TransposeDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1680
armnn::LayerType::Multiplication
@ Multiplication
armnn::PermuteDescriptor
A PermuteDescriptor for the PermuteLayer.
Definition: Descriptors.hpp:149
armnn::BatchMatMulDescriptor
A BatchMatMulDescriptor for the BatchMatMul operator.
Definition: Descriptors.hpp:1584
armnn::LayerType::Addition
@ Addition
armnn::NeonPreluWorkloadValidate
arm_compute::Status NeonPreluWorkloadValidate(const TensorInfo &input, const TensorInfo &alpha, const TensorInfo &output)
Definition: NeonPreluWorkload.cpp:17
NeonBatchToSpaceNdWorkload.hpp
armnn::NeonDequantizeWorkloadValidate
arm_compute::Status NeonDequantizeWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: NeonDequantizeWorkload.cpp:22
ArmComputeUtils.hpp
armnn::SpaceToBatchNdDescriptor
A SpaceToBatchNdDescriptor for the SpaceToBatchNdLayer.
Definition: Descriptors.hpp:1043
armnn::NeonLayerSupport::IsQuantizedLstmSupported
bool IsQuantizedLstmSupported(const TensorInfo &input, const TensorInfo &cellStateIn, const TensorInfo &outputStateIn, const TensorInfo &cellStateOut, const TensorInfo &outputStateOut, const QuantizedLstmInputParamsInfo &paramsInfo, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1472
armnn::Convolution3dDescriptor
A Convolution3dDescriptor for the Convolution3dLayer.
Definition: Descriptors.hpp:588
armnn::LayerType::DepthToSpace
@ DepthToSpace
NeonQLstmWorkload.hpp
armnn::BaseDescriptor
Base class for all descriptors.
Definition: Descriptors.hpp:22
NeonRsqrtWorkload.hpp
armnn::NeonLayerSupport::IsConcatSupported
bool IsConcatSupported(const std::vector< const TensorInfo * > inputs, const TensorInfo &output, const OriginsDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:896
armnn::NeonLogicalAndWorkloadValidate
arm_compute::Status NeonLogicalAndWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output)
Definition: NeonLogicalAndWorkload.cpp:18
NeonSubtractionWorkload.hpp
NeonCastWorkload.hpp
armnn::NeonLayerSupport::IsReverseV2Supported
bool IsReverseV2Supported(const TensorInfo &input, const TensorInfo &axis, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported) const
Definition: NeonLayerSupport.cpp:1526
armnn::NeonDepthwiseConvolutionWorkloadValidate
arm_compute::Status NeonDepthwiseConvolutionWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, const ActivationDescriptor *activationDescriptor)
Definition: NeonDepthwiseConvolutionWorkload.cpp:29
armnn::BoostLogSeverityMapping::info
@ info
armnn::BinaryOperation::Power
@ Power
armnn::LayerType::DetectionPostProcess
@ DetectionPostProcess
armnn::LayerType::MemImport
@ MemImport
armnn::LayerType::Pooling2d
@ Pooling2d
armnn::NeonLayerSupport::IsFullyConnectedSupported
bool IsFullyConnectedSupported(const TensorInfo &input, const TensorInfo &output, const TensorInfo &weights, const TensorInfo &biases, const FullyConnectedDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1164
armnn::NeonLayerSupport::IsElementwiseUnarySupported
bool IsElementwiseUnarySupported(const TensorInfo &input, const TensorInfo &output, const ElementwiseUnaryDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1085
armnn::TensorInfo::GetDataType
DataType GetDataType() const
Definition: Tensor.hpp:200
armnn::NeonLayerSupport::IsDepthwiseConvolutionSupported
bool IsDepthwiseConvolutionSupported(const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1041
armnn::LayerType::Division
@ Division
armnn::NeonQuantizedLstmWorkloadValidate
arm_compute::Status NeonQuantizedLstmWorkloadValidate(const TensorInfo &input, const TensorInfo &cellStateIn, const TensorInfo &outputStateIn, const TensorInfo &cellStateOut, const TensorInfo &outputStateOut, const QuantizedLstmInputParamsInfo &paramsInfo)
Definition: NeonQuantizedLstmWorkload.cpp:131
armnn::NeonActivationWorkloadValidate
arm_compute::Status NeonActivationWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const ActivationDescriptor &descriptor)
Definition: NeonActivationWorkload.cpp:17
armnn::LayerType::Shape
@ Shape
armnn::NeonLayerSupport::IsFillSupported
bool IsFillSupported(const TensorInfo &input, const TensorInfo &output, const FillDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1137
NeonLogWorkload.hpp
NeonResizeWorkload.hpp
armnn::BatchToSpaceNdDescriptor
A BatchToSpaceNdDescriptor for the BatchToSpaceNdLayer.
Definition: Descriptors.hpp:875
armnn::Convolution2dDescriptor
A Convolution2dDescriptor for the Convolution2dLayer.
Definition: Descriptors.hpp:534
armnn::ComparisonDescriptor
A ComparisonDescriptor for the ComparisonLayer.
Definition: Descriptors.hpp:89
armnn::FillDescriptor
A FillDescriptor for the FillLayer.
Definition: Descriptors.hpp:925
armnn::DataType::QAsymmS8
@ QAsymmS8
armnn::NeonQuantizeWorkloadValidate
arm_compute::Status NeonQuantizeWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: NeonQuantizeWorkload.cpp:18
armnn::ElementwiseUnaryDescriptor::m_Operation
UnaryOperation m_Operation
Specifies the elementwise unary operation to execute.
Definition: Descriptors.hpp:145
NeonTransposeConvolution2dWorkload.hpp
armnn::LayerType::FullyConnected
@ FullyConnected
armnn::LayerType::Gather
@ Gather
NeonFullyConnectedWorkload.hpp
NeonSinWorkload.hpp
armnn::LayerType::Pooling3d
@ Pooling3d
armnn::NeonBatchToSpaceNdWorkloadValidate
arm_compute::Status NeonBatchToSpaceNdWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const BatchToSpaceNdDescriptor &descriptor)
Definition: NeonBatchToSpaceNdWorkload.cpp:15
armnn::NeonCastValidate
arm_compute::Status NeonCastValidate(const TensorInfo &input, const TensorInfo &output)
Definition: NeonCastWorkload.cpp:19
armnn::NeonLayerSupport::IsChannelShuffleSupported
bool IsChannelShuffleSupported(const TensorInfo &input, const TensorInfo &output, const ChannelShuffleDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:869
armnn::UnaryOperation::Log
@ Log
armnn::LayerType::LogSoftmax
@ LogSoftmax
armnn::LayerType::BatchMatMul
@ BatchMatMul
armnn::LogicalBinaryOperation::LogicalAnd
@ LogicalAnd
armnn::LayerType::DepthwiseConvolution2d
@ DepthwiseConvolution2d
armnn::LayerType::Cast
@ Cast
armnn::LayerSupportBase::IsShapeSupported
bool IsShapeSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: LayerSupportBase.cpp:131
armnn::LayerType::BatchToSpaceNd
@ BatchToSpaceNd
NeonInstanceNormalizationWorkload.hpp
FORWARD_WORKLOAD_VALIDATE_FUNC
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported,...)
Definition: NeonLayerSupport.cpp:154
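
Sketch of the pattern this macro enables: an IsXxxSupported method forwards straight to the matching NeonXxxWorkloadValidate function, which returns an arm_compute::Status that is converted into a bool plus an optional reason. The body below mirrors how softmax is wired up in this file; treat it as an approximation rather than a verbatim copy.

// Inside the armnn namespace, with the includes used by NeonLayerSupport.cpp.
bool NeonLayerSupport::IsSoftmaxSupported(const TensorInfo& input,
                                          const TensorInfo& output,
                                          const SoftmaxDescriptor& descriptor,
                                          Optional<std::string&> reasonIfUnsupported) const
{
    // Expands to IsWorkloadSupported(NeonSoftmaxWorkloadValidate, ...) when NEON is enabled,
    // or to IsNeonBackendSupported(reasonIfUnsupported, ...) when it is not.
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonSoftmaxWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}
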
armnn::LstmDescriptor
An LstmDescriptor for the LstmLayer.
Definition: Descriptors.hpp:1102
armnn::StridedSliceDescriptor
A StridedSliceDescriptor for the StridedSliceLayer.
Definition: Descriptors.hpp:1303
armnn::NeonLayerSupport::IsL2NormalizationSupported
bool IsL2NormalizationSupported(const TensorInfo &input, const TensorInfo &output, const L2NormalizationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1238
Tensor.hpp
armnn::Status
Status
Definition: Types.hpp:42
armnn::NeonLayerSupport::IsArgMinMaxSupported
bool IsArgMinMaxSupported(const TensorInfo &input, const TensorInfo &output, const ArgMinMaxDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:784
armnn::NeonLayerSupport::IsInstanceNormalizationSupported
bool IsInstanceNormalizationSupported(const TensorInfo &input, const TensorInfo &output, const InstanceNormalizationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1226
armnn::LogicalBinaryDescriptor
A LogicalBinaryDescriptor for the LogicalBinaryLayer.
Definition: Descriptors.hpp:1518
armnn::LayerType::Reshape
@ Reshape
armnn::TensorInfo::GetShape
const TensorShape & GetShape() const
Definition: Tensor.hpp:193
armnn::NeonLayerSupport::IsTransposeConvolution2dSupported
bool IsTransposeConvolution2dSupported(const TensorInfo &input, const TensorInfo &output, const TransposeConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1664
armnn::NeonSplitterWorkloadValidate
arm_compute::Status NeonSplitterWorkloadValidate(const TensorInfo &input, const std::vector< std::reference_wrapper< TensorInfo >> &outputs, unsigned int splitAxis)
Definition: NeonSplitterWorkload.cpp:32
NeonSpaceToBatchNdWorkload.hpp
NeonConstantWorkload.hpp
armnn::LayerType::SpaceToBatchNd
@ SpaceToBatchNd
armnn::NeonFusedWorkloadValidate
arm_compute::Status NeonFusedWorkloadValidate(const std::vector< std::reference_wrapper< TensorInfo >> &inputInfos, const std::vector< std::reference_wrapper< TensorInfo >> &outputInfos, const FusedDescriptor &fusedDescriptor, const ActivationDescriptor *activationDescriptor)
Definition: NeonFusedWorkload.cpp:22
armnn::LayerType::Fill
@ Fill
armnn::NeonRsqrtWorkloadValidate
arm_compute::Status NeonRsqrtWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: NeonRsqrtWorkload.cpp:18
armnn::NeonUnidirectionalSequenceLstmWorkloadValidate
arm_compute::Status NeonUnidirectionalSequenceLstmWorkloadValidate(const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const UnidirectionalSequenceLstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo)
Definition: NeonUnidirectionalSequenceLstmWorkload.cpp:491
armnn::NeonReshapeWorkloadValidate
arm_compute::Status NeonReshapeWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: NeonReshapeWorkload.cpp:17
armnn::LayerType::L2Normalization
@ L2Normalization
NeonLstmFloatWorkload.hpp
armnn::LayerType::Fused
@ Fused
armnn::NeonConvolution2dWorkloadValidate
arm_compute::Status NeonConvolution2dWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const Convolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, bool isFastMathEnabled, const ActivationDescriptor *activationDescriptor)
Definition: NeonConvolution2dWorkload.cpp:24
NeonMaximumWorkload.hpp
armnn::IgnoreUnused
void IgnoreUnused(Ts &&...)
Definition: IgnoreUnused.hpp:14
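
A tiny illustration of the helper above, which this file uses to suppress unused-parameter warnings; the stub function name is made up and the snippet assumes the earlier includes.

// Hypothetical stub: every parameter is intentionally unused.
bool IsAlwaysUnsupported(const armnn::TensorInfo& input,
                         armnn::Optional<std::string&> reasonIfUnsupported)
{
    armnn::IgnoreUnused(input, reasonIfUnsupported);
    return false;
}
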
armnn::NeonBatchNormalizationValidate
arm_compute::Status NeonBatchNormalizationValidate(const TensorInfo &input, const TensorInfo &output, const TensorInfo &mean, const TensorInfo &var, const TensorInfo &beta, const TensorInfo &gamma, const BatchNormalizationDescriptor &descriptor, const ActivationDescriptor *activationDescriptor)
Definition: NeonBatchNormalizationWorkload.cpp:24
armnn::NeonLayerSupport::IsCastSupported
bool IsCastSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:859
NeonQuantizedLstmWorkload.hpp
armnn::ViewsDescriptor::GetNumDimensions
uint32_t GetNumDimensions() const
Get the number of dimensions.
Definition: Descriptors.cpp:306
armnn::LayerType::Minimum
@ Minimum
armnn::NeonSinWorkloadValidate
arm_compute::Status NeonSinWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: NeonSinWorkload.cpp:17
armnn::NeonLayerSupport::IsFloorSupported
bool IsFloorSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1149
armnn::NeonLayerSupport::IsConvolution3dSupported
bool IsConvolution3dSupported(const TensorInfo &input, const TensorInfo &output, const Convolution3dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:996
NeonBatchMatMulWorkload.hpp
NeonMinimumWorkload.hpp
armnn::NeonLayerSupport::IsInputSupported
bool IsInputSupported(const TensorInfo &input, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1220
armnn::IsSupportedForDataTypeGeneric
bool IsSupportedForDataTypeGeneric(Optional< std::string & > reasonIfUnsupported, DataType dataType, Float16Func float16FuncPtr, Float32Func float32FuncPtr, Uint8Func uint8FuncPtr, Int32Func int32FuncPtr, BooleanFunc booleanFuncPtr, Params &&... params)
Definition: LayerSupportCommon.hpp:27
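
Sketch of how the generic dispatcher is typically fed with the TrueFunc/FalseFunc helpers from LayerSupportCommon.hpp, selecting the functor that matches the tensor's DataType. The wrapper function and the chosen data-type policy are illustrative assumptions.

// Hypothetical check: accept Float16/Float32/QAsymmU8, reject Int32 and Boolean.
bool IsExampleSupported(const armnn::TensorInfo& info,
                        armnn::Optional<std::string&> reasonIfUnsupported)
{
    return armnn::IsSupportedForDataTypeGeneric(reasonIfUnsupported,
                                                info.GetDataType(),
                                                &armnn::TrueFunc<>,   // Float16
                                                &armnn::TrueFunc<>,   // Float32
                                                &armnn::TrueFunc<>,   // QAsymmU8
                                                &armnn::FalseFunc<>,  // Int32
                                                &armnn::FalseFunc<>); // Boolean
}
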
armnn::LayerType::UnidirectionalSequenceLstm
@ UnidirectionalSequenceLstm
armnn::NeonLayerSupport::IsMeanSupported
bool IsMeanSupported(const TensorInfo &input, const TensorInfo &output, const MeanDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1315
armnn::NeonLayerSupport::IsGatherSupported
bool IsGatherSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const GatherDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported) const
Definition: NeonLayerSupport.cpp:1194
NeonLogicalNotWorkload.hpp
armnn::BinaryOperation::Minimum
@ Minimum
armnn::LayerType::Map
@ Map
armnn::LayerType::ReverseV2
@ ReverseV2
armnn::OriginsDescriptor
An OriginsDescriptor for the ConcatLayer.
Definition: Descriptors.hpp:201
armnn::NeonStridedSliceWorkloadValidate
arm_compute::Status NeonStridedSliceWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const StridedSliceDescriptor &descriptor)
Definition: NeonStridedSliceWorkload.cpp:19
armnn::LayerType::MemCopy
@ MemCopy
Exceptions.hpp
armnn
Copyright (c) 2021 ARM Limited and Contributors.
Definition: 01_00_quick_start.dox:6
armnn::ElementwiseUnaryDescriptor
An ElementwiseUnaryDescriptor for the ElementwiseUnaryLayer.
Definition: Descriptors.hpp:129
armnn::TransposeConvolution2dDescriptor
A TransposeConvolution2dDescriptor for the TransposeConvolution2dLayer.
Definition: Descriptors.hpp:1440
Types.hpp
armnn::LayerType::ArgMinMax
@ ArgMinMax
armnn::LayerType::Pad
@ Pad
armnn::NeonLayerSupport::NeonLayerSupport
NeonLayerSupport()
Definition: NeonLayerSupport.cpp:167
armnn::LstmInputParamsInfo
Definition: LstmParams.hpp:63
NeonSqrtWorkload.hpp
armnn::LayerType::Rank
@ Rank
armnn::LayerType::Mean
@ Mean
ArmComputeTensorUtils.hpp
armnn::UnaryOperation::Abs
@ Abs
armnn::NeonLayerSupport::IsConvertFp16ToFp32Supported
bool IsConvertFp16ToFp32Supported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:943
NeonStackWorkload.hpp
NeonBackendModelContext.hpp
armnn::NeonSliceWorkloadValidate
arm_compute::Status NeonSliceWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const SliceDescriptor &descriptor)
Definition: NeonSliceWorkload.cpp:21
NeonPreluWorkload.hpp
armnn::NeonLayerSupport::IsDivisionSupported
bool IsDivisionSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1352
armnn::LayerType::Input
@ Input
armnn::NeonLayerSupport::IsDilatedDepthwiseConvolutionSupported
bool IsDilatedDepthwiseConvolutionSupported(const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reason=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1068
armnn::LayerType::Resize
@ Resize
NeonElementwiseBinaryWorkload.hpp
NeonSliceWorkload.hpp
armnn::NeonLayerSupport::IsMaximumSupported
bool IsMaximumSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1303
NeonGatherNdWorkload.hpp
NeonTileWorkload.hpp
armnn::NeonElementwiseBinaryWorkloadValidate
arm_compute::Status NeonElementwiseBinaryWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ElementwiseBinaryDescriptor &descriptor, const ActivationDescriptor *activationDescriptor)
Definition: NeonElementwiseBinaryWorkload.cpp:20
armnn::NeonPooling2dWorkloadValidate
arm_compute::Status NeonPooling2dWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const Pooling2dDescriptor &descriptor)
Definition: NeonPooling2dWorkload.cpp:22
armnn::IsLayerTypeSupported
bool IsLayerTypeSupported(const LayerType &type, const std::vector< TensorInfo > &infos, const BaseDescriptor &descriptor, const Optional< LstmInputParamsInfo > &lstmParamsInfo, const Optional< QuantizedLstmInputParamsInfo > &quantizedLstmParamsInfo, Optional< std::string & > reasonIfUnsupported, const NeonLayerSupport &support)
Definition: NeonLayerSupport.cpp:172
armnn::SetValueChecked
void SetValueChecked(Optional< T & > optionalRef, V &&val)
Definition: LayerSupportCommon.hpp:17
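
A short sketch of the intended use: write a failure reason only when the caller supplied a string reference, as this file does when the library is built without NEON support. The wrapper function is hypothetical.

bool RejectUnsupported(armnn::Optional<std::string&> reasonIfUnsupported)
{
    // No-op if reasonIfUnsupported is an EmptyOptional; assigns through the reference otherwise.
    armnn::SetValueChecked(reasonIfUnsupported, "Illustrative reason: layer not supported");
    return false;
}
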
armnn::NeonLayerSupport::IsComparisonSupported
bool IsComparisonSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ComparisonDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:881
armnn::NeonChannelShuffleValidate
arm_compute::Status NeonChannelShuffleValidate(const TensorInfo &input, const TensorInfo &output, const ChannelShuffleDescriptor &descriptor)
Definition: NeonChannelShuffleWorkload.cpp:17
armnn::NeonL2NormalizationWorkloadValidate
arm_compute::Status NeonL2NormalizationWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const L2NormalizationDescriptor &descriptor)
Definition: NeonL2NormalizationFloatWorkload.cpp:19
armnn::NeonComparisonWorkloadValidate
arm_compute::Status NeonComparisonWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ComparisonDescriptor &descriptor)
Definition: NeonComparisonWorkload.cpp:16
armnn::BinaryOperation::Div
@ Div
NeonMultiplicationWorkload.hpp
armnn::LayerType::Convolution2d
@ Convolution2d
NeonUnidirectionalSequenceLstmWorkload.hpp
armnn::Pooling2dDescriptor
A Pooling2dDescriptor for the Pooling2dLayer.
Definition: Descriptors.hpp:371
NeonL2NormalizationFloatWorkload.hpp
armnn::LayerType::Maximum
@ Maximum
armnn::LayerType::Activation
@ Activation
armnn::DepthwiseConvolution2dDescriptor
A DepthwiseConvolution2dDescriptor for the DepthwiseConvolution2dLayer.
Definition: Descriptors.hpp:659
armnn::LayerType::Lstm
@ Lstm
armnn::LayerSupportBase::IsMemCopySupported
bool IsMemCopySupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: LayerSupportBase.cpp:98
armnn::LayerType::Dequantize
@ Dequantize
armnn::NeonLayerSupport::IsUnidirectionalSequenceLstmSupported
bool IsUnidirectionalSequenceLstmSupported(const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const UnidirectionalSequenceLstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string & > reasonIfUnsupported) const
Definition: NeonLayerSupport.cpp:1688
armnn::LayerType::Convolution3d
@ Convolution3d
armnn::ReduceDescriptor
A ReduceDescriptor for the REDUCE operators.
Definition: Descriptors.hpp:1538
armnn::NeonLayerSupport::IsQLstmSupported
bool IsQLstmSupported(const TensorInfo &input, const TensorInfo &previousOutputIn, const TensorInfo &previousCellStateIn, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const QLstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1427
NeonConvertFp32ToFp16Workload.hpp
armnn::NeonLayerSupport::IsActivationSupported
bool IsActivationSupported(const TensorInfo &input, const TensorInfo &output, const ActivationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:758
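
Complementing the IsLayerSupported sketch earlier, a caller can also invoke the per-layer query directly. The bounded-ReLU parameters below are illustrative, and the snippet assumes the same includes as before.

armnn::NeonLayerSupport support;
armnn::TensorInfo tensor({1, 8}, armnn::DataType::Float32);   // hypothetical shape

armnn::ActivationDescriptor descriptor;
descriptor.m_Function = armnn::ActivationFunction::BoundedReLu;
descriptor.m_A = 6.0f;   // upper bound
descriptor.m_B = 0.0f;   // lower bound

// reasonIfUnsupported defaults to EmptyOptional(), so it can simply be omitted.
bool supported = support.IsActivationSupported(tensor, tensor, descriptor);
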
armnn::NeonLayerSupport::IsSplitterSupported
bool IsSplitterSupported(const TensorInfo &input, const std::vector< std::reference_wrapper< TensorInfo >> &outputs, const ViewsDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1582
armnn::LayerType
LayerType
When adding a new layer, also adapt the LastLayer enum value in the enum class LayerType below.
Definition: Types.hpp:491
armnn::LayerType::Unmap
@ Unmap
armnn::NeonLayerSupport::IsLogSoftmaxSupported
bool IsLogSoftmaxSupported(const TensorInfo &input, const TensorInfo &output, const LogSoftmaxDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1271
armnn::MeanDescriptor
A MeanDescriptor for the MeanLayer.
Definition: Descriptors.hpp:1172
armnn::LayerType::QLstm
@ QLstm
NeonDequantizeWorkload.hpp
armnn::OptionalReferenceSwitch< std::is_reference< T >::value, T >::value
const T & value() const
Definition: Optional.hpp:146
armnn::TileDescriptor
Definition: Descriptors.hpp:1640
NeonSplitterWorkload.hpp
armnn::SoftmaxDescriptor
A SoftmaxDescriptor for the SoftmaxLayer.
Definition: Descriptors.hpp:177
armnn::NeonLayerSupport::IsStridedSliceSupported
bool IsStridedSliceSupported(const TensorInfo &input, const TensorInfo &output, const StridedSliceDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1627
armnn::NeonLayerSupport::IsLstmSupported
bool IsLstmSupported(const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &scratchBuffer, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const LstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1279
armnn::IBackendInternal::IBackendSpecificModelContextPtr
std::shared_ptr< IBackendModelContext > IBackendSpecificModelContextPtr
Definition: IBackendInternal.hpp:96
NeonLayerSupport.hpp
armnn::SpaceToDepthDescriptor
A SpaceToDepthDescriptor for the SpaceToDepthLayer.
Definition: Descriptors.hpp:1075
armnn::OptionalBase::has_value
bool has_value() const noexcept
Definition: Optional.hpp:53
NeonActivationWorkload.hpp
armnn::NeonLayerSupport::IsPooling2dSupported
bool IsPooling2dSupported(const TensorInfo &input, const TensorInfo &output, const Pooling2dDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1403
armnn::NeonTransposeConvolution2dWorkloadValidate
arm_compute::Status NeonTransposeConvolution2dWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const TransposeConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases)
Definition: NeonTransposeConvolution2dWorkload.cpp:25
armnn::LayerType::Output
@ Output
armnn::LayerType::Constant
@ Constant
armnn::NeonLayerSupport::IsConvolution2dSupported
bool IsConvolution2dSupported(const TensorInfo &input, const TensorInfo &output, const Convolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:963
armnn::NeonLogicalOrWorkloadValidate
arm_compute::Status NeonLogicalOrWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output)
Definition: NeonLogicalOrWorkload.cpp:18