ArmNN 23.11 - NeonLayerSupport.cpp
1 //
2 // Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
6 #include "NeonLayerSupport.hpp"
7 #include "NeonBackendModelContext.hpp"
8 
9 #include <armnn/Exceptions.hpp>
10 #include <armnn/Tensor.hpp>
11 #include <armnn/Types.hpp>
12 #include <armnn/BackendRegistry.hpp>
13 
14 #include <LayerSupportCommon.hpp>
15 #include <armnn/utility/IgnoreUnused.hpp>
16 #include <armnn/utility/PolymorphicDowncast.hpp>
17 
18 #if defined(ARMCOMPUTENEON_ENABLED)
89 #endif
90 
91 namespace armnn
92 {
93 
94 namespace
95 {
96 
97 const TensorInfo OverrideDataType(const TensorInfo& info, Optional<DataType> type)
98 {
99  if (!type)
100  {
101  return info;
102  }
103  return TensorInfo(info.GetShape(),
104  type.value(),
105  info.GetQuantizationScale(),
106  info.GetQuantizationOffset(),
107  info.IsConstant());
108 }
109 
110 template< typename ... Args>
111 bool IsNeonBackendSupported(Optional<std::string&> reasonIfUnsupported, Args... args)
112 {
113  IgnoreUnused(reasonIfUnsupported, (args)...);
114 #if defined(ARMCOMPUTENEON_ENABLED)
115  return true;
116 #else
117  SetValueChecked(reasonIfUnsupported, "The armnn library has been built without NEON support");
118  return false;
119 #endif
120 }
121 
122 template<typename FloatFunc, typename Uint8Func, typename ... Params>
123 bool IsSupportedForDataTypeNeon(Optional<std::string&> reasonIfUnsupported,
124  DataType dataType,
125  FloatFunc floatFuncPtr,
126  Uint8Func uint8FuncPtr,
127  Params&&... params)
128 {
129  return IsNeonBackendSupported(reasonIfUnsupported) &&
130  IsSupportedForDataTypeGeneric(reasonIfUnsupported,
131  dataType,
132  floatFuncPtr,
133  floatFuncPtr,
134  uint8FuncPtr,
135  &FalseFunc<>,
136  &FalseFunc<>,
137  std::forward<Params>(params)...);
138 }
139 
140 #if defined(ARMCOMPUTENEON_ENABLED)
141 template<class FuncType, class... Args>
142 inline bool IsWorkloadSupported(FuncType& func, Optional<std::string&> reasonIfUnsupported, Args&&... args)
143 {
144  arm_compute::Status aclStatus = func(std::forward<Args>(args)...);
145  const bool supported = (aclStatus.error_code() == arm_compute::ErrorCode::OK);
146  if (!supported && reasonIfUnsupported)
147  {
148  reasonIfUnsupported.value() = aclStatus.error_description();
149  }
150  return supported;
151 }
152 
153 #define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported, ...) \
154  return IsWorkloadSupported(func, reasonIfUnsupported, __VA_ARGS__);
155 #else
156 #define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported, ...) \
157  return IsNeonBackendSupported(reasonIfUnsupported, __VA_ARGS__);
158 #endif
159 } // anonymous namespace
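For readers skimming the helpers above, here is a rough self-contained sketch of the pattern they implement (FakeStatus and the lambda are stand-ins, not the arm_compute API): a validate function returns a status object, and the wrapper converts it into the bool-plus-optional-reason contract used throughout this file.

#include <iostream>
#include <string>
#include <utility>

// Stand-in for arm_compute::Status: an error flag plus a human-readable description.
struct FakeStatus
{
    bool ok;
    std::string description;
};

// Same shape as IsWorkloadSupported above: call the validate function and, on failure,
// surface its error description through the optional reason output.
template <class FuncType, class... Args>
bool IsWorkloadSupportedSketch(FuncType&& func, std::string* reasonIfUnsupported, Args&&... args)
{
    const FakeStatus status = func(std::forward<Args>(args)...);
    if (!status.ok && reasonIfUnsupported != nullptr)
    {
        *reasonIfUnsupported = status.description;
    }
    return status.ok;
}

int main()
{
    // Toy validate function: only 4D tensors are "supported".
    auto validateRank = [](unsigned int numDimensions)
    {
        return numDimensions == 4 ? FakeStatus{true, ""}
                                  : FakeStatus{false, "only 4D tensors are supported"};
    };

    std::string reason;
    std::cout << IsWorkloadSupportedSketch(validateRank, &reason, 2u) << ": " << reason << std::endl;
    return 0;
}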
160 
161 NeonLayerSupport::NeonLayerSupport(const IBackendInternal::IBackendSpecificModelContextPtr& modelContextPtr)
162  : m_ModelContextPtr(modelContextPtr)
163 {
164 }
165 
166 NeonLayerSupport::NeonLayerSupport()
167  : m_ModelContextPtr(nullptr)
168 {
169 }
170 
171 bool IsLayerTypeSupported(const LayerType& type,
172  const std::vector<TensorInfo>& infos,
173  const BaseDescriptor& descriptor,
174  const Optional<LstmInputParamsInfo>& lstmParamsInfo,
175  const Optional<QuantizedLstmInputParamsInfo>& quantizedLstmParamsInfo,
176  Optional<std::string&> reasonIfUnsupported,
177  const NeonLayerSupport& support)
178 {
179  switch (type)
180  {
181 case LayerType::Activation:
182  return support.IsActivationSupported(infos[0],
183  infos[1],
184  *(PolymorphicDowncast<const ActivationDescriptor*>(&descriptor)),
185  reasonIfUnsupported);
186  case LayerType::Addition:
187  return support.IsAdditionSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
188 case LayerType::ArgMinMax:
189  return support.IsArgMinMaxSupported(infos[0],
190  infos[1],
191  *(PolymorphicDowncast<const ArgMinMaxDescriptor*>(&descriptor)),
192  reasonIfUnsupported);
193 case LayerType::BatchMatMul:
194  return support.IsBatchMatMulSupported(infos[0],
195  infos[1],
196  infos[2],
197  *(PolymorphicDowncast<const BatchMatMulDescriptor*>(&descriptor)),
198  reasonIfUnsupported);
199 case LayerType::BatchNormalization:
200  return support.IsBatchNormalizationSupported(infos[0],
201  infos[1],
202  infos[2],
203  infos[3],
204  infos[4],
205  infos[5],
206  *(PolymorphicDowncast<const
207  BatchNormalizationDescriptor*>(&descriptor)),
208  reasonIfUnsupported);
209 case LayerType::BatchToSpaceNd:
210  return support.IsBatchToSpaceNdSupported(infos[0],
211  infos[1],
212  *(PolymorphicDowncast<const
213  BatchToSpaceNdDescriptor*>(&descriptor)),
214  reasonIfUnsupported);
215  case LayerType::Cast:
216  return support.IsCastSupported(infos[0], infos[1], reasonIfUnsupported);
217 case LayerType::ChannelShuffle:
218  return support.IsChannelShuffleSupported(infos[0],
219  infos[1],
220  *(PolymorphicDowncast<const
221  ChannelShuffleDescriptor*>(&descriptor)),
222  reasonIfUnsupported);
223 case LayerType::Comparison:
224  return support.IsComparisonSupported(infos[0],
225  infos[1],
226  infos[2],
227  *(PolymorphicDowncast<const ComparisonDescriptor*>(&descriptor)),
228  reasonIfUnsupported);
229  case LayerType::Concat:
230  {
231  std::vector<const TensorInfo*> inputInfos;
232  for (uint32_t i = 0; i < (infos.size() - 1); i++)
233  {
234  inputInfos.push_back(&infos[i]);
235  }
236  return support.IsConcatSupported(inputInfos,
237  infos[infos.size() - 1],
238  *(PolymorphicDowncast<const OriginsDescriptor*>(&descriptor)),
239  reasonIfUnsupported);
240  }
241  case LayerType::Constant:
242  return support.IsConstantSupported(infos[0], reasonIfUnsupported);
243 case LayerType::ConvertFp16ToFp32:
244  return support.IsConvertFp16ToFp32Supported(infos[0], infos[1], reasonIfUnsupported);
245 case LayerType::ConvertFp32ToFp16:
246  return support.IsConvertFp32ToFp16Supported(infos[0], infos[1], reasonIfUnsupported);
247 case LayerType::Convolution2d:
248  {
249  if (infos.size() != 4)
250  {
251 throw InvalidArgumentException("Invalid number of Convolution2d TensorInfos. "
252  "TensorInfos should be of format: {input, output, weights, biases}.");
253  }
254 
255  auto desc = *(PolymorphicDowncast<const Convolution2dDescriptor*>(&descriptor));
256  if (infos[3] == TensorInfo())
257  {
258  return support.IsConvolution2dSupported(infos[0],
259  infos[1],
260  desc,
261  infos[2],
262  EmptyOptional(),
263  reasonIfUnsupported);
264  }
265  else
266  {
267  return support.IsConvolution2dSupported(infos[0],
268  infos[1],
269  desc,
270  infos[2],
271  infos[3],
272  reasonIfUnsupported);
273  }
274  }
275 case LayerType::Convolution3d:
276  {
277  if (infos.size() != 4)
278  {
279  throw InvalidArgumentException("Invalid number of Convolution3d TensorInfos. "
280  "TensorInfos should be of format: {input, output, weights, biases}.");
281  }
282 
283  auto desc = *(PolymorphicDowncast<const Convolution3dDescriptor*>(&descriptor));
284  if (infos[3] == TensorInfo())
285  {
286  return support.IsConvolution3dSupported(infos[0],
287  infos[1],
288  desc,
289  infos[2],
290  EmptyOptional(),
291  reasonIfUnsupported);
292  }
293  else
294  {
295  return support.IsConvolution3dSupported(infos[0],
296  infos[1],
297  desc,
298  infos[2],
299  infos[3],
300  reasonIfUnsupported);
301  }
302  }
303 case LayerType::DepthToSpace:
304  return support.IsDepthToSpaceSupported(infos[0],
305  infos[1],
306  *(PolymorphicDowncast<const DepthToSpaceDescriptor*>(&descriptor)),
307  reasonIfUnsupported);
308 case LayerType::DepthwiseConvolution2d:
309  {
310  if (infos.size() != 4)
311  {
312  throw InvalidArgumentException("Invalid number of DepthwiseConvolution2d TensorInfos. "
313  "TensorInfos should be of format: {input, output, weights, biases}.");
314  }
315 
316  auto desc = *(PolymorphicDowncast<const DepthwiseConvolution2dDescriptor*>(&descriptor));
317  if (infos[3] == TensorInfo())
318  {
319  return support.IsDepthwiseConvolutionSupported(infos[0],
320  infos[1],
321  desc,
322  infos[2],
323  EmptyOptional(),
324  reasonIfUnsupported);
325  }
326  else
327  {
328  return support.IsDepthwiseConvolutionSupported(infos[0],
329  infos[1],
330  desc,
331  infos[2],
332  infos[3],
333  reasonIfUnsupported);
334  }
335  }
336 case LayerType::Dequantize:
337  return support.IsDequantizeSupported(infos[0], infos[1], reasonIfUnsupported);
338 case LayerType::DetectionPostProcess:
339  {
340  auto desc = *(PolymorphicDowncast<const DetectionPostProcessDescriptor*>(&descriptor));
341  return support.IsDetectionPostProcessSupported(infos[0],
342  infos[1],
343  infos[2],
344  infos[3],
345  infos[4],
346  infos[5],
347  infos[6],
348  desc,
349  reasonIfUnsupported);
350  }
351  case LayerType::Division:
352  return support.IsDivisionSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
353 case LayerType::ElementwiseBinary:
354  {
355  auto desc = *(PolymorphicDowncast<const ElementwiseBinaryDescriptor *>(&descriptor));
356 
357  switch (desc.m_Operation)
358  {
359 case BinaryOperation::Add:
360 FORWARD_WORKLOAD_VALIDATE_FUNC(NeonAdditionWorkloadValidate,
361  reasonIfUnsupported,
362  infos[0],
363  infos[1],
364  infos[2],
365  nullptr);
366 case BinaryOperation::Div:
367 FORWARD_WORKLOAD_VALIDATE_FUNC(NeonDivisionWorkloadValidate,
368  reasonIfUnsupported,
369  infos[0],
370  infos[1],
371  infos[2],
372  nullptr);
373 case BinaryOperation::Maximum:
374 FORWARD_WORKLOAD_VALIDATE_FUNC(NeonMaximumWorkloadValidate,
375  reasonIfUnsupported,
376  infos[0],
377  infos[1],
378  infos[2]);
379 case BinaryOperation::Minimum:
380 FORWARD_WORKLOAD_VALIDATE_FUNC(NeonMinimumWorkloadValidate,
381  reasonIfUnsupported,
382  infos[0],
383  infos[1],
384  infos[2]);
385 case BinaryOperation::Mul:
386 FORWARD_WORKLOAD_VALIDATE_FUNC(NeonMultiplicationWorkloadValidate,
387  reasonIfUnsupported,
388  infos[0],
389  infos[1],
390  infos[2],
391  nullptr);
392 case BinaryOperation::Power:
393 case BinaryOperation::SqDiff:
394 FORWARD_WORKLOAD_VALIDATE_FUNC(NeonElementwiseBinaryWorkloadValidate,
395  reasonIfUnsupported,
396  infos[0],
397  infos[1],
398  infos[2],
399  desc,
400  nullptr);
401 case BinaryOperation::Sub:
402 FORWARD_WORKLOAD_VALIDATE_FUNC(NeonSubtractionWorkloadValidate,
403  reasonIfUnsupported,
404  infos[0],
405  infos[1],
406  infos[2],
407  nullptr);
408  default:
409  return false;
410  }
411  }
412 case LayerType::ElementwiseUnary:
413  return support.IsElementwiseUnarySupported(infos[0],
414  infos[1],
415  *(PolymorphicDowncast<const
416  ElementwiseUnaryDescriptor*>(&descriptor)),
417  reasonIfUnsupported);
418  case LayerType::Fill:
419  return support.IsFillSupported(infos[0],
420  infos[1],
421  *(PolymorphicDowncast<const FillDescriptor*>(&descriptor)),
422  reasonIfUnsupported);
423  case LayerType::Floor:
424  return support.IsFloorSupported(infos[0], infos[1], reasonIfUnsupported);
425 case LayerType::FullyConnected:
426  return support.IsFullyConnectedSupported(infos[0],
427  infos[1],
428  infos[2],
429  infos[3],
430  *(PolymorphicDowncast<const
431  FullyConnectedDescriptor*>(&descriptor)),
432  reasonIfUnsupported);
433  case LayerType::Fused:
434  {
435  auto fusedDescriptor = *(PolymorphicDowncast<const FusedDescriptor*>(&descriptor));
436  if (fusedDescriptor.m_NumInputSlots + fusedDescriptor.m_NumOutputSlots != infos.size())
437  {
438  throw InvalidArgumentException("Invalid number of FusedLayer TensorInfos.");
439  }
440 
441  std::vector<TensorInfo> inputInfos(infos.begin(), infos.begin() + fusedDescriptor.m_NumInputSlots);
442  std::vector<TensorInfo> outputInfos(infos.begin() + fusedDescriptor.m_NumInputSlots, infos.end());
443 
444  return support.IsFusedSupported({inputInfos.begin(), inputInfos.end()},
445  {outputInfos.begin(), outputInfos.end()},
446  fusedDescriptor,
447  reasonIfUnsupported);
448  }
449  case LayerType::Gather:
450  return support.IsGatherSupported(infos[0],
451  infos[1],
452  infos[2],
453  *(PolymorphicDowncast<const GatherDescriptor*>(&descriptor)),
454  reasonIfUnsupported);
455  case LayerType::GatherNd:
456  return support.IsGatherNdSupported(infos[0],
457  infos[1],
458  infos[2],
459  reasonIfUnsupported);
460  case LayerType::Input:
461  return support.IsInputSupported(infos[0], reasonIfUnsupported);
462 case LayerType::InstanceNormalization:
463  return support.IsInstanceNormalizationSupported(infos[0],
464  infos[1],
465  *(PolymorphicDowncast<const
466  InstanceNormalizationDescriptor*>(&descriptor)),
467  reasonIfUnsupported);
468 case LayerType::L2Normalization:
469  return support.IsL2NormalizationSupported(infos[0],
470  infos[1],
471  *(PolymorphicDowncast<const
472  L2NormalizationDescriptor*>(&descriptor)),
473  reasonIfUnsupported);
474 case LayerType::LogicalBinary:
475  return support.IsLogicalBinarySupported(infos[0],
476  infos[1],
477  infos[2],
478  *(PolymorphicDowncast<const
479  LogicalBinaryDescriptor*>(&descriptor)),
480  reasonIfUnsupported);
481 case LayerType::LogSoftmax:
482  return support.IsLogSoftmaxSupported(infos[0],
483  infos[1],
484  *(PolymorphicDowncast<const LogSoftmaxDescriptor*>(&descriptor)),
485  reasonIfUnsupported);
486  case LayerType::Lstm:
487  return support.IsLstmSupported(infos[0],
488  infos[1],
489  infos[2],
490  infos[3],
491  infos[4],
492  infos[5],
493  infos[6],
494  *(PolymorphicDowncast<const LstmDescriptor*>(&descriptor)),
495  lstmParamsInfo.value(),
496  reasonIfUnsupported);
497  case LayerType::Map:
498  return true;
499  case LayerType::Maximum:
500  return support.IsMaximumSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
501  case LayerType::Mean:
502  return support.IsMeanSupported(infos[0],
503  infos[1],
504  *(PolymorphicDowncast<const MeanDescriptor*>(&descriptor)),
505  reasonIfUnsupported);
506  case LayerType::MemCopy:
507  return support.IsMemCopySupported(infos[0], infos[1], reasonIfUnsupported);
508 case LayerType::MemImport:
509  return support.IsMemImportSupported(infos[0], infos[1], reasonIfUnsupported);
510  case LayerType::Merge:
511  return support.IsMergeSupported(infos[0],
512  infos[1],
513  infos[2],
514  reasonIfUnsupported);
515  case LayerType::Minimum:
516  return support.IsMinimumSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
517 case LayerType::Multiplication:
518  return support.IsMultiplicationSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
519 case LayerType::Normalization:
520  return support.IsNormalizationSupported(infos[0],
521  infos[1],
522  *(PolymorphicDowncast<const
523  NormalizationDescriptor*>(&descriptor)),
524  reasonIfUnsupported);
525  case LayerType::Output:
526  return support.IsOutputSupported(infos[0], reasonIfUnsupported);
527  case LayerType::Pad:
528  return support.IsPadSupported(infos[0],
529  infos[1],
530  *(PolymorphicDowncast<const PadDescriptor*>(&descriptor)),
531  reasonIfUnsupported);
532  case LayerType::Permute:
533  return support.IsPermuteSupported(infos[0],
534  infos[1],
535  *(PolymorphicDowncast<const PermuteDescriptor*>(&descriptor)),
536  reasonIfUnsupported);
537 case LayerType::Pooling2d:
538  return support.IsPooling2dSupported(infos[0],
539  infos[1],
540  *(PolymorphicDowncast<const Pooling2dDescriptor*>(&descriptor)),
541  reasonIfUnsupported);
542 case LayerType::Pooling3d:
543  return support.IsPooling3dSupported(infos[0],
544  infos[1],
545  *(PolymorphicDowncast<const Pooling3dDescriptor*>(&descriptor)),
546  reasonIfUnsupported);
547  case LayerType::Prelu:
548  return support.IsPreluSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
549  case LayerType::QLstm:
550  return support.IsQLstmSupported(infos[0],
551  infos[1],
552  infos[2],
553  infos[3],
554  infos[4],
555  infos[5],
556  *(PolymorphicDowncast<const QLstmDescriptor*>(&descriptor)),
557  lstmParamsInfo.value(),
558  reasonIfUnsupported);
559  case LayerType::Quantize:
560  return support.IsQuantizeSupported(infos[0], infos[1], reasonIfUnsupported);
561 case LayerType::QuantizedLstm:
562  return support.IsQuantizedLstmSupported(infos[0],
563  infos[1],
564  infos[2],
565  infos[3],
566  infos[4],
567  quantizedLstmParamsInfo.value(),
568  reasonIfUnsupported);
569  case LayerType::Rank:
570  return true;
571  case LayerType::Reshape:
572  return support.IsReshapeSupported(infos[0],
573  infos[1],
574  *(PolymorphicDowncast<const ReshapeDescriptor*>(&descriptor)),
575  reasonIfUnsupported);
576  case LayerType::Resize:
577  return support.IsResizeSupported(infos[0],
578  infos[1],
579  *(PolymorphicDowncast<const ResizeDescriptor*>(&descriptor)),
580  reasonIfUnsupported);
581  case LayerType::Reduce:
582  return support.IsReduceSupported(infos[0],
583  infos[1],
584  *(PolymorphicDowncast<const ReduceDescriptor*>(&descriptor)),
585  reasonIfUnsupported);
586 case LayerType::ReverseV2:
587  return support.IsReverseV2Supported(infos[0],
588  infos[1],
589  infos[2],
590  reasonIfUnsupported);
591  case LayerType::Shape:
592  return support.IsShapeSupported(infos[0],
593  infos[1],
594  reasonIfUnsupported);
595  case LayerType::Slice:
596  return support.IsSliceSupported(infos[0],
597  infos[1],
598  *(PolymorphicDowncast<const SliceDescriptor*>(&descriptor)),
599  reasonIfUnsupported);
600  case LayerType::Softmax:
601  return support.IsSoftmaxSupported(infos[0],
602  infos[1],
603  *(PolymorphicDowncast<const SoftmaxDescriptor*>(&descriptor)),
604  reasonIfUnsupported);
605 case LayerType::SpaceToBatchNd:
606  return support.IsSpaceToBatchNdSupported(infos[0],
607  infos[1],
608  *(PolymorphicDowncast<const
609  SpaceToBatchNdDescriptor*>(&descriptor)),
610  reasonIfUnsupported);
611 case LayerType::SpaceToDepth:
612  return support.IsSpaceToDepthSupported(infos[0],
613  infos[1],
614  *(PolymorphicDowncast<const SpaceToDepthDescriptor*>(&descriptor)),
615  reasonIfUnsupported);
616  case LayerType::Splitter:
617  {
618  std::vector<TensorInfo> outputInfos;
619  for (uint32_t i = 1; i < infos.size(); i++)
620  {
621  outputInfos.push_back(infos[i]);
622  }
623  return support.IsSplitterSupported(infos[0],
624  {outputInfos.begin(), outputInfos.end()},
625  *(PolymorphicDowncast<const ViewsDescriptor*>(&descriptor)),
626  reasonIfUnsupported);
627  }
628  case LayerType::Stack:
629  {
630  std::vector<const TensorInfo*> inputInfos;
631  for (uint32_t i = 0; i < infos.size() - 1; i++)
632  {
633  inputInfos.push_back(&infos[i]);
634  }
635  return support.IsStackSupported(inputInfos,
636  infos[infos.size() - 1],
637  *(PolymorphicDowncast<const StackDescriptor*>(&descriptor)),
638  reasonIfUnsupported);
639  }
640 case LayerType::StridedSlice:
641  return support.IsStridedSliceSupported(infos[0],
642  infos[1],
643  *(PolymorphicDowncast<const StridedSliceDescriptor*>(&descriptor)),
644  reasonIfUnsupported);
645 case LayerType::Subtraction:
646  return support.IsSubtractionSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
647  case LayerType::Tile:
648  return support.IsTileSupported(infos[0],
649  infos[1],
650  *(PolymorphicDowncast<const TileDescriptor*>(&descriptor)),
651  reasonIfUnsupported);
652 case LayerType::Transpose:
653  return support.IsTransposeSupported(infos[0],
654  infos[1],
655  *(PolymorphicDowncast<const TransposeDescriptor*>(&descriptor)),
656  reasonIfUnsupported);
657 case LayerType::TransposeConvolution2d:
658  {
659  if (infos.size() != 4)
660  {
661  throw InvalidArgumentException("Invalid number of TransposeConvolution2d TensorInfos. "
662  "TensorInfos should be of format: {input, output, weights, biases}.");
663  }
664 
665  auto desc = *(PolymorphicDowncast<const TransposeConvolution2dDescriptor*>(&descriptor));
666  if (infos[3] == TensorInfo())
667  {
668  return support.IsTransposeConvolution2dSupported(infos[0],
669  infos[1],
670  desc,
671  infos[2],
672  EmptyOptional(),
673  reasonIfUnsupported);
674  }
675  else
676  {
677  return support.IsTransposeConvolution2dSupported(infos[0],
678  infos[1],
679  desc,
680  infos[2],
681  infos[3],
682  reasonIfUnsupported);
683  }
684  }
685 case LayerType::UnidirectionalSequenceLstm:
686  {
687  auto desc = *(PolymorphicDowncast<const UnidirectionalSequenceLstmDescriptor*>(&descriptor));
688  return support.IsUnidirectionalSequenceLstmSupported(infos[0],
689  infos[1],
690  infos[2],
691  infos[3],
692  infos[4],
693  infos[5],
694  desc,
695  lstmParamsInfo.value(),
696  reasonIfUnsupported);
697  }
698  case LayerType::Unmap:
699  return true;
700  default:
701  // layers not supported in neon by default:
702  // debug, fakequantization, precompiled,
703  // standin, switch
704  return false;
705  }
706 }
707 
708 bool NeonLayerSupport::IsLayerSupported(const LayerType& type,
709  const std::vector<TensorInfo>& infos,
710  const BaseDescriptor& descriptor,
711  const Optional<LstmInputParamsInfo>& lstmParamsInfo,
712  const Optional<QuantizedLstmInputParamsInfo>& quantizedLstmParamsInfo,
713  Optional<std::string&> reasonIfUnsupported) const
714 {
715  bool isSupported = IsLayerTypeSupported(type,
716  infos,
717  descriptor,
718  lstmParamsInfo,
719  quantizedLstmParamsInfo,
720  reasonIfUnsupported,
721  *this);
722 
723  // For android-nn-driver and support library, to run FP16 operations on CpuAcc we need at least v8.2
724  // architecture. If the available architecture is older than v8.2, we can check if the operator is
725  // supported by changing operator inputs & outputs to be FP32.
726  // This does not change the operator datatype in the above parsers to be FP32. We are simply reporting
727 // to the parsers if the operator can be supported in ArmNN. We will then re-enter ArmNN (Network.cpp)
728 // where we will recheck IsLayerSupported() on the FP16 datatype, update the operator to be FP32,
729 // and insert convert layers around the FP32 operator. (A standalone sketch of this retry pattern follows this function.)
730  if (reasonIfUnsupported.has_value())
731  {
732  std::string checkStr = "This CPU architecture does not support F16 data type, you need v8.2 or above";
733  if (!isSupported
734  && reasonIfUnsupported.value().find(checkStr) != std::string::npos)
735  {
736  std::vector<TensorInfo> newInfos;
737  for (auto info: infos)
738  {
739  newInfos.emplace_back(OverrideDataType(info, DataType::Float32));
740  }
741 
742  std::string tmpString;
743  return IsLayerTypeSupported(type,
744  newInfos,
745  descriptor,
746  lstmParamsInfo,
747  quantizedLstmParamsInfo,
748  tmpString,
749  *this);
750  }
751  }
752 
753  return isSupported;
754 }
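The retry logic above can be summarised by a small self-contained sketch; the checker below is a stand-in for illustration (it is not the armnn API), but the control flow mirrors the code: if the first query fails specifically with the F16 architecture message, the same query is repeated with every tensor treated as FP32 and that verdict is reported instead.

#include <algorithm>
#include <iostream>
#include <string>
#include <vector>

enum class DType { F16, F32 };

// Stand-in support check: pretend the CPU only handles F32 tensors.
static bool CheckSupported(const std::vector<DType>& tensors, std::string& reason)
{
    const bool anyF16 = std::any_of(tensors.begin(), tensors.end(),
                                    [](DType d) { return d == DType::F16; });
    if (anyF16)
    {
        reason = "This CPU architecture does not support F16 data type, you need v8.2 or above";
        return false;
    }
    return true;
}

// Mirrors the fallback above: on the specific F16 failure, retry with all tensors as F32.
static bool CheckWithFp32Fallback(std::vector<DType> tensors, std::string& reason)
{
    if (CheckSupported(tensors, reason))
    {
        return true;
    }
    if (reason.find("does not support F16") != std::string::npos)
    {
        std::fill(tensors.begin(), tensors.end(), DType::F32);
        std::string ignored;
        return CheckSupported(tensors, ignored);
    }
    return false;
}

int main()
{
    std::string reason;
    std::cout << CheckWithFp32Fallback({DType::F16, DType::F32}, reason) << std::endl; // prints 1
    return 0;
}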
755 
757  const TensorInfo& output,
758  const ActivationDescriptor& descriptor,
759  Optional<std::string&> reasonIfUnsupported) const
760 {
761  IgnoreUnused(descriptor);
763  reasonIfUnsupported,
764  input,
765  output,
766  descriptor);
767 }
768 
770  const TensorInfo& input1,
771  const TensorInfo& output,
772  Optional<std::string&> reasonIfUnsupported) const
773 {
775  reasonIfUnsupported,
776  input0,
777  input1,
778  output,
779  nullptr);
780 }
781 
783  const TensorInfo& output,
784  const ArgMinMaxDescriptor& descriptor,
785  Optional<std::string&> reasonIfUnsupported) const
786 {
788  reasonIfUnsupported,
789  input,
790  output,
791  descriptor);
792 }
793 
795  const TensorInfo& inputY,
796  const TensorInfo& output,
797  const BatchMatMulDescriptor& descriptor,
798  Optional<std::string&> reasonIfUnsupported) const
799 {
800  bool isFastMathEnabled = false;
801 #if defined(ARMCOMPUTENEON_ENABLED)
802  if (m_ModelContextPtr)
803  {
804  if (m_ModelContextPtr.get() != nullptr)
805  {
806  auto modelOptions = dynamic_cast<NeonBackendModelContext*>(m_ModelContextPtr.get());
807  if (modelOptions)
808  {
809  isFastMathEnabled = modelOptions->IsFastMathEnabled();
810  }
811  }
812  }
813 #endif
815  reasonIfUnsupported,
816  inputX,
817  inputY,
818  output,
819  descriptor,
820  isFastMathEnabled,
821  nullptr);
822 }
823 
825  const TensorInfo& output,
826  const TensorInfo& mean,
827  const TensorInfo& var,
828  const TensorInfo& beta,
829  const TensorInfo& gamma,
830  const BatchNormalizationDescriptor& descriptor,
831  Optional<std::string&> reasonIfUnsupported) const
832 {
834  reasonIfUnsupported,
835  input,
836  output,
837  mean,
838  var,
839  beta,
840  gamma,
841  descriptor,
842  nullptr);
843 }
844 
846  const TensorInfo& output,
847  const BatchToSpaceNdDescriptor& descriptor,
848  Optional<std::string&> reasonIfUnsupported) const
849 {
851  reasonIfUnsupported,
852  input,
853  output,
854  descriptor);
855 }
856 
858  const TensorInfo& output,
859  Optional<std::string&> reasonIfUnsupported) const
860 {
862  reasonIfUnsupported,
863  input,
864  output);
865 }
866 
868  const TensorInfo& output,
869  const ChannelShuffleDescriptor& descriptor,
870  Optional<std::string&> reasonIfUnsupported) const
871 {
873  reasonIfUnsupported,
874  input,
875  output,
876  descriptor);
877 }
878 
880  const TensorInfo& input1,
881  const TensorInfo& output,
882  const ComparisonDescriptor& descriptor,
883  Optional<std::string&> reasonIfUnsupported) const
884 {
885 
887  reasonIfUnsupported,
888  input0,
889  input1,
890  output,
891  descriptor);
892 }
893 
894 bool NeonLayerSupport::IsConcatSupported(const std::vector<const TensorInfo*> inputs,
895  const TensorInfo& output,
896  const OriginsDescriptor& descriptor,
897  Optional<std::string&> reasonIfUnsupported) const
898 {
899  if (descriptor.GetNumDimensions() <= descriptor.GetConcatAxis())
900  {
901  SetValueChecked(reasonIfUnsupported, "Neon Concat: Concat axis > Number of dimensions.");
902  return false;
903  }
904 
905  unsigned int concatInnerAxis = (descriptor.GetNumDimensions() - descriptor.GetConcatAxis()) - 1;
906  if(concatInnerAxis < 3) // Width, height, or channels
907  {
909  reasonIfUnsupported,
910  inputs,
911  output,
912  descriptor);
913  }
914  else if (concatInnerAxis == 3)
915  {
916  for (auto& input : inputs)
917  {
918  if (input && !output.IsTypeSpaceMatch(*input)) // Cannot use sub-tensors if the types are not same space
919  {
920  SetValueChecked(reasonIfUnsupported, "Neon Concat: Types and quantization parameters must match.");
921  return false;
922  }
923  }
924  return true; // Sub-tensors support concat along batch
925  }
926  else // > 4 dimensions not supported.
927  {
928  SetValueChecked(reasonIfUnsupported, "Neon Concat: Maximum of 4 dimensions supported.");
929  return false;
930  }
931 }
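A worked example of the axis arithmetic above: with descriptor.GetNumDimensions() == 4, a concat axis of 1 gives concatInnerAxis = (4 - 1) - 1 = 2, which is below 3, so the result comes from the NeonConcatWorkloadValidate path; a concat axis of 0 (the batch dimension) gives concatInnerAxis = 3, so support instead depends on every input passing the IsTypeSpaceMatch check against the output.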
932 
934  Optional<std::string&> reasonIfUnsupported) const
935 {
937  reasonIfUnsupported,
938  output);
939 }
940 
942  const TensorInfo& output,
943  Optional<std::string&> reasonIfUnsupported) const
944 {
946  reasonIfUnsupported,
947  input,
948  output);
949 }
950 
952  const TensorInfo& output,
953  Optional<std::string&> reasonIfUnsupported) const
954 {
956  reasonIfUnsupported,
957  input,
958  output);
959 }
960 
962  const TensorInfo& output,
963  const Convolution2dDescriptor& descriptor,
964  const TensorInfo& weights,
965  const Optional<TensorInfo>& biases,
966  Optional<std::string&> reasonIfUnsupported) const
967 {
968  bool isFastMathEnabled = false;
969 #if defined(ARMCOMPUTENEON_ENABLED)
970  if (m_ModelContextPtr)
971  {
972  if (m_ModelContextPtr.get() != nullptr)
973  {
974  auto modelOptions = dynamic_cast<NeonBackendModelContext*>(m_ModelContextPtr.get());
975  if (modelOptions)
976  {
977  isFastMathEnabled = modelOptions->IsFastMathEnabled();
978  }
979  }
980  }
981 #endif
982 
984  reasonIfUnsupported,
985  input,
986  output,
987  descriptor,
988  weights,
989  biases,
990  isFastMathEnabled,
991  nullptr);
992 }
993 
995  const TensorInfo& output,
996  const Convolution3dDescriptor& descriptor,
997  const TensorInfo& weights,
998  const Optional<TensorInfo>& biases,
999  Optional<std::string&> reasonIfUnsupported) const
1000 {
1001  bool isFastMathEnabled = false;
1002 #if defined(ARMCOMPUTENEON_ENABLED)
1003  if (m_ModelContextPtr)
1004  {
1005  if (m_ModelContextPtr.get() != nullptr)
1006  {
1007  auto modelOptions = dynamic_cast<NeonBackendModelContext*>(m_ModelContextPtr.get());
1008  if (modelOptions)
1009  {
1010  isFastMathEnabled = modelOptions->IsFastMathEnabled();
1011  }
1012  }
1013  }
1014 #endif
1015 
1017  reasonIfUnsupported,
1018  input,
1019  output,
1020  descriptor,
1021  weights,
1022  biases,
1023  isFastMathEnabled,
1024  nullptr);
1025 }
1026 
1028  const TensorInfo& output,
1029  const DepthToSpaceDescriptor& descriptor,
1030  Optional<std::string&> reasonIfUnsupported) const
1031 {
1033  reasonIfUnsupported,
1034  input,
1035  output,
1036  descriptor);
1037 }
1038 
1040  const TensorInfo& output,
1041  const DepthwiseConvolution2dDescriptor& descriptor,
1042  const TensorInfo& weights,
1043  const Optional<TensorInfo>& biases,
1044  Optional<std::string&> reasonIfUnsupported) const
1045 {
1047  reasonIfUnsupported,
1048  input,
1049  output,
1050  descriptor,
1051  weights,
1052  biases,
1053  nullptr);
1054 }
1055 
1057  const TensorInfo& output,
1058  Optional<std::string&> reasonIfUnsupported) const
1059 {
1061  reasonIfUnsupported,
1062  input,
1063  output);
1064 }
1065 
1067  const TensorInfo& output,
1068  const DepthwiseConvolution2dDescriptor& descriptor,
1069  const TensorInfo& weights,
1070  const Optional<TensorInfo>& biases,
1071  Optional<std::string&> reasonIfUnsupported) const
1072 {
1074  reasonIfUnsupported,
1075  input,
1076  output,
1077  descriptor,
1078  weights,
1079  biases,
1080  nullptr);
1081 }
1082 
1084  const TensorInfo& output,
1085  const ElementwiseUnaryDescriptor& descriptor,
1086  Optional<std::string&> reasonIfUnsupported) const
1087 {
1088  switch(descriptor.m_Operation)
1089  {
1090  case UnaryOperation::Abs:
1092  reasonIfUnsupported,
1093  input,
1094  output);
1095  case UnaryOperation::Exp:
1097  reasonIfUnsupported,
1098  input,
1099  output);
1102  reasonIfUnsupported,
1103  input,
1104  output);
1105  case UnaryOperation::Log:
1107  reasonIfUnsupported,
1108  input,
1109  output);
1110  case UnaryOperation::Neg:
1112  reasonIfUnsupported,
1113  input,
1114  output);
1115  case UnaryOperation::Rsqrt:
1117  reasonIfUnsupported,
1118  input,
1119  output);
1120  case UnaryOperation::Sin:
1122  reasonIfUnsupported,
1123  input,
1124  output);
1125  case UnaryOperation::Sqrt:
1127  reasonIfUnsupported,
1128  input,
1129  output);
1130  default:
1131  return false;
1132  }
1133 }
1134 
1136  const TensorInfo& output,
1137  const FillDescriptor& descriptor,
1138  Optional<std::string&> reasonIfUnsupported) const
1139 {
1140  armnn::IgnoreUnused(input);
1141  armnn::IgnoreUnused(output);
1142  armnn::IgnoreUnused(descriptor);
1143 
1144  return IsNeonBackendSupported(reasonIfUnsupported);
1145 }
1146 
1148  const TensorInfo& output,
1149  Optional<std::string&> reasonIfUnsupported) const
1150 {
1151  armnn::IgnoreUnused(output);
1152  return IsNeonBackendSupported(reasonIfUnsupported) &&
1153  IsSupportedForDataTypeGeneric(reasonIfUnsupported,
1154  input.GetDataType(),
1155  &FalseFuncF16<>,
1156  &TrueFunc<>,
1157  &FalseFuncU8<>,
1158  &FalseFuncI32<>,
1159  &FalseFuncU8<>);
1160 }
1161 
1163  const TensorInfo& output,
1164  const TensorInfo& weights,
1165  const TensorInfo& biases,
1166  const FullyConnectedDescriptor& descriptor,
1167  Optional<std::string&> reasonIfUnsupported) const
1168 {
1170  reasonIfUnsupported,
1171  input,
1172  output,
1173  weights,
1174  biases,
1175  descriptor,
1176  nullptr);
1177 }
1178 
1179 bool NeonLayerSupport::IsFusedSupported(const std::vector<std::reference_wrapper<TensorInfo>>& inputs,
1180  const std::vector<std::reference_wrapper<TensorInfo>>& outputs,
1181  const FusedDescriptor& descriptor,
1182  Optional<std::string&> reasonIfUnsupported) const
1183 {
1185  reasonIfUnsupported,
1186  inputs,
1187  outputs,
1188  descriptor,
1189  nullptr);
1190 }
1191 
1193  const TensorInfo& input1,
1194  const TensorInfo& output,
1195  const GatherDescriptor& descriptor,
1196  Optional<std::string&> reasonIfUnsupported) const
1197 {
1199  reasonIfUnsupported,
1200  input0,
1201  input1,
1202  output,
1203  descriptor);
1204 }
1205 
1207  const TensorInfo& input1,
1208  const TensorInfo& output,
1209  Optional<std::string&> reasonIfUnsupported) const
1210 {
1212  reasonIfUnsupported,
1213  input0,
1214  input1,
1215  output);
1216 }
1217 
1219  Optional<std::string&> reasonIfUnsupported) const
1220 {
1221  return IsNeonBackendSupported(reasonIfUnsupported, input);
1222 }
1223 
1225  const TensorInfo& output,
1226  const InstanceNormalizationDescriptor& descriptor,
1227  Optional<std::string&> reasonIfUnsupported) const
1228 {
1230  reasonIfUnsupported,
1231  input,
1232  output,
1233  descriptor);
1234 }
1235 
1237  const TensorInfo& output,
1238  const L2NormalizationDescriptor& descriptor,
1239  Optional<std::string&> reasonIfUnsupported) const
1240 {
1241  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonL2NormalizationWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
1242 }
1243 
1245  const TensorInfo& input1,
1246  const TensorInfo& output,
1247  const LogicalBinaryDescriptor& descriptor,
1248  Optional<std::string&> reasonIfUnsupported) const
1249 {
1250  switch(descriptor.m_Operation)
1251  {
1254  reasonIfUnsupported,
1255  input0,
1256  input1,
1257  output);
1260  reasonIfUnsupported,
1261  input0,
1262  input1,
1263  output);
1264  default:
1265  return false;
1266  }
1267 }
1268 
1270  const TensorInfo& output,
1271  const LogSoftmaxDescriptor& descriptor,
1272  Optional<std::string&> reasonIfUnsupported) const
1273 {
1274  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonLogSoftmaxWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
1275 }
1276 
1278  const TensorInfo& outputStateIn,
1279  const TensorInfo& cellStateIn,
1280  const TensorInfo& scratchBuffer,
1281  const TensorInfo& outputStateOut,
1282  const TensorInfo& cellStateOut,
1283  const TensorInfo& output,
1284  const LstmDescriptor& descriptor,
1285  const LstmInputParamsInfo& paramsInfo,
1286  Optional<std::string&> reasonIfUnsupported) const
1287 {
1289  reasonIfUnsupported,
1290  input,
1291  outputStateIn,
1292  cellStateIn,
1293  scratchBuffer,
1294  outputStateOut,
1295  cellStateOut,
1296  output,
1297  descriptor,
1298  paramsInfo);
1299 }
1300 
1302  const TensorInfo& input1,
1303  const TensorInfo& output,
1304  Optional<std::string&> reasonIfUnsupported) const
1305 {
1307  reasonIfUnsupported,
1308  input0,
1309  input1,
1310  output);
1311 }
1312 
1314  const TensorInfo& output,
1315  const MeanDescriptor& descriptor,
1316  Optional<std::string&> reasonIfUnsupported) const
1317 {
1319  reasonIfUnsupported,
1320  input,
1321  output,
1322  descriptor);
1323 }
1324 
1326  const TensorInfo& input1,
1327  const TensorInfo& output,
1328  Optional<std::string&> reasonIfUnsupported) const
1329 {
1331  reasonIfUnsupported,
1332  input0,
1333  input1,
1334  output);
1335 }
1336 
1338  const TensorInfo& input1,
1339  const TensorInfo& output,
1340  Optional<std::string&> reasonIfUnsupported) const
1341 {
1343  reasonIfUnsupported,
1344  input0,
1345  input1,
1346  output,
1347  nullptr);
1348 }
1349 
1351  const TensorInfo& input1,
1352  const TensorInfo& output,
1353  Optional<std::string&> reasonIfUnsupported) const
1354 {
1356  reasonIfUnsupported,
1357  input0,
1358  input1,
1359  output,
1360  nullptr);
1361 }
1362 
1364  const TensorInfo& output,
1365  const NormalizationDescriptor& descriptor,
1366  Optional<std::string&> reasonIfUnsupported) const
1367 {
1369  reasonIfUnsupported,
1370  input,
1371  output,
1372  descriptor);
1373 }
1374 
1376  Optional<std::string&> reasonIfUnsupported) const
1377 {
1378  return IsNeonBackendSupported(reasonIfUnsupported, output);
1379 }
1380 
1382  const TensorInfo& output,
1383  const PadDescriptor& descriptor,
1384  Optional<std::string&> reasonIfUnsupported) const
1385 {
1387  reasonIfUnsupported,
1388  input,
1389  output,
1390  descriptor);
1391 }
1392 
1394  const TensorInfo& output,
1395  const PermuteDescriptor& descriptor,
1396  Optional<std::string&> reasonIfUnsupported) const
1397 {
1398  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonPermuteWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
1399 }
1400 
1402  const TensorInfo& output,
1403  const Pooling2dDescriptor& descriptor,
1404  Optional<std::string&> reasonIfUnsupported) const
1405 {
1406  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonPooling2dWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
1407 }
1408 
1410  const TensorInfo& output,
1411  const Pooling3dDescriptor& descriptor,
1412  Optional<std::string&> reasonIfUnsupported) const
1413 {
1414  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonPooling3dWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
1415 }
1416 
1418  const armnn::TensorInfo &alpha,
1419  const armnn::TensorInfo &output,
1420  armnn::Optional<std::string &> reasonIfUnsupported) const
1421 {
1422  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonPreluWorkloadValidate, reasonIfUnsupported, input, alpha, output);
1423 }
1424 
1426  const TensorInfo& previousOutputIn,
1427  const TensorInfo& previousCellStateIn,
1428  const TensorInfo& outputStateOut,
1429  const TensorInfo& cellStateOut,
1430  const TensorInfo& output,
1431  const QLstmDescriptor& descriptor,
1432  const LstmInputParamsInfo& paramsInfo,
1433  Optional<std::string&> reasonIfUnsupported) const
1434 {
1435  // Check required here in order to pass IsLayerSupported for datatypes tests
1436  if (input.GetDataType() == armnn::DataType::QAsymmS8 &&
1437  previousOutputIn.GetDataType() == armnn::DataType::QAsymmS8 &&
1438  previousCellStateIn.GetDataType() == armnn::DataType::QSymmS16 &&
1439  outputStateOut.GetDataType() == armnn::DataType::QAsymmS8 &&
1440  cellStateOut.GetDataType() == armnn::DataType::QSymmS16 &&
1441 output.GetDataType() == armnn::DataType::QAsymmS8)
1442  {
1444  reasonIfUnsupported,
1445  input,
1446  previousCellStateIn,
1447  previousOutputIn,
1448  cellStateOut,
1449  outputStateOut,
1450  output,
1451  descriptor,
1452  paramsInfo);
1453  }
1454  else
1455  {
1456  return false;
1457  }
1458 }
1459 
1461  const TensorInfo& output,
1462  Optional<std::string&> reasonIfUnsupported) const
1463 {
1465  reasonIfUnsupported,
1466  input,
1467  output);
1468 }
1469 
1471  const TensorInfo& cellStateIn,
1472  const TensorInfo& outputStateIn,
1473  const TensorInfo& cellStateOut,
1474  const TensorInfo& outputStateOut,
1475  const QuantizedLstmInputParamsInfo& paramsInfo,
1476  Optional<std::string&> reasonIfUnsupported) const
1477 {
1479  reasonIfUnsupported,
1480  input,
1481  cellStateIn,
1482  outputStateIn,
1483  cellStateOut,
1484  outputStateOut,
1485  paramsInfo);
1486 }
1487 
1489  const TensorInfo& output,
1490  const ReduceDescriptor& descriptor,
1491  Optional<std::string&> reasonIfUnsupported) const
1492 {
1494  reasonIfUnsupported,
1495  input,
1496  output,
1497  descriptor);
1498 }
1499 
1501  const TensorInfo& output,
1502  const ReshapeDescriptor& descriptor,
1503  Optional<std::string&> reasonIfUnsupported) const
1504 {
1505  armnn::IgnoreUnused(descriptor);
1507  reasonIfUnsupported,
1508  input,
1509  output);
1510 }
1511 
1513  const TensorInfo& output,
1514  const ResizeDescriptor& descriptor,
1515  Optional<std::string&> reasonIfUnsupported) const
1516 {
1518  reasonIfUnsupported,
1519  input,
1520  output,
1521  descriptor);
1522 }
1523 
1525  const armnn::TensorInfo &axis,
1526  const armnn::TensorInfo &output,
1527  Optional<std::string &> reasonIfUnsupported) const
1528 {
1530  reasonIfUnsupported,
1531  input,
1532  axis,
1533  output);
1534 }
1535 
1537  const TensorInfo& output,
1538  const SliceDescriptor& descriptor,
1539  Optional<std::string&> reasonIfUnsupported) const
1540 {
1542  reasonIfUnsupported,
1543  input,
1544  output,
1545  descriptor);
1546 }
1547 
1549  const TensorInfo& output,
1550  const SoftmaxDescriptor& descriptor,
1551  Optional<std::string&> reasonIfUnsupported) const
1552 {
1553  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonSoftmaxWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
1554 }
1555 
1557  const TensorInfo& output,
1558  const SpaceToBatchNdDescriptor& descriptor,
1559  Optional<std::string&> reasonIfUnsupported) const
1560 {
1562  reasonIfUnsupported,
1563  input,
1564  output,
1565  descriptor);
1566 }
1567 
1569  const TensorInfo& output,
1570  const SpaceToDepthDescriptor& descriptor,
1571  Optional<std::string&> reasonIfUnsupported) const
1572 {
1574  reasonIfUnsupported,
1575  input,
1576  output,
1577  descriptor);
1578 }
1579 
1581  const std::vector<std::reference_wrapper<TensorInfo>>& outputs,
1582  const ViewsDescriptor& descriptor,
1583  Optional<std::string&> reasonIfUnsupported) const
1584 {
1585 #if defined(ARMCOMPUTENEON_ENABLED)
1586  // Split along the last dimension, cannot use sub-tensors
1587  // as width and height of the sub-tensors do not match
1588  // the width and height of the parent tensor
1589  // in case of input with more than 2D.
1590  std::set<unsigned int> splitAxis = ComputeSplitAxis(descriptor, input.GetShape());
1591  if (descriptor.GetNumDimensions() > 2 && splitAxis.size() == 1 &&
1592  *splitAxis.begin() == descriptor.GetNumDimensions() - 1 )
1593  {
1595  reasonIfUnsupported,
1596  input,
1597  outputs,
1598  *splitAxis.begin());
1599  }
1600 #endif
1601  IgnoreUnused(descriptor);
1602  for (auto output : outputs)
1603  {
1604  if (!input.IsTypeSpaceMatch(output)) // Cannot use sub-tensors if the types are not same space
1605  {
1606  SetValueChecked(reasonIfUnsupported, "Neon Splitter: Types and quantization parameters must match.");
1607  return false;
1608  }
1609  }
1610  return true;
1611 }
1612 
1613 bool NeonLayerSupport::IsStackSupported(const std::vector<const TensorInfo*>& inputs,
1614  const TensorInfo& output,
1615  const StackDescriptor& descriptor,
1616  Optional<std::string&> reasonIfUnsupported) const
1617 {
1619  reasonIfUnsupported,
1620  inputs,
1621  output,
1622  descriptor);
1623 }
1624 
1626  const TensorInfo& output,
1627  const StridedSliceDescriptor& descriptor,
1628  Optional<std::string&> reasonIfUnsupported) const
1629 {
1631  reasonIfUnsupported,
1632  input,
1633  output,
1634  descriptor);
1635 }
1636 
1638  const TensorInfo& input1,
1639  const TensorInfo& output,
1640  Optional<std::string&> reasonIfUnsupported) const
1641 {
1643  reasonIfUnsupported,
1644  input0,
1645  input1,
1646  output,
1647  nullptr);
1648 }
1649 
1651  const TensorInfo& output,
1652  const TileDescriptor& descriptor,
1653  Optional<std::string&> reasonIfUnsupported) const
1654 {
1656  reasonIfUnsupported,
1657  input,
1658  output,
1659  descriptor);
1660 }
1661 
1663  const TensorInfo& output,
1664  const TransposeConvolution2dDescriptor& descriptor,
1665  const TensorInfo& weights,
1666  const Optional<TensorInfo>& biases,
1667  Optional<std::string&> reasonIfUnsupported) const
1668 {
1670  reasonIfUnsupported,
1671  input,
1672  output,
1673  descriptor,
1674  weights,
1675  biases);
1676 }
1677 
1679  const TensorInfo& output,
1680  const TransposeDescriptor& descriptor,
1681  Optional<std::string&> reasonIfUnsupported) const
1682 {
1683  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonTransposeWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
1684 }
1685 
1687  const TensorInfo& outputStateIn,
1688  const TensorInfo& cellStateIn,
1689  const TensorInfo& outputStateOut,
1690  const TensorInfo& cellStateOut,
1691  const TensorInfo& output,
1692  const UnidirectionalSequenceLstmDescriptor& descriptor,
1693  const LstmInputParamsInfo& paramsInfo,
1694  Optional<std::string&> reasonIfUnsupported) const
1695 {
1696  if (input.GetDataType() == armnn::DataType::QAsymmS8 &&
1697  outputStateIn.GetDataType() == armnn::DataType::QAsymmS8 &&
1698  cellStateIn.GetDataType() == armnn::DataType::QSymmS16 &&
1699  outputStateOut.GetDataType() == armnn::DataType::QAsymmS8 &&
1700  cellStateOut.GetDataType() == armnn::DataType::QSymmS16 &&
1701 output.GetDataType() == armnn::DataType::QAsymmS8)
1702  {
1704  reasonIfUnsupported,
1705  input,
1706  outputStateIn,
1707  cellStateIn,
1708  outputStateOut,
1709  cellStateOut,
1710  output,
1711  descriptor,
1712  paramsInfo);
1713  }
1714  else
1715  {
1717  reasonIfUnsupported,
1718  input,
1719  outputStateIn,
1720  cellStateIn,
1721  outputStateOut,
1722  cellStateOut,
1723  output,
1724  descriptor,
1725  paramsInfo);
1726  }
1727 }
1728 
1729 } // namespace armnn
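To close, a minimal usage sketch of the class defined in this file, querying ReLU support directly; it assumes an in-tree build where the Neon backend headers are on the include path, and the include line and tensor shapes are illustrative rather than taken from the file above.

// Illustrative only: the include path below assumes an in-tree ArmNN build where
// src/backends is on the include path; adjust to your build layout.
#include <neon/NeonLayerSupport.hpp>

#include <armnn/Descriptors.hpp>
#include <armnn/Tensor.hpp>
#include <armnn/Types.hpp>

#include <iostream>
#include <string>

int main()
{
    armnn::NeonLayerSupport support;   // default-constructed: no backend model options

    const armnn::TensorInfo input(armnn::TensorShape({1, 16}), armnn::DataType::Float32);
    const armnn::TensorInfo output(armnn::TensorShape({1, 16}), armnn::DataType::Float32);

    armnn::ActivationDescriptor descriptor;
    descriptor.m_Function = armnn::ActivationFunction::ReLu;

    std::string reason;
    const bool supported = support.IsActivationSupported(input, output, descriptor, reason);

    std::cout << (supported ? "ReLU is supported on CpuAcc"
                            : "ReLU is not supported: " + reason) << std::endl;
    return 0;
}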
armnn::BatchNormalizationDescriptor
A BatchNormalizationDescriptor for the BatchNormalizationLayer.
Definition: Descriptors.hpp:828
armnn::NeonLayerSupport::IsAdditionSupported
bool IsAdditionSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:769
armnn::LayerType::SpaceToDepth
@ SpaceToDepth
NeonConcatWorkload.hpp
armnn::NeonMinimumWorkloadValidate
arm_compute::Status NeonMinimumWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output)
Validate function for validating the inputs and output.
Definition: NeonMinimumWorkload.cpp:15
NeonComparisonWorkload.hpp
armnn::NeonFullyConnectedWorkloadValidate
arm_compute::Status NeonFullyConnectedWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const TensorInfo &weights, const Optional< TensorInfo > &biases, const FullyConnectedDescriptor &descriptor, const ActivationDescriptor *activationDescriptor)
Definition: NeonFullyConnectedWorkload.cpp:24
armnn::NeonSpaceToBatchNdWorkloadValidate
arm_compute::Status NeonSpaceToBatchNdWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const SpaceToBatchNdDescriptor &descriptor)
Definition: NeonSpaceToBatchNdWorkload.cpp:15
armnn::NeonLayerSupport::IsDequantizeSupported
bool IsDequantizeSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1056
NeonConvertFp16ToFp32Workload.hpp
armnn::OriginsDescriptor::GetConcatAxis
unsigned int GetConcatAxis() const
Get the concatenation axis value.
Definition: Descriptors.cpp:162
armnn::BinaryOperation::Mul
@ Mul
armnn::NeonReverseV2WorkloadValidate
arm_compute::Status NeonReverseV2WorkloadValidate(const TensorInfo &input, const TensorInfo &axis, const TensorInfo &output)
Definition: NeonReverseV2Workload.cpp:14
NeonAbsWorkload.hpp
NeonNegWorkload.hpp
armnn::ViewsDescriptor
A ViewsDescriptor for the SplitterLayer.
Definition: Descriptors.hpp:244
armnn::NeonTileWorkloadValidate
arm_compute::Status NeonTileWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const TileDescriptor &descriptor)
Definition: NeonTileWorkload.cpp:14
armnn::LayerType::Permute
@ Permute
armnn::ActivationDescriptor
An ActivationDescriptor for the ActivationLayer.
Definition: Descriptors.hpp:36
armnn::NeonConvertFp32ToFp16WorkloadValidate
arm_compute::Status NeonConvertFp32ToFp16WorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: NeonConvertFp32ToFp16Workload.cpp:21
armnn::FullyConnectedDescriptor
A FullyConnectedDescriptor for the FullyConnectedLayer.
Definition: Descriptors.hpp:507
armnn::BinaryOperation::Add
@ Add
armnn::NeonAdditionWorkloadValidate
arm_compute::Status NeonAdditionWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ActivationDescriptor *activationDescriptor)
Definition: NeonAdditionWorkload.cpp:20
NeonSoftmaxWorkload.hpp
armnn::NeonLayerSupport::IsSpaceToBatchNdSupported
bool IsSpaceToBatchNdSupported(const TensorInfo &input, const TensorInfo &output, const SpaceToBatchNdDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1556
NeonExpWorkload.hpp
armnn::LayerType::Splitter
@ Splitter
armnn::LayerType::BatchNormalization
@ BatchNormalization
armnn::QLstmDescriptor
A QLstmDescriptor for the QLstmLayer.
Definition: Descriptors.hpp:1380
armnn::Optional
Definition: Optional.hpp:270
armnn::NeonAbsWorkloadValidate
arm_compute::Status NeonAbsWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: NeonAbsWorkload.cpp:17
armnn::NeonMultiplicationWorkloadValidate
arm_compute::Status NeonMultiplicationWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ActivationDescriptor *activationDescriptor)
Definition: NeonMultiplicationWorkload.cpp:19
NeonStridedSliceWorkload.hpp
armnn::NeonLayerSupport::IsPermuteSupported
bool IsPermuteSupported(const TensorInfo &input, const TensorInfo &output, const PermuteDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1393
NeonNormalizationFloatWorkload.hpp
armnn::NeonBackendModelContext::IsFastMathEnabled
bool IsFastMathEnabled() const
Definition: NeonBackendModelContext.cpp:53
armnn::NeonStackWorkloadValidate
arm_compute::Status NeonStackWorkloadValidate(const std::vector< const TensorInfo * > &inputs, const TensorInfo &output, const StackDescriptor &descriptor)
Definition: NeonStackWorkload.cpp:27
NeonReverseV2Workload.hpp
armnn::NeonPooling3dWorkloadValidate
arm_compute::Status NeonPooling3dWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const Pooling3dDescriptor &descriptor)
Definition: NeonPooling3dWorkload.cpp:15
armnn::NeonLayerSupport::IsStackSupported
bool IsStackSupported(const std::vector< const TensorInfo * > &inputs, const TensorInfo &output, const StackDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1613
NeonFusedWorkload.hpp
NeonAdditionWorkload.hpp
NeonMeanWorkload.hpp
armnn::Pooling3dDescriptor
A Pooling3dDescriptor for the Pooling3dLayer.
Definition: Descriptors.hpp:431
armnn::ResizeDescriptor
A ResizeDescriptor for the ResizeLayer.
Definition: Descriptors.hpp:985
armnn::NeonNegWorkloadValidate
arm_compute::Status NeonNegWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: NeonNegWorkload.cpp:17
armnn::ArgMinMaxDescriptor
An ArgMinMaxDescriptor for ArgMinMaxLayer.
Definition: Descriptors.hpp:67
armnn::NeonQLstmWorkloadValidate
arm_compute::Status NeonQLstmWorkloadValidate(const TensorInfo &input, const TensorInfo &cellStateIn, const TensorInfo &outputStateIn, const TensorInfo &cellStateOut, const TensorInfo &outputStateOut, const TensorInfo &output, const QLstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo)
Definition: NeonQLstmWorkload.cpp:243
armnn::NeonGatherWorkloadValidate
arm_compute::Status NeonGatherWorkloadValidate(const TensorInfo &input, const TensorInfo &indices, const TensorInfo &output, const GatherDescriptor &descriptor)
Definition: NeonGatherWorkload.cpp:13
armnn::NeonConvolution3dWorkloadValidate
arm_compute::Status NeonConvolution3dWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const Convolution3dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, bool isFastMathEnabled, const ActivationDescriptor *activationDescriptor)
Definition: NeonConvolution3dWorkload.cpp:24
armnn::NeonSubtractionWorkloadValidate
arm_compute::Status NeonSubtractionWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ActivationDescriptor *activationDescriptor)
Definition: NeonSubtractionWorkload.cpp:22
armnn::InstanceNormalizationDescriptor
An InstanceNormalizationDescriptor for InstanceNormalizationLayer.
Definition: Descriptors.hpp:847
armnn::NeonInstanceNormalizationWorkloadValidate
arm_compute::Status NeonInstanceNormalizationWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const InstanceNormalizationDescriptor &descriptor)
Definition: NeonInstanceNormalizationWorkload.cpp:19
NeonUnidirectionalSequenceLstmFloatWorkload.hpp
armnn::GatherDescriptor
A GatherDescriptor for the GatherLayer.
Definition: Descriptors.hpp:965
armnn::NeonBackendModelContext
The NeonBackendModelContext is used to pass in Neon specific backend ModelOptions.
Definition: NeonBackendModelContext.hpp:19
armnn::LayerType::InstanceNormalization
@ InstanceNormalization
armnn::LayerType::ConvertFp16ToFp32
@ ConvertFp16ToFp32
armnn::NeonMeanWorkloadValidate
arm_compute::Status NeonMeanWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const MeanDescriptor &descriptor)
Definition: NeonMeanWorkload.cpp:18
armnn::NeonLayerSupport
Definition: NeonLayerSupport.hpp:14
armnn::NeonLayerSupport::IsBatchMatMulSupported
bool IsBatchMatMulSupported(const TensorInfo &inputX, const TensorInfo &inputY, const TensorInfo &output, const BatchMatMulDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:794
armnn::NeonLayerSupport::IsPooling3dSupported
bool IsPooling3dSupported(const TensorInfo &input, const TensorInfo &output, const Pooling3dDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1409
armnn::NeonLayerSupport::IsConvertFp32ToFp16Supported
bool IsConvertFp32ToFp16Supported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:951
armnn::NeonUnidirectionalSequenceLstmFloatWorkloadValidate
arm_compute::Status NeonUnidirectionalSequenceLstmFloatWorkloadValidate(const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const UnidirectionalSequenceLstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo)
Definition: NeonUnidirectionalSequenceLstmFloatWorkload.cpp:510
NeonDivisionWorkload.hpp
armnn::TensorInfo
Definition: Tensor.hpp:152
armnn::LayerType::Floor
@ Floor
armnn::NeonLayerSupport::IsPreluSupported
bool IsPreluSupported(const TensorInfo &input, const TensorInfo &alpha, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1417
armnn::L2NormalizationDescriptor
A L2NormalizationDescriptor for the L2NormalizationLayer.
Definition: Descriptors.hpp:809
armnn::NeonMaximumWorkloadValidate
arm_compute::Status NeonMaximumWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output)
Definition: NeonMaximumWorkload.cpp:14
NeonPermuteWorkload.hpp
armnn::OriginsDescriptor::GetNumDimensions
uint32_t GetNumDimensions() const
Get the number of dimensions.
Definition: Descriptors.cpp:192
armnn::BinaryOperation::Sub
@ Sub
armnn::LayerType::Transpose
@ Transpose
armnn::NormalizationDescriptor
A NormalizationDescriptor for the NormalizationLayer.
Definition: Descriptors.hpp:769
NeonBatchNormalizationWorkload.hpp
armnn::LayerType::Comparison
@ Comparison
armnn::LayerType::StridedSlice
@ StridedSlice
armnn::ChannelShuffleDescriptor
A ChannelShuffleDescriptor for the ChannelShuffle operator.
Definition: Descriptors.hpp:1562
armnn::DataType::Float32
@ Float32
NeonPadWorkload.hpp
armnn::NeonLayerSupport::IsMultiplicationSupported
bool IsMultiplicationSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1337
NeonQuantizeWorkload.hpp
NeonConvolution3dWorkload.hpp
armnn::LogicalBinaryOperation::LogicalOr
@ LogicalOr
NeonChannelShuffleWorkload.hpp
armnn::NeonResizeWorkloadValidate
arm_compute::Status NeonResizeWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const ResizeDescriptor &descriptor)
Definition: NeonResizeWorkload.cpp:22
armnn::LayerType::Tile
@ Tile
armnn::NeonExpWorkloadValidate
arm_compute::Status NeonExpWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: NeonExpWorkload.cpp:17
NeonLogicalAndWorkload.hpp
NeonConvolution2dWorkload.hpp
NeonPooling2dWorkload.hpp
armnn::NeonLayerSupport::IsFusedSupported
bool IsFusedSupported(const std::vector< std::reference_wrapper< TensorInfo >> &inputs, const std::vector< std::reference_wrapper< TensorInfo >> &outputs, const FusedDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1179
armnn::NeonPermuteWorkloadValidate
arm_compute::Status NeonPermuteWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const PermuteDescriptor &descriptor)
Definition: NeonPermuteWorkload.cpp:15
armnn::LayerType::Stack
@ Stack
armnn::NeonLogicalNotWorkloadValidate
arm_compute::Status NeonLogicalNotWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: NeonLogicalNotWorkload.cpp:19
BackendRegistry.hpp
armnn::StackDescriptor
A StackDescriptor for the StackLayer.
Definition: Descriptors.hpp:1251
armnn::NeonLayerSupport::IsConstantSupported
bool IsConstantSupported(const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:933
IgnoreUnused.hpp
armnn::LayerType::Normalization
@ Normalization
armnn::NeonLayerSupport::IsLogicalBinarySupported
bool IsLogicalBinarySupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const LogicalBinaryDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported) const
Definition: NeonLayerSupport.cpp:1244
armnn::NeonBatchMatMulValidate
arm_compute::Status NeonBatchMatMulValidate(const TensorInfo &inputInfoX, const TensorInfo &inputInfoY, const TensorInfo &outputInfo, const BatchMatMulDescriptor &descriptor, const bool isFastMathEnabled, const ActivationDescriptor *activationDescriptor)
Definition: NeonBatchMatMulWorkload.cpp:19
armnn::NeonLayerSupport::IsBatchToSpaceNdSupported
bool IsBatchToSpaceNdSupported(const TensorInfo &input, const TensorInfo &output, const BatchToSpaceNdDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:845
armnn::LayerType::QuantizedLstm
@ QuantizedLstm
armnn::UnaryOperation::Neg
@ Neg
armnn::LayerType::Reduce
@ Reduce
NeonSpaceToDepthWorkload.hpp
armnn::LayerType::ElementwiseUnary
@ ElementwiseUnary
armnn::ComputeSplitAxis
std::set< unsigned int > ComputeSplitAxis(const armnn::SplitterDescriptor &desc, const TensorShape &input)
Definition: ArmComputeUtils.hpp:246
armnn::DataType::QSymmS16
@ QSymmS16
armnn::NeonConvertFp16ToFp32WorkloadValidate
arm_compute::Status NeonConvertFp16ToFp32WorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: NeonConvertFp16ToFp32Workload.cpp:19
armnn::LayerType::GatherNd
@ GatherNd
armnn::LayerType::ElementwiseBinary
@ ElementwiseBinary
NeonReshapeWorkload.hpp
armnn::NeonLayerSupport::IsReshapeSupported
bool IsReshapeSupported(const TensorInfo &input, const TensorInfo &output, const ReshapeDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1500
armnn::QuantizedLstmInputParamsInfo
Definition: QuantizedLstmParams.hpp:119
armnn::NeonLayerSupport::IsBatchNormalizationSupported
bool IsBatchNormalizationSupported(const TensorInfo &input, const TensorInfo &output, const TensorInfo &mean, const TensorInfo &var, const TensorInfo &beta, const TensorInfo &gamma, const BatchNormalizationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:824
armnn::LayerType::ConvertFp32ToFp16
@ ConvertFp32ToFp16
armnn::NeonLogWorkloadValidate
arm_compute::Status NeonLogWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: NeonLogWorkload.cpp:17
armnn::NeonArgMinMaxWorkloadValidate
arm_compute::Status NeonArgMinMaxWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const ArgMinMaxDescriptor &descriptor)
Definition: NeonArgMinMaxWorkload.cpp:31
armnn::NeonLayerSupport::IsGatherNdSupported
bool IsGatherNdSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported) const
Definition: NeonLayerSupport.cpp:1206
NeonArgMinMaxWorkload.hpp
armnn::NeonLayerSupport::IsSoftmaxSupported
bool IsSoftmaxSupported(const TensorInfo &input, const TensorInfo &output, const SoftmaxDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1548
armnn::NeonLogSoftmaxWorkloadValidate
arm_compute::Status NeonLogSoftmaxWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const LogSoftmaxDescriptor &descriptor)
Definition: NeonLogSoftmaxWorkload.cpp:19
armnn::NeonNormalizationWorkloadValidate
arm_compute::Status NeonNormalizationWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const NormalizationDescriptor &descriptor)
Definition: NeonNormalizationFloatWorkload.cpp:49
LayerSupportCommon.hpp
armnn::NeonLayerSupport::IsSpaceToDepthSupported
bool IsSpaceToDepthSupported(const TensorInfo &input, const TensorInfo &output, const SpaceToDepthDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1568
armnn::NeonLayerSupport::IsLayerSupported
bool IsLayerSupported(const LayerType &type, const std::vector< TensorInfo > &infos, const BaseDescriptor &descriptor, const Optional< LstmInputParamsInfo > &lstmParamsInfo, const Optional< QuantizedLstmInputParamsInfo > &quantizedLstmParamsInfo, Optional< std::string & > reasonIfUnsupported) const override
Default implementation of the ILayerSupport interface; backends should implement this as a switch sta...
Definition: NeonLayerSupport.cpp:708
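Since this override is the backend's single entry point for support queries, a brief usage sketch may help. Everything below (shapes, descriptor values, the surrounding function name) is illustrative only and assumes the Neon backend headers are on the include path:

    #include "NeonLayerSupport.hpp"
    #include <armnn/Descriptors.hpp>
    #include <armnn/Optional.hpp>
    #include <armnn/Tensor.hpp>
    #include <armnn/Types.hpp>
    #include <string>

    // Sketch: ask the NEON backend whether a simple Activation layer is supported.
    bool IsSigmoidSupportedOnNeon()
    {
        armnn::NeonLayerSupport support;
        armnn::ActivationDescriptor actDesc;                      // defaults to Sigmoid
        armnn::TensorInfo in({1, 8}, armnn::DataType::Float32);
        armnn::TensorInfo out({1, 8}, armnn::DataType::Float32);
        std::string reason;
        return support.IsLayerSupported(armnn::LayerType::Activation,
                                        {in, out},                // inputs followed by outputs
                                        actDesc,
                                        armnn::EmptyOptional(),   // no LSTM parameters
                                        armnn::EmptyOptional(),   // no QuantizedLstm parameters
                                        armnn::Optional<std::string&>(reason));
    }

If the query fails, the bound string receives the backend's reason for rejecting the layer.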
armnn::NeonConstantWorkloadValidate
arm_compute::Status NeonConstantWorkloadValidate(const TensorInfo &output)
Definition: NeonConstantWorkload.cpp:20
armnn::LayerType::Slice
@ Slice
NeonLogSoftmaxWorkload.hpp
armnn::NeonReduceWorkloadValidate
arm_compute::Status NeonReduceWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const ReduceDescriptor &descriptor)
Definition: NeonReduceWorkload.cpp:19
armnn::BinaryOperation::Maximum
@ Maximum
armnn::FusedDescriptor
A FusedDescriptor for the FusedLayer.
Definition: Descriptors.hpp:944
armnn::NeonLayerSupport::IsResizeSupported
bool IsResizeSupported(const TensorInfo &input, const TensorInfo &output, const ResizeDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1512
armnn::NeonPadWorkloadValidate
arm_compute::Status NeonPadWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const PadDescriptor &descriptor)
Definition: NeonPadWorkload.cpp:59
NeonGatherWorkload.hpp
armnn::LayerType::ChannelShuffle
@ ChannelShuffle
armnn::NeonGatherNdWorkloadValidate
arm_compute::Status NeonGatherNdWorkloadValidate(const TensorInfo &paramsInfo, const TensorInfo &indicesInfo, const TensorInfo &outputInfo)
Definition: NeonGatherNdWorkload.cpp:14
armnn::NeonConcatWorkloadValidate
arm_compute::Status NeonConcatWorkloadValidate(const std::vector< const TensorInfo * > &inputs, const TensorInfo &output, const OriginsDescriptor &descriptor)
Definition: NeonConcatWorkload.cpp:27
armnn::BinaryOperation::SqDiff
@ SqDiff
armnn::NeonLstmFloatWorkloadValidate
arm_compute::Status NeonLstmFloatWorkloadValidate(const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &scratchBuffer, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const LstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo)
Definition: NeonLstmFloatWorkload.cpp:253
armnn::NeonLayerSupport::IsReduceSupported
bool IsReduceSupported(const TensorInfo &input, const TensorInfo &output, const ReduceDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1488
armnn::UnaryOperation::Rsqrt
@ Rsqrt
armnn::NeonDivisionWorkloadValidate
arm_compute::Status NeonDivisionWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ActivationDescriptor *activationDescriptor)
Definition: NeonDivisionWorkload.cpp:18
armnn::NeonLayerSupport::IsNormalizationSupported
bool IsNormalizationSupported(const TensorInfo &input, const TensorInfo &output, const NormalizationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1363
NeonPooling3dWorkload.hpp
armnn::NeonLayerSupport::IsSliceSupported
bool IsSliceSupported(const TensorInfo &input, const TensorInfo &output, const SliceDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1536
armnn::UnaryOperation::Sqrt
@ Sqrt
armnn::UnaryOperation::LogicalNot
@ LogicalNot
armnn::LayerType::Subtraction
@ Subtraction
armnn::LayerType::Prelu
@ Prelu
armnn::NeonSqrtWorkloadValidate
arm_compute::Status NeonSqrtWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: NeonSqrtWorkload.cpp:18
armnn::NeonDepthToSpaceWorkloadValidate
arm_compute::Status NeonDepthToSpaceWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const DepthToSpaceDescriptor &descriptor)
Definition: NeonDepthToSpaceWorkload.cpp:19
armnn::NeonLayerSupport::IsSubtractionSupported
bool IsSubtractionSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1637
armnn::LayerType::LogicalBinary
@ LogicalBinary
armnn::LogicalBinaryDescriptor::m_Operation
LogicalBinaryOperation m_Operation
Specifies the logical operation to execute.
Definition: Descriptors.hpp:1534
NeonReduceWorkload.hpp
armnn::LayerType::Concat
@ Concat
armnn::PadDescriptor
A PadDescriptor for the PadLayer.
Definition: Descriptors.hpp:1196
armnn::NeonLayerSupport::IsTileSupported
bool IsTileSupported(const TensorInfo &input0, const TensorInfo &output, const TileDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1650
armnn::UnaryOperation::Exp
@ Exp
armnn::TransposeDescriptor
A TransposeDescriptor for the TransposeLayer.
Definition: Descriptors.hpp:1490
armnn::LayerSupportBase::IsDetectionPostProcessSupported
bool IsDetectionPostProcessSupported(const TensorInfo &boxEncodings, const TensorInfo &scores, const TensorInfo &anchors, const TensorInfo &detectionBoxes, const TensorInfo &detectionClasses, const TensorInfo &detectionScores, const TensorInfo &numDetections, const DetectionPostProcessDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: LayerSupportBase.cpp:85
NeonLogicalOrWorkload.hpp
armnn::NeonLayerSupport::IsQuantizeSupported
bool IsQuantizeSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1460
armnn::LayerType::TransposeConvolution2d
@ TransposeConvolution2d
armnn::LayerType::Merge
@ Merge
PolymorphicDowncast.hpp
armnn::EmptyOptional
EmptyOptional is used to initialize the Optional class when we want a default value for an O...
Definition: Optional.hpp:32
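A short sketch of the pattern this default enables, using the IsConstantSupported query listed above and reusing the includes from the earlier sketch; the tensor info is made up:

    // With the EmptyOptional() default no failure reason is collected;
    // binding an Optional<std::string&> makes the backend fill in a reason.
    armnn::NeonLayerSupport support;
    armnn::TensorInfo constOut({4}, armnn::DataType::Float32, 0.0f, 0, true);
    std::string reason;
    bool quiet   = support.IsConstantSupported(constOut);
    bool verbose = support.IsConstantSupported(constOut, armnn::Optional<std::string&>(reason));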
armnn::SliceDescriptor
A SliceDescriptor for the SliceLayer.
Definition: Descriptors.hpp:1228
armnn::DataType
DataType
Definition: Types.hpp:48
armnn::LayerType::Softmax
@ Softmax
armnn::NeonTransposeWorkloadValidate
arm_compute::Status NeonTransposeWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const TransposeDescriptor &descriptor)
Definition: NeonTransposeWorkload.cpp:15
armnn::NeonSoftmaxWorkloadValidate
arm_compute::Status NeonSoftmaxWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const SoftmaxDescriptor &descriptor)
Definition: NeonSoftmaxWorkload.cpp:19
armnn::NeonLayerSupport::IsOutputSupported
bool IsOutputSupported(const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1375
armnn::PolymorphicDowncast
DestType PolymorphicDowncast(SourceType *value)
Polymorphic downcast for built-in pointers only.
Definition: PolymorphicDowncast.hpp:74
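A hypothetical sketch of the downcast idiom used when dispatching on a BaseDescriptor; the caller must already know the concrete descriptor type, and the function name here is invented:

    #include <armnn/Descriptors.hpp>
    #include <armnn/utility/PolymorphicDowncast.hpp>

    // Sketch: recover the concrete descriptor type behind a BaseDescriptor reference.
    float GetActivationAlpha(const armnn::BaseDescriptor& descriptor)
    {
        const auto* actDesc =
            armnn::PolymorphicDowncast<const armnn::ActivationDescriptor*>(&descriptor);
        return actDesc->m_A;   // alpha parameter of the activation function
    }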
NeonDepthToSpaceWorkload.hpp
armnn::NeonSpaceToDepthWorkloadValidate
arm_compute::Status NeonSpaceToDepthWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const SpaceToDepthDescriptor &descriptor)
Definition: NeonSpaceToDepthWorkload.cpp:19
armnn::TensorInfo::IsTypeSpaceMatch
bool IsTypeSpaceMatch(const TensorInfo &other) const
Check that the types are the same and, if quantized, that the quantization parameters are the same.
Definition: Tensor.cpp:432
armnn::NeonLayerSupport::IsMinimumSupported
bool IsMinimumSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1325
armnn::ReshapeDescriptor
A ReshapeDescriptor for the ReshapeLayer.
Definition: Descriptors.hpp:1023
NeonDepthwiseConvolutionWorkload.hpp
armnn::LayerSupportBase::IsMergeSupported
bool IsMergeSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: LayerSupportBase.cpp:112
armnn::InvalidArgumentException
Definition: Exceptions.hpp:80
armnn::UnaryOperation::Sin
@ Sin
armnn::LayerType::Quantize
@ Quantize
armnn::NeonLayerSupport::IsPadSupported
bool IsPadSupported(const TensorInfo &input, const TensorInfo &output, const PadDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1381
NeonTransposeWorkload.hpp
armnn::LayerSupportBase::IsMemImportSupported
bool IsMemImportSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: LayerSupportBase.cpp:105
armnn::NeonLayerSupport::IsDepthToSpaceSupported
bool IsDepthToSpaceSupported(const TensorInfo &input, const TensorInfo &output, const DepthToSpaceDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1027
armnn::NeonLayerSupport::IsTransposeSupported
bool IsTransposeSupported(const TensorInfo &input, const TensorInfo &output, const TransposeDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1678
armnn::LayerType::Multiplication
@ Multiplication
armnn::PermuteDescriptor
A PermuteDescriptor for the PermuteLayer.
Definition: Descriptors.hpp:149
armnn::BatchMatMulDescriptor
A BatchMatMulDescriptor for the BatchMatMul operator.
Definition: Descriptors.hpp:1584
armnn::LayerType::Addition
@ Addition
armnn::NeonPreluWorkloadValidate
arm_compute::Status NeonPreluWorkloadValidate(const TensorInfo &input, const TensorInfo &alpha, const TensorInfo &output)
Definition: NeonPreluWorkload.cpp:17
NeonBatchToSpaceNdWorkload.hpp
armnn::NeonDequantizeWorkloadValidate
arm_compute::Status NeonDequantizeWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: NeonDequantizeWorkload.cpp:22
ArmComputeUtils.hpp
armnn::SpaceToBatchNdDescriptor
A SpaceToBatchNdDescriptor for the SpaceToBatchNdLayer.
Definition: Descriptors.hpp:1043
armnn::NeonLayerSupport::IsQuantizedLstmSupported
bool IsQuantizedLstmSupported(const TensorInfo &input, const TensorInfo &cellStateIn, const TensorInfo &outputStateIn, const TensorInfo &cellStateOut, const TensorInfo &outputStateOut, const QuantizedLstmInputParamsInfo &paramsInfo, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1470
armnn::Convolution3dDescriptor
A Convolution3dDescriptor for the Convolution3dLayer.
Definition: Descriptors.hpp:588
armnn::LayerType::DepthToSpace
@ DepthToSpace
NeonQLstmWorkload.hpp
armnn::BaseDescriptor
Base class for all descriptors.
Definition: Descriptors.hpp:22
NeonRsqrtWorkload.hpp
armnn::NeonLayerSupport::IsConcatSupported
bool IsConcatSupported(const std::vector< const TensorInfo * > inputs, const TensorInfo &output, const OriginsDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:894
armnn::NeonLogicalAndWorkloadValidate
arm_compute::Status NeonLogicalAndWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output)
Definition: NeonLogicalAndWorkload.cpp:18
NeonSubtractionWorkload.hpp
NeonCastWorkload.hpp
armnn::NeonLayerSupport::IsReverseV2Supported
bool IsReverseV2Supported(const TensorInfo &input, const TensorInfo &axis, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported) const
Definition: NeonLayerSupport.cpp:1524
armnn::NeonDepthwiseConvolutionWorkloadValidate
arm_compute::Status NeonDepthwiseConvolutionWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, const ActivationDescriptor *activationDescriptor)
Definition: NeonDepthwiseConvolutionWorkload.cpp:29
armnn::BoostLogSeverityMapping::info
@ info
armnn::BinaryOperation::Power
@ Power
armnn::LayerType::DetectionPostProcess
@ DetectionPostProcess
armnn::LayerType::MemImport
@ MemImport
armnn::LayerType::Pooling2d
@ Pooling2d
armnn::NeonLayerSupport::IsFullyConnectedSupported
bool IsFullyConnectedSupported(const TensorInfo &input, const TensorInfo &output, const TensorInfo &weights, const TensorInfo &biases, const FullyConnectedDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1162
armnn::NeonLayerSupport::IsElementwiseUnarySupported
bool IsElementwiseUnarySupported(const TensorInfo &input, const TensorInfo &output, const ElementwiseUnaryDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1083
armnn::TensorInfo::GetDataType
DataType GetDataType() const
Definition: Tensor.hpp:198
armnn::NeonLayerSupport::IsDepthwiseConvolutionSupported
bool IsDepthwiseConvolutionSupported(const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1039
armnn::LayerType::Division
@ Division
armnn::NeonQuantizedLstmWorkloadValidate
arm_compute::Status NeonQuantizedLstmWorkloadValidate(const TensorInfo &input, const TensorInfo &cellStateIn, const TensorInfo &outputStateIn, const TensorInfo &cellStateOut, const TensorInfo &outputStateOut, const QuantizedLstmInputParamsInfo &paramsInfo)
Definition: NeonQuantizedLstmWorkload.cpp:131
armnn::NeonActivationWorkloadValidate
arm_compute::Status NeonActivationWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const ActivationDescriptor &descriptor)
Definition: NeonActivationWorkload.cpp:17
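These Neon*WorkloadValidate helpers return an arm_compute::Status rather than a bool. A minimal sketch, assuming NeonActivationWorkload.hpp is visible to the caller, of turning one into a bool plus reason string:

    // Sketch: convert an arm_compute::Status from a validate helper into bool + reason.
    bool CheckActivationOnAcl(const armnn::TensorInfo& input,
                              const armnn::TensorInfo& output,
                              const armnn::ActivationDescriptor& desc,
                              std::string& reasonOut)
    {
        arm_compute::Status status = armnn::NeonActivationWorkloadValidate(input, output, desc);
        if (status.error_code() != arm_compute::ErrorCode::OK)
        {
            reasonOut = status.error_description();   // human-readable reason from ACL
            return false;
        }
        return true;
    }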
armnn::LayerType::Shape
@ Shape
armnn::NeonLayerSupport::IsFillSupported
bool IsFillSupported(const TensorInfo &input, const TensorInfo &output, const FillDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1135
NeonLogWorkload.hpp
NeonResizeWorkload.hpp
armnn::BatchToSpaceNdDescriptor
A BatchToSpaceNdDescriptor for the BatchToSpaceNdLayer.
Definition: Descriptors.hpp:875
armnn::Convolution2dDescriptor
A Convolution2dDescriptor for the Convolution2dLayer.
Definition: Descriptors.hpp:534
armnn::ComparisonDescriptor
A ComparisonDescriptor for the ComparisonLayer.
Definition: Descriptors.hpp:89
armnn::FillDescriptor
A FillDescriptor for the FillLayer.
Definition: Descriptors.hpp:925
armnn::DataType::QAsymmS8
@ QAsymmS8
armnn::NeonQuantizeWorkloadValidate
arm_compute::Status NeonQuantizeWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: NeonQuantizeWorkload.cpp:18
armnn::ElementwiseUnaryDescriptor::m_Operation
UnaryOperation m_Operation
Specifies the elementwise unary operation to execute.
Definition: Descriptors.hpp:145
NeonTransposeConvolution2dWorkload.hpp
armnn::LayerType::FullyConnected
@ FullyConnected
armnn::LayerType::Gather
@ Gather
NeonFullyConnectedWorkload.hpp
NeonSinWorkload.hpp
armnn::LayerType::Pooling3d
@ Pooling3d
armnn::NeonBatchToSpaceNdWorkloadValidate
arm_compute::Status NeonBatchToSpaceNdWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const BatchToSpaceNdDescriptor &descriptor)
Definition: NeonBatchToSpaceNdWorkload.cpp:15
armnn::NeonCastValidate
arm_compute::Status NeonCastValidate(const TensorInfo &input, const TensorInfo &output)
Definition: NeonCastWorkload.cpp:19
armnn::NeonLayerSupport::IsChannelShuffleSupported
bool IsChannelShuffleSupported(const TensorInfo &input, const TensorInfo &output, const ChannelShuffleDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:867
armnn::UnaryOperation::Log
@ Log
armnn::LayerType::LogSoftmax
@ LogSoftmax
armnn::LayerType::BatchMatMul
@ BatchMatMul
armnn::LogicalBinaryOperation::LogicalAnd
@ LogicalAnd
armnn::LayerType::DepthwiseConvolution2d
@ DepthwiseConvolution2d
armnn::LayerType::Cast
@ Cast
armnn::LayerSupportBase::IsShapeSupported
bool IsShapeSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: LayerSupportBase.cpp:131
armnn::LayerType::BatchToSpaceNd
@ BatchToSpaceNd
NeonInstanceNormalizationWorkload.hpp
FORWARD_WORKLOAD_VALIDATE_FUNC
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported,...)
Definition: NeonLayerSupport.cpp:153
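The macro expands to a return statement, so it is written as the last statement of a support query. A sketch of the typical shape of such a query; this is illustrative only and written as if inside the armnn namespace of this file, where the macro and NeonLogWorkloadValidate are both visible:

    // Illustrative only: not a real member of NeonLayerSupport.
    bool IsLogSupportedSketch(const TensorInfo& input,
                              const TensorInfo& output,
                              Optional<std::string&> reasonIfUnsupported)
    {
        FORWARD_WORKLOAD_VALIDATE_FUNC(NeonLogWorkloadValidate,
                                       reasonIfUnsupported,
                                       input,
                                       output);
    }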
armnn::LstmDescriptor
An LstmDescriptor for the LstmLayer.
Definition: Descriptors.hpp:1102
armnn::StridedSliceDescriptor
A StridedSliceDescriptor for the StridedSliceLayer.
Definition: Descriptors.hpp:1303
armnn::NeonLayerSupport::IsL2NormalizationSupported
bool IsL2NormalizationSupported(const TensorInfo &input, const TensorInfo &output, const L2NormalizationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1236
Tensor.hpp
armnn::Status
Status
Definition: Types.hpp:42
armnn::NeonLayerSupport::IsArgMinMaxSupported
bool IsArgMinMaxSupported(const TensorInfo &input, const TensorInfo &output, const ArgMinMaxDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:782
armnn::NeonLayerSupport::IsInstanceNormalizationSupported
bool IsInstanceNormalizationSupported(const TensorInfo &input, const TensorInfo &output, const InstanceNormalizationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1224
armnn::LogicalBinaryDescriptor
A LogicalBinaryDescriptor for the LogicalBinaryLayer.
Definition: Descriptors.hpp:1518
armnn::LayerType::Reshape
@ Reshape
armnn::TensorInfo::GetShape
const TensorShape & GetShape() const
Definition: Tensor.hpp:191
armnn::NeonLayerSupport::IsTransposeConvolution2dSupported
bool IsTransposeConvolution2dSupported(const TensorInfo &input, const TensorInfo &output, const TransposeConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1662
armnn::NeonSplitterWorkloadValidate
arm_compute::Status NeonSplitterWorkloadValidate(const TensorInfo &input, const std::vector< std::reference_wrapper< TensorInfo >> &outputs, unsigned int splitAxis)
Definition: NeonSplitterWorkload.cpp:32
NeonSpaceToBatchNdWorkload.hpp
NeonConstantWorkload.hpp
armnn::LayerType::SpaceToBatchNd
@ SpaceToBatchNd
armnn::NeonFusedWorkloadValidate
arm_compute::Status NeonFusedWorkloadValidate(const std::vector< std::reference_wrapper< TensorInfo >> &inputInfos, const std::vector< std::reference_wrapper< TensorInfo >> &outputInfos, const FusedDescriptor &fusedDescriptor, const ActivationDescriptor *activationDescriptor)
Definition: NeonFusedWorkload.cpp:22
armnn::LayerType::Fill
@ Fill
armnn::NeonRsqrtWorkloadValidate
arm_compute::Status NeonRsqrtWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: NeonRsqrtWorkload.cpp:18
armnn::NeonUnidirectionalSequenceLstmWorkloadValidate
arm_compute::Status NeonUnidirectionalSequenceLstmWorkloadValidate(const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const UnidirectionalSequenceLstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo)
Definition: NeonUnidirectionalSequenceLstmWorkload.cpp:491
armnn::NeonReshapeWorkloadValidate
arm_compute::Status NeonReshapeWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: NeonReshapeWorkload.cpp:17
armnn::LayerType::L2Normalization
@ L2Normalization
NeonLstmFloatWorkload.hpp
armnn::LayerType::Fused
@ Fused
armnn::NeonConvolution2dWorkloadValidate
arm_compute::Status NeonConvolution2dWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const Convolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, bool isFastMathEnabled, const ActivationDescriptor *activationDescriptor)
Definition: NeonConvolution2dWorkload.cpp:24
NeonMaximumWorkload.hpp
armnn::IgnoreUnused
void IgnoreUnused(Ts &&...)
Definition: IgnoreUnused.hpp:14
armnn::NeonBatchNormalizationValidate
arm_compute::Status NeonBatchNormalizationValidate(const TensorInfo &input, const TensorInfo &output, const TensorInfo &mean, const TensorInfo &var, const TensorInfo &beta, const TensorInfo &gamma, const BatchNormalizationDescriptor &descriptor, const ActivationDescriptor *activationDescriptor)
Definition: NeonBatchNormalizationWorkload.cpp:24
armnn::NeonLayerSupport::IsCastSupported
bool IsCastSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:857
NeonQuantizedLstmWorkload.hpp
armnn::ViewsDescriptor::GetNumDimensions
uint32_t GetNumDimensions() const
Get the number of dimensions.
Definition: Descriptors.cpp:306
armnn::LayerType::Minimum
@ Minimum
armnn::NeonSinWorkloadValidate
arm_compute::Status NeonSinWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: NeonSinWorkload.cpp:17
armnn::NeonLayerSupport::IsFloorSupported
bool IsFloorSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1147
armnn::NeonLayerSupport::IsConvolution3dSupported
bool IsConvolution3dSupported(const TensorInfo &input, const TensorInfo &output, const Convolution3dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:994
NeonBatchMatMulWorkload.hpp
NeonMinimumWorkload.hpp
armnn::NeonLayerSupport::IsInputSupported
bool IsInputSupported(const TensorInfo &input, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1218
armnn::IsSupportedForDataTypeGeneric
bool IsSupportedForDataTypeGeneric(Optional< std::string & > reasonIfUnsupported, DataType dataType, Float16Func float16FuncPtr, Float32Func float32FuncPtr, Uint8Func uint8FuncPtr, Int32Func int32FuncPtr, BooleanFunc booleanFuncPtr, Params &&... params)
Definition: LayerSupportCommon.hpp:27
armnn::LayerType::UnidirectionalSequenceLstm
@ UnidirectionalSequenceLstm
armnn::NeonLayerSupport::IsMeanSupported
bool IsMeanSupported(const TensorInfo &input, const TensorInfo &output, const MeanDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1313
armnn::NeonLayerSupport::IsGatherSupported
bool IsGatherSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const GatherDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported) const
Definition: NeonLayerSupport.cpp:1192
NeonLogicalNotWorkload.hpp
armnn::BinaryOperation::Minimum
@ Minimum
armnn::LayerType::Map
@ Map
armnn::LayerType::ReverseV2
@ ReverseV2
armnn::OriginsDescriptor
An OriginsDescriptor for the ConcatLayer.
Definition: Descriptors.hpp:201
armnn::NeonStridedSliceWorkloadValidate
arm_compute::Status NeonStridedSliceWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const StridedSliceDescriptor &descriptor)
Definition: NeonStridedSliceWorkload.cpp:19
armnn::LayerType::MemCopy
@ MemCopy
Exceptions.hpp
armnn
Copyright (c) 2021 ARM Limited and Contributors.
Definition: 01_00_quick_start.dox:6
armnn::ElementwiseUnaryDescriptor
A ElementwiseUnaryDescriptor for the ElementwiseUnaryLayer.
Definition: Descriptors.hpp:129
armnn::TransposeConvolution2dDescriptor
A TransposeConvolution2dDescriptor for the TransposeConvolution2dLayer.
Definition: Descriptors.hpp:1440
Types.hpp
armnn::LayerType::ArgMinMax
@ ArgMinMax
armnn::LayerType::Pad
@ Pad
armnn::NeonLayerSupport::NeonLayerSupport
NeonLayerSupport()
Definition: NeonLayerSupport.cpp:166
armnn::LstmInputParamsInfo
Definition: LstmParams.hpp:63
NeonSqrtWorkload.hpp
armnn::LayerType::Rank
@ Rank
armnn::LayerType::Mean
@ Mean
ArmComputeTensorUtils.hpp
armnn::UnaryOperation::Abs
@ Abs
armnn::NeonLayerSupport::IsConvertFp16ToFp32Supported
bool IsConvertFp16ToFp32Supported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:941
NeonStackWorkload.hpp
NeonBackendModelContext.hpp
armnn::NeonSliceWorkloadValidate
arm_compute::Status NeonSliceWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const SliceDescriptor &descriptor)
Definition: NeonSliceWorkload.cpp:21
NeonPreluWorkload.hpp
armnn::NeonLayerSupport::IsDivisionSupported
bool IsDivisionSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1350
armnn::LayerType::Input
@ Input
armnn::NeonLayerSupport::IsDilatedDepthwiseConvolutionSupported
bool IsDilatedDepthwiseConvolutionSupported(const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reason=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1066
armnn::LayerType::Resize
@ Resize
NeonElementwiseBinaryWorkload.hpp
NeonSliceWorkload.hpp
armnn::NeonLayerSupport::IsMaximumSupported
bool IsMaximumSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1301
NeonGatherNdWorkload.hpp
NeonTileWorkload.hpp
armnn::NeonElementwiseBinaryWorkloadValidate
arm_compute::Status NeonElementwiseBinaryWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ElementwiseBinaryDescriptor &descriptor, const ActivationDescriptor *activationDescriptor)
Definition: NeonElementwiseBinaryWorkload.cpp:20
armnn::NeonPooling2dWorkloadValidate
arm_compute::Status NeonPooling2dWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const Pooling2dDescriptor &descriptor)
Definition: NeonPooling2dWorkload.cpp:22
armnn::IsLayerTypeSupported
bool IsLayerTypeSupported(const LayerType &type, const std::vector< TensorInfo > &infos, const BaseDescriptor &descriptor, const Optional< LstmInputParamsInfo > &lstmParamsInfo, const Optional< QuantizedLstmInputParamsInfo > &quantizedLstmParamsInfo, Optional< std::string & > reasonIfUnsupported, const NeonLayerSupport &support)
Definition: NeonLayerSupport.cpp:171
armnn::SetValueChecked
void SetValueChecked(Optional< T & > optionalRef, V &&val)
Definition: LayerSupportCommon.hpp:17
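A minimal sketch of the reason-reporting pattern this helper supports inside a support query; the function name and message text are made up, and LayerSupportCommon.hpp is assumed to be on the include path:

    // Sketch: write the reason only if the caller actually bound a string.
    bool RejectWithReason(armnn::Optional<std::string&> reasonIfUnsupported)
    {
        armnn::SetValueChecked(reasonIfUnsupported,
                               "Example reason: data type combination is not supported");
        return false;
    }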
armnn::NeonLayerSupport::IsComparisonSupported
bool IsComparisonSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ComparisonDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:879
armnn::NeonChannelShuffleValidate
arm_compute::Status NeonChannelShuffleValidate(const TensorInfo &input, const TensorInfo &output, const ChannelShuffleDescriptor &descriptor)
Definition: NeonChannelShuffleWorkload.cpp:17
armnn::NeonL2NormalizationWorkloadValidate
arm_compute::Status NeonL2NormalizationWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const L2NormalizationDescriptor &descriptor)
Definition: NeonL2NormalizationFloatWorkload.cpp:19
armnn::NeonComparisonWorkloadValidate
arm_compute::Status NeonComparisonWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ComparisonDescriptor &descriptor)
Definition: NeonComparisonWorkload.cpp:16
armnn::BinaryOperation::Div
@ Div
NeonMultiplicationWorkload.hpp
armnn::LayerType::Convolution2d
@ Convolution2d
NeonUnidirectionalSequenceLstmWorkload.hpp
armnn::Pooling2dDescriptor
A Pooling2dDescriptor for the Pooling2dLayer.
Definition: Descriptors.hpp:371
NeonL2NormalizationFloatWorkload.hpp
armnn::LayerType::Maximum
@ Maximum
armnn::LayerType::Activation
@ Activation
armnn::DepthwiseConvolution2dDescriptor
A DepthwiseConvolution2dDescriptor for the DepthwiseConvolution2dLayer.
Definition: Descriptors.hpp:659
armnn::LayerType::Lstm
@ Lstm
armnn::LayerSupportBase::IsMemCopySupported
bool IsMemCopySupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: LayerSupportBase.cpp:98
armnn::LayerType::Dequantize
@ Dequantize
armnn::NeonLayerSupport::IsUnidirectionalSequenceLstmSupported
bool IsUnidirectionalSequenceLstmSupported(const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const UnidirectionalSequenceLstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string & > reasonIfUnsupported) const
Definition: NeonLayerSupport.cpp:1686
armnn::LayerType::Convolution3d
@ Convolution3d
armnn::ReduceDescriptor
A ReduceDescriptor for the REDUCE operators.
Definition: Descriptors.hpp:1538
armnn::NeonLayerSupport::IsQLstmSupported
bool IsQLstmSupported(const TensorInfo &input, const TensorInfo &previousOutputIn, const TensorInfo &previousCellStateIn, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const QLstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1425
NeonConvertFp32ToFp16Workload.hpp
armnn::NeonLayerSupport::IsActivationSupported
bool IsActivationSupported(const TensorInfo &input, const TensorInfo &output, const ActivationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:756
armnn::NeonLayerSupport::IsSplitterSupported
bool IsSplitterSupported(const TensorInfo &input, const std::vector< std::reference_wrapper< TensorInfo >> &outputs, const ViewsDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1580
armnn::LayerType
LayerType
When adding a new layer, also adapt the LastLayer value in the enum class LayerType below.
Definition: Types.hpp:491
armnn::LayerType::Unmap
@ Unmap
armnn::NeonLayerSupport::IsLogSoftmaxSupported
bool IsLogSoftmaxSupported(const TensorInfo &input, const TensorInfo &output, const LogSoftmaxDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1269
armnn::MeanDescriptor
A MeanDescriptor for the MeanLayer.
Definition: Descriptors.hpp:1172
armnn::LayerType::QLstm
@ QLstm
NeonDequantizeWorkload.hpp
armnn::OptionalReferenceSwitch< std::is_reference< T >::value, T >::value
const T & value() const
Definition: Optional.hpp:146
armnn::TileDescriptor
Definition: Descriptors.hpp:1640
NeonSplitterWorkload.hpp
armnn::SoftmaxDescriptor
A SoftmaxDescriptor for the SoftmaxLayer.
Definition: Descriptors.hpp:177
armnn::NeonLayerSupport::IsStridedSliceSupported
bool IsStridedSliceSupported(const TensorInfo &input, const TensorInfo &output, const StridedSliceDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1625
armnn::NeonLayerSupport::IsLstmSupported
bool IsLstmSupported(const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &scratchBuffer, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const LstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1277
armnn::IBackendInternal::IBackendSpecificModelContextPtr
std::shared_ptr< IBackendModelContext > IBackendSpecificModelContextPtr
Definition: IBackendInternal.hpp:96
NeonLayerSupport.hpp
armnn::SpaceToDepthDescriptor
A SpaceToDepthDescriptor for the SpaceToDepthLayer.
Definition: Descriptors.hpp:1075
armnn::OptionalBase::has_value
bool has_value() const noexcept
Definition: Optional.hpp:53
NeonActivationWorkload.hpp
armnn::NeonLayerSupport::IsPooling2dSupported
bool IsPooling2dSupported(const TensorInfo &input, const TensorInfo &output, const Pooling2dDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1401
armnn::NeonTransposeConvolution2dWorkloadValidate
arm_compute::Status NeonTransposeConvolution2dWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const TransposeConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases)
Definition: NeonTransposeConvolution2dWorkload.cpp:25
armnn::LayerType::Output
@ Output
armnn::LayerType::Constant
@ Constant
armnn::NeonLayerSupport::IsConvolution2dSupported
bool IsConvolution2dSupported(const TensorInfo &input, const TensorInfo &output, const Convolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:961
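Finally, a hedged sketch of querying convolution support with explicit weight and bias infos; every shape, stride and layout below is invented for the example, and the includes from the first sketch are assumed:

    // Sketch: 1x1 convolution, NHWC, 8 -> 16 channels, FP32 (illustrative values).
    bool IsSmallConvSupportedOnNeon()
    {
        armnn::NeonLayerSupport support;

        armnn::Convolution2dDescriptor conv;
        conv.m_StrideX     = 1;
        conv.m_StrideY     = 1;
        conv.m_BiasEnabled = true;
        conv.m_DataLayout  = armnn::DataLayout::NHWC;

        armnn::TensorInfo input({1, 16, 16, 8},   armnn::DataType::Float32);
        armnn::TensorInfo output({1, 16, 16, 16}, armnn::DataType::Float32);
        armnn::TensorInfo weights({16, 1, 1, 8},  armnn::DataType::Float32, 0.0f, 0, true);
        armnn::TensorInfo biases({16},            armnn::DataType::Float32, 0.0f, 0, true);

        std::string reason;
        return support.IsConvolution2dSupported(input, output, conv, weights,
                                                armnn::Optional<armnn::TensorInfo>(biases),
                                                armnn::Optional<std::string&>(reason));
    }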
armnn::NeonLogicalOrWorkloadValidate
arm_compute::Status NeonLogicalOrWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output)
Definition: NeonLogicalOrWorkload.cpp:18