// NeonLayerSupport.cpp — ArmNN 23.05
// (text below was extracted from the Doxygen "documentation of this file" page;
//  hyperlinked lines such as function signatures and case labels are missing)
1 //
2 // Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
6 #include "NeonLayerSupport.hpp"
7 #include "NeonBackendId.hpp"
9 
10 #include <armnn/Exceptions.hpp>
11 #include <armnn/Tensor.hpp>
12 #include <armnn/Types.hpp>
14 
15 #include <InternalTypes.hpp>
16 #include <LayerSupportCommon.hpp>
19 
20 #if defined(ARMCOMPUTENEON_ENABLED)
87 #endif
88 
89 namespace armnn
90 {
91 
92 namespace
93 {
94 
95 const TensorInfo OverrideDataType(const TensorInfo& info, Optional<DataType> type)
96 {
97  if (!type)
98  {
99  return info;
100  }
101  return TensorInfo(info.GetShape(),
102  type.value(),
103  info.GetQuantizationScale(),
104  info.GetQuantizationOffset(),
105  info.IsConstant());
106 }
107 
108 template< typename ... Args>
109 bool IsNeonBackendSupported(Optional<std::string&> reasonIfUnsupported, Args... args)
110 {
111  IgnoreUnused(reasonIfUnsupported, (args)...);
112 #if defined(ARMCOMPUTENEON_ENABLED)
113  return true;
114 #else
115  SetValueChecked(reasonIfUnsupported, "The armnn library has been built without NEON support");
116  return false;
117 #endif
118 }
119 
120 template<typename FloatFunc, typename Uint8Func, typename ... Params>
121 bool IsSupportedForDataTypeNeon(Optional<std::string&> reasonIfUnsupported,
122  DataType dataType,
123  FloatFunc floatFuncPtr,
124  Uint8Func uint8FuncPtr,
125  Params&&... params)
126 {
127  return IsNeonBackendSupported(reasonIfUnsupported) &&
128  IsSupportedForDataTypeGeneric(reasonIfUnsupported,
129  dataType,
130  floatFuncPtr,
131  floatFuncPtr,
132  uint8FuncPtr,
133  &FalseFunc<>,
134  &FalseFunc<>,
135  std::forward<Params>(params)...);
136 }
137 
138 #if defined(ARMCOMPUTENEON_ENABLED)
139 template<class FuncType, class... Args>
140 inline bool IsWorkloadSupported(FuncType& func, Optional<std::string&> reasonIfUnsupported, Args&&... args)
141 {
142  arm_compute::Status aclStatus = func(std::forward<Args>(args)...);
143  const bool supported = (aclStatus.error_code() == arm_compute::ErrorCode::OK);
144  if (!supported && reasonIfUnsupported)
145  {
146  reasonIfUnsupported.value() = aclStatus.error_description();
147  }
148  return supported;
149 }
150 
151 #define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported, ...) \
152  return IsWorkloadSupported(func, reasonIfUnsupported, __VA_ARGS__);
153 #else
154 #define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported, ...) \
155  return IsNeonBackendSupported(reasonIfUnsupported, __VA_ARGS__);
156 #endif
157 } // anonymous namespace
158 
160  : m_ModelContextPtr(modelContextPtr)
161 {
162 }
163 
165  : m_ModelContextPtr(nullptr)
166 {
167 }
168 
170  const std::vector<TensorInfo>& infos,
171  const BaseDescriptor& descriptor,
172  const Optional<LstmInputParamsInfo>& lstmParamsInfo,
173  const Optional<QuantizedLstmInputParamsInfo>& quantizedLstmParamsInfo,
174  Optional<std::string&> reasonIfUnsupported,
175  const NeonLayerSupport& support)
176 {
177  switch (type)
178  {
180  return support.IsActivationSupported(infos[0],
181  infos[1],
182  *(PolymorphicDowncast<const ActivationDescriptor*>(&descriptor)),
183  reasonIfUnsupported);
184  case LayerType::Addition:
185  return support.IsAdditionSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
187  return support.IsArgMinMaxSupported(infos[0],
188  infos[1],
189  *(PolymorphicDowncast<const ArgMinMaxDescriptor*>(&descriptor)),
190  reasonIfUnsupported);
192  return support.IsBatchMatMulSupported(infos[0],
193  infos[1],
194  infos[2],
195  *(PolymorphicDowncast<const BatchMatMulDescriptor*>(&descriptor)),
196  reasonIfUnsupported);
198  return support.IsBatchNormalizationSupported(infos[0],
199  infos[1],
200  infos[2],
201  infos[3],
202  infos[4],
203  infos[5],
204  *(PolymorphicDowncast<const
205  BatchNormalizationDescriptor*>(&descriptor)),
206  reasonIfUnsupported);
208  return support.IsBatchToSpaceNdSupported(infos[0],
209  infos[1],
210  *(PolymorphicDowncast<const
211  BatchToSpaceNdDescriptor*>(&descriptor)),
212  reasonIfUnsupported);
213  case LayerType::Cast:
214  return support.IsCastSupported(infos[0], infos[1], reasonIfUnsupported);
216  return support.IsChannelShuffleSupported(infos[0],
217  infos[1],
218  *(PolymorphicDowncast<const
219  ChannelShuffleDescriptor*>(&descriptor)),
220  reasonIfUnsupported);
222  return support.IsComparisonSupported(infos[0],
223  infos[1],
224  infos[2],
225  *(PolymorphicDowncast<const ComparisonDescriptor*>(&descriptor)),
226  reasonIfUnsupported);
227  case LayerType::Concat:
228  {
229  std::vector<const TensorInfo*> inputInfos;
230  for (uint32_t i = 0; i < (infos.size() - 1); i++)
231  {
232  inputInfos.push_back(&infos[i]);
233  }
234  return support.IsConcatSupported(inputInfos,
235  infos[infos.size() - 1],
236  *(PolymorphicDowncast<const OriginsDescriptor*>(&descriptor)),
237  reasonIfUnsupported);
238  }
239  case LayerType::Constant:
240  return support.IsConstantSupported(infos[0], reasonIfUnsupported);
242  return support.IsConvertFp16ToFp32Supported(infos[0], infos[1], reasonIfUnsupported);
244  return support.IsConvertFp32ToFp16Supported(infos[0], infos[1], reasonIfUnsupported);
246  {
247  if (infos.size() != 4)
248  {
249  throw InvalidArgumentException("Invalid number of TransposeConvolution2d TensorInfos. "
250  "TensorInfos should be of format: {input, output, weights, biases}.");
251  }
252 
253  auto desc = *(PolymorphicDowncast<const Convolution2dDescriptor*>(&descriptor));
254  if (infos[3] == TensorInfo())
255  {
256  return support.IsConvolution2dSupported(infos[0],
257  infos[1],
258  desc,
259  infos[2],
260  EmptyOptional(),
261  reasonIfUnsupported);
262  }
263  else
264  {
265  return support.IsConvolution2dSupported(infos[0],
266  infos[1],
267  desc,
268  infos[2],
269  infos[3],
270  reasonIfUnsupported);
271  }
272  }
274  {
275  if (infos.size() != 4)
276  {
277  throw InvalidArgumentException("Invalid number of Convolution3d TensorInfos. "
278  "TensorInfos should be of format: {input, output, weights, biases}.");
279  }
280 
281  auto desc = *(PolymorphicDowncast<const Convolution3dDescriptor*>(&descriptor));
282  if (infos[3] == TensorInfo())
283  {
284  return support.IsConvolution3dSupported(infos[0],
285  infos[1],
286  desc,
287  infos[2],
288  EmptyOptional(),
289  reasonIfUnsupported);
290  }
291  else
292  {
293  return support.IsConvolution3dSupported(infos[0],
294  infos[1],
295  desc,
296  infos[2],
297  infos[3],
298  reasonIfUnsupported);
299  }
300  }
302  return support.IsDepthToSpaceSupported(infos[0],
303  infos[1],
304  *(PolymorphicDowncast<const DepthToSpaceDescriptor*>(&descriptor)),
305  reasonIfUnsupported);
307  {
308  if (infos.size() != 4)
309  {
310  throw InvalidArgumentException("Invalid number of DepthwiseConvolution2d TensorInfos. "
311  "TensorInfos should be of format: {input, output, weights, biases}.");
312  }
313 
314  auto desc = *(PolymorphicDowncast<const DepthwiseConvolution2dDescriptor*>(&descriptor));
315  if (infos[3] == TensorInfo())
316  {
317  return support.IsDepthwiseConvolutionSupported(infos[0],
318  infos[1],
319  desc,
320  infos[2],
321  EmptyOptional(),
322  reasonIfUnsupported);
323  }
324  else
325  {
326  return support.IsDepthwiseConvolutionSupported(infos[0],
327  infos[1],
328  desc,
329  infos[2],
330  infos[3],
331  reasonIfUnsupported);
332  }
333  }
335  return support.IsDequantizeSupported(infos[0], infos[1], reasonIfUnsupported);
337  {
338  auto desc = *(PolymorphicDowncast<const DetectionPostProcessDescriptor*>(&descriptor));
339  return support.IsDetectionPostProcessSupported(infos[0],
340  infos[1],
341  infos[2],
342  infos[3],
343  infos[4],
344  infos[5],
345  infos[6],
346  desc,
347  reasonIfUnsupported);
348  }
349  case LayerType::Division:
350  return support.IsDivisionSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
352  {
353  auto desc = *(PolymorphicDowncast<const ElementwiseBinaryDescriptor *>(&descriptor));
354 
355  switch (desc.m_Operation)
356  {
359  reasonIfUnsupported,
360  infos[0],
361  infos[1],
362  infos[2],
363  nullptr);
366  reasonIfUnsupported,
367  infos[0],
368  infos[1],
369  infos[2],
370  nullptr);
373  reasonIfUnsupported,
374  infos[0],
375  infos[1],
376  infos[2]);
379  reasonIfUnsupported,
380  infos[0],
381  infos[1],
382  infos[2]);
385  reasonIfUnsupported,
386  infos[0],
387  infos[1],
388  infos[2],
389  nullptr);
392  reasonIfUnsupported,
393  infos[0],
394  infos[1],
395  infos[2],
396  nullptr);
397  default:
398  return false;
399  }
400  }
402  return support.IsElementwiseUnarySupported(infos[0],
403  infos[1],
404  *(PolymorphicDowncast<const
405  ElementwiseUnaryDescriptor*>(&descriptor)),
406  reasonIfUnsupported);
407  case LayerType::Fill:
408  return support.IsFillSupported(infos[0],
409  infos[1],
410  *(PolymorphicDowncast<const FillDescriptor*>(&descriptor)),
411  reasonIfUnsupported);
412  case LayerType::Floor:
413  return support.IsFloorSupported(infos[0], infos[1], reasonIfUnsupported);
415  return support.IsFullyConnectedSupported(infos[0],
416  infos[1],
417  infos[2],
418  infos[3],
419  *(PolymorphicDowncast<const
420  FullyConnectedDescriptor*>(&descriptor)),
421  reasonIfUnsupported);
422  case LayerType::Gather:
423  return support.IsGatherSupported(infos[0],
424  infos[1],
425  infos[2],
426  *(PolymorphicDowncast<const GatherDescriptor*>(&descriptor)),
427  reasonIfUnsupported);
428  case LayerType::GatherNd:
429  return support.IsGatherNdSupported(infos[0],
430  infos[1],
431  infos[2],
432  reasonIfUnsupported);
433  case LayerType::Input:
434  return support.IsInputSupported(infos[0], reasonIfUnsupported);
436  return support.IsInstanceNormalizationSupported(infos[0],
437  infos[1],
438  *(PolymorphicDowncast<const
439  InstanceNormalizationDescriptor*>(&descriptor)),
440  reasonIfUnsupported);
442  return support.IsL2NormalizationSupported(infos[0],
443  infos[1],
444  *(PolymorphicDowncast<const
445  L2NormalizationDescriptor*>(&descriptor)),
446  reasonIfUnsupported);
448  return support.IsLogicalBinarySupported(infos[0],
449  infos[1],
450  infos[2],
451  *(PolymorphicDowncast<const
452  LogicalBinaryDescriptor*>(&descriptor)),
453  reasonIfUnsupported);
455  return support.IsLogSoftmaxSupported(infos[0],
456  infos[1],
457  *(PolymorphicDowncast<const LogSoftmaxDescriptor*>(&descriptor)),
458  reasonIfUnsupported);
459  case LayerType::Lstm:
460  return support.IsLstmSupported(infos[0],
461  infos[1],
462  infos[2],
463  infos[3],
464  infos[4],
465  infos[5],
466  infos[6],
467  *(PolymorphicDowncast<const LstmDescriptor*>(&descriptor)),
468  lstmParamsInfo.value(),
469  reasonIfUnsupported);
470  case LayerType::Map:
471  return true;
472  case LayerType::Maximum:
473  return support.IsMaximumSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
474  case LayerType::Mean:
475  return support.IsMeanSupported(infos[0],
476  infos[1],
477  *(PolymorphicDowncast<const MeanDescriptor*>(&descriptor)),
478  reasonIfUnsupported);
479  case LayerType::MemCopy:
480  return support.IsMemCopySupported(infos[0], infos[1], reasonIfUnsupported);
482  return support.IsMemImportSupported(infos[0], infos[1], reasonIfUnsupported);
483  case LayerType::Merge:
484  return support.IsMergeSupported(infos[0],
485  infos[1],
486  infos[2],
487  reasonIfUnsupported);
488  case LayerType::Minimum:
489  return support.IsMinimumSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
491  return support.IsMultiplicationSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
493  return support.IsNormalizationSupported(infos[0],
494  infos[1],
495  *(PolymorphicDowncast<const
496  NormalizationDescriptor*>(&descriptor)),
497  reasonIfUnsupported);
498  case LayerType::Output:
499  return support.IsOutputSupported(infos[0], reasonIfUnsupported);
500  case LayerType::Pad:
501  return support.IsPadSupported(infos[0],
502  infos[1],
503  *(PolymorphicDowncast<const PadDescriptor*>(&descriptor)),
504  reasonIfUnsupported);
505  case LayerType::Permute:
506  return support.IsPermuteSupported(infos[0],
507  infos[1],
508  *(PolymorphicDowncast<const PermuteDescriptor*>(&descriptor)),
509  reasonIfUnsupported);
511  return support.IsPooling2dSupported(infos[0],
512  infos[1],
513  *(PolymorphicDowncast<const Pooling2dDescriptor*>(&descriptor)),
514  reasonIfUnsupported);
516  return support.IsPooling3dSupported(infos[0],
517  infos[1],
518  *(PolymorphicDowncast<const Pooling3dDescriptor*>(&descriptor)),
519  reasonIfUnsupported);
520  case LayerType::Prelu:
521  return support.IsPreluSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
522  case LayerType::QLstm:
523  return support.IsQLstmSupported(infos[0],
524  infos[1],
525  infos[2],
526  infos[3],
527  infos[4],
528  infos[5],
529  *(PolymorphicDowncast<const QLstmDescriptor*>(&descriptor)),
530  lstmParamsInfo.value(),
531  reasonIfUnsupported);
532  case LayerType::Quantize:
533  return support.IsQuantizeSupported(infos[0], infos[1], reasonIfUnsupported);
535  return support.IsQuantizedLstmSupported(infos[0],
536  infos[1],
537  infos[2],
538  infos[3],
539  infos[4],
540  quantizedLstmParamsInfo.value(),
541  reasonIfUnsupported);
542  case LayerType::Rank:
543  return true;
544  case LayerType::Reshape:
545  return support.IsReshapeSupported(infos[0],
546  infos[1],
547  *(PolymorphicDowncast<const ReshapeDescriptor*>(&descriptor)),
548  reasonIfUnsupported);
549  case LayerType::Resize:
550  return support.IsResizeSupported(infos[0],
551  infos[1],
552  *(PolymorphicDowncast<const ResizeDescriptor*>(&descriptor)),
553  reasonIfUnsupported);
554  case LayerType::Reduce:
555  return support.IsReduceSupported(infos[0],
556  infos[1],
557  *(PolymorphicDowncast<const ReduceDescriptor*>(&descriptor)),
558  reasonIfUnsupported);
559  case LayerType::Shape:
560  return support.IsShapeSupported(infos[0],
561  infos[1],
562  reasonIfUnsupported);
563  case LayerType::Slice:
564  return support.IsSliceSupported(infos[0],
565  infos[1],
566  *(PolymorphicDowncast<const SliceDescriptor*>(&descriptor)),
567  reasonIfUnsupported);
568  case LayerType::Softmax:
569  return support.IsSoftmaxSupported(infos[0],
570  infos[1],
571  *(PolymorphicDowncast<const SoftmaxDescriptor*>(&descriptor)),
572  reasonIfUnsupported);
574  return support.IsSpaceToBatchNdSupported(infos[0],
575  infos[1],
576  *(PolymorphicDowncast<const
577  SpaceToBatchNdDescriptor*>(&descriptor)),
578  reasonIfUnsupported);
580  return support.IsSpaceToDepthSupported(infos[0],
581  infos[1],
582  *(PolymorphicDowncast<const SpaceToDepthDescriptor*>(&descriptor)),
583  reasonIfUnsupported);
584  case LayerType::Splitter:
585  {
586  std::vector<TensorInfo> outputInfos;
587  for (uint32_t i = 1; i < infos.size(); i++)
588  {
589  outputInfos.push_back(infos[i]);
590  }
591  return support.IsSplitterSupported(infos[0],
592  {outputInfos.begin(), outputInfos.end()},
593  *(PolymorphicDowncast<const ViewsDescriptor*>(&descriptor)),
594  reasonIfUnsupported);
595  }
596  case LayerType::Stack:
597  {
598  std::vector<const TensorInfo*> inputInfos;
599  for (uint32_t i = 0; i < infos.size() - 1; i++)
600  {
601  inputInfos.push_back(&infos[i]);
602  }
603  return support.IsStackSupported(inputInfos,
604  infos[infos.size() - 1],
605  *(PolymorphicDowncast<const StackDescriptor*>(&descriptor)),
606  reasonIfUnsupported);
607  }
609  return support.IsStridedSliceSupported(infos[0],
610  infos[1],
611  *(PolymorphicDowncast<const StridedSliceDescriptor*>(&descriptor)),
612  reasonIfUnsupported);
614  return support.IsSubtractionSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
616  return support.IsTransposeSupported(infos[0],
617  infos[1],
618  *(PolymorphicDowncast<const TransposeDescriptor*>(&descriptor)),
619  reasonIfUnsupported);
621  {
622  if (infos.size() != 4)
623  {
624  throw InvalidArgumentException("Invalid number of TransposeConvolution2d TensorInfos. "
625  "TensorInfos should be of format: {input, output, weights, biases}.");
626  }
627 
628  auto desc = *(PolymorphicDowncast<const TransposeConvolution2dDescriptor*>(&descriptor));
629  if (infos[3] == TensorInfo())
630  {
631  return support.IsTransposeConvolution2dSupported(infos[0],
632  infos[1],
633  desc,
634  infos[2],
635  EmptyOptional(),
636  reasonIfUnsupported);
637  }
638  else
639  {
640  return support.IsTransposeConvolution2dSupported(infos[0],
641  infos[1],
642  desc,
643  infos[2],
644  infos[3],
645  reasonIfUnsupported);
646  }
647  }
649  {
650  auto desc = *(PolymorphicDowncast<const UnidirectionalSequenceLstmDescriptor*>(&descriptor));
651  return support.IsUnidirectionalSequenceLstmSupported(infos[0],
652  infos[1],
653  infos[2],
654  infos[3],
655  infos[4],
656  infos[5],
657  desc,
658  lstmParamsInfo.value(),
659  reasonIfUnsupported);
660  }
661  case LayerType::Unmap:
662  return true;
663  default:
664  // layers not supported in neon by default:
665  // debug, fakequantization, precompiled,
666  // standin, switch
667  return false;
668  }
669 }
670 
672  const std::vector<TensorInfo>& infos,
673  const BaseDescriptor& descriptor,
674  const Optional<LstmInputParamsInfo>& lstmParamsInfo,
675  const Optional<QuantizedLstmInputParamsInfo>& quantizedLstmParamsInfo,
676  Optional<std::string&> reasonIfUnsupported) const
677 {
678  bool isSupported = IsLayerTypeSupported(type,
679  infos,
680  descriptor,
681  lstmParamsInfo,
682  quantizedLstmParamsInfo,
684  *this);
685 
686  // For android-nn-driver and support library, to run FP16 operations on CpuAcc we need at least v8.2
687  // architecture. If the available architecture is older than v8.2, we can check if the operator is
688  // supported by changing operator inputs & outputs to be FP32.
689  // This does not change the operator datatype in the above parsers to be FP32. We are simply reporting
690  // to the parsers if the operator can supported in ArmNN. We will then re-enter ArmNN (Network.cpp)
691  // where we will recheck IsLayerSupported() on the FP16 datatype, update the operator to be FP32,
692  // and, insert convert layers around the FP32 operator.
693  if (reasonIfUnsupported.has_value())
694  {
695  std::string checkStr = "This CPU architecture does not support F16 data type, you need v8.2 or above";
696  if (!isSupported
697  && reasonIfUnsupported.value().find(checkStr) != std::string::npos)
698  {
699  std::vector<TensorInfo> newInfos;
700  for (auto info: infos)
701  {
702  newInfos.emplace_back(OverrideDataType(info, DataType::Float32));
703  }
704 
705  std::string tmpString;
706  return IsLayerTypeSupported(type,
707  newInfos,
708  descriptor,
709  lstmParamsInfo,
710  quantizedLstmParamsInfo,
711  tmpString,
712  *this);
713  }
714  }
715 
716  return isSupported;
717 }
718 
720  const TensorInfo& output,
721  const ActivationDescriptor& descriptor,
722  Optional<std::string&> reasonIfUnsupported) const
723 {
727  input,
728  output,
729  descriptor);
730 }
731 
733  const TensorInfo& input1,
734  const TensorInfo& output,
735  Optional<std::string&> reasonIfUnsupported) const
736 {
739  input0,
740  input1,
741  output,
742  nullptr);
743 }
744 
746  const TensorInfo& output,
747  const ArgMinMaxDescriptor& descriptor,
748  Optional<std::string&> reasonIfUnsupported) const
749 {
752  input,
753  output,
754  descriptor);
755 }
756 
758  const TensorInfo& inputY,
759  const TensorInfo& output,
760  const BatchMatMulDescriptor& descriptor,
761  Optional<std::string&> reasonIfUnsupported) const
762 {
763  bool isFastMathEnabled = false;
764 #if defined(ARMCOMPUTENEON_ENABLED)
765  if (m_ModelContextPtr)
766  {
767  if (m_ModelContextPtr.get() != nullptr)
768  {
769  auto modelOptions = dynamic_cast<NeonBackendModelContext*>(m_ModelContextPtr.get());
770  if (modelOptions)
771  {
772  isFastMathEnabled = modelOptions->IsFastMathEnabled();
773  }
774  }
775  }
776 #endif
779  inputX,
780  inputY,
781  output,
782  descriptor,
783  isFastMathEnabled,
784  nullptr);
785 }
786 
788  const TensorInfo& output,
789  const TensorInfo& mean,
790  const TensorInfo& var,
791  const TensorInfo& beta,
792  const TensorInfo& gamma,
793  const BatchNormalizationDescriptor& descriptor,
794  Optional<std::string&> reasonIfUnsupported) const
795 {
798  input,
799  output,
800  mean,
801  var,
802  beta,
803  gamma,
804  descriptor,
805  nullptr);
806 }
807 
809  const TensorInfo& output,
810  const BatchToSpaceNdDescriptor& descriptor,
811  Optional<std::string&> reasonIfUnsupported) const
812 {
815  input,
816  output,
817  descriptor);
818 }
819 
821  const TensorInfo& output,
822  Optional<std::string&> reasonIfUnsupported) const
823 {
826  input,
827  output);
828 }
829 
831  const TensorInfo& output,
832  const ChannelShuffleDescriptor& descriptor,
833  Optional<std::string&> reasonIfUnsupported) const
834 {
837  input,
838  output,
839  descriptor);
840 }
841 
843  const TensorInfo& input1,
844  const TensorInfo& output,
845  const ComparisonDescriptor& descriptor,
846  Optional<std::string&> reasonIfUnsupported) const
847 {
848 
851  input0,
852  input1,
853  output,
854  descriptor);
855 }
856 
857 bool NeonLayerSupport::IsConcatSupported(const std::vector<const TensorInfo*> inputs,
858  const TensorInfo& output,
859  const OriginsDescriptor& descriptor,
860  Optional<std::string&> reasonIfUnsupported) const
861 {
862  if (descriptor.GetNumDimensions() <= descriptor.GetConcatAxis())
863  {
864  SetValueChecked(reasonIfUnsupported, "Neon Concat: Concat axis > Number of dimensions.");
865  return false;
866  }
867 
868  unsigned int concatInnerAxis = (descriptor.GetNumDimensions() - descriptor.GetConcatAxis()) - 1;
869  if(concatInnerAxis < 3) // Width, height, or channels
870  {
873  inputs,
874  output,
875  descriptor);
876  }
877  else if (concatInnerAxis == 3)
878  {
879  for (auto& input : inputs)
880  {
881  if (input && !output.IsTypeSpaceMatch(*input)) // Cannot use sub-tensors if the types are not same space
882  {
883  SetValueChecked(reasonIfUnsupported, "Neon Concat: Types and quantization parameters must match.");
884  return false;
885  }
886  }
887  return true; // Sub-tensors support concat along batch
888  }
889  else // > 4 dimensions not supported.
890  {
891  SetValueChecked(reasonIfUnsupported, "Neon Concat: Maximum of 4 dimensions supported.");
892  return false;
893  }
894 }
895 
897  Optional<std::string&> reasonIfUnsupported) const
898 {
901  output);
902 }
903 
905  const TensorInfo& output,
906  Optional<std::string&> reasonIfUnsupported) const
907 {
910  input,
911  output);
912 }
913 
915  const TensorInfo& output,
916  Optional<std::string&> reasonIfUnsupported) const
917 {
920  input,
921  output);
922 }
923 
925  const TensorInfo& output,
926  const Convolution2dDescriptor& descriptor,
927  const TensorInfo& weights,
928  const Optional<TensorInfo>& biases,
929  Optional<std::string&> reasonIfUnsupported) const
930 {
931  bool isFastMathEnabled = false;
932 #if defined(ARMCOMPUTENEON_ENABLED)
933  if (m_ModelContextPtr)
934  {
935  if (m_ModelContextPtr.get() != nullptr)
936  {
937  auto modelOptions = dynamic_cast<NeonBackendModelContext*>(m_ModelContextPtr.get());
938  if (modelOptions)
939  {
940  isFastMathEnabled = modelOptions->IsFastMathEnabled();
941  }
942  }
943  }
944 #endif
945 
948  input,
949  output,
950  descriptor,
951  weights,
952  biases,
953  isFastMathEnabled,
954  nullptr);
955 }
956 
958  const TensorInfo& output,
959  const Convolution3dDescriptor& descriptor,
960  const TensorInfo& weights,
961  const Optional<TensorInfo>& biases,
962  Optional<std::string&> reasonIfUnsupported) const
963 {
964  bool isFastMathEnabled = false;
965 #if defined(ARMCOMPUTENEON_ENABLED)
966  if (m_ModelContextPtr)
967  {
968  if (m_ModelContextPtr.get() != nullptr)
969  {
970  auto modelOptions = dynamic_cast<NeonBackendModelContext*>(m_ModelContextPtr.get());
971  if (modelOptions)
972  {
973  isFastMathEnabled = modelOptions->IsFastMathEnabled();
974  }
975  }
976  }
977 #endif
978 
981  input,
982  output,
983  descriptor,
984  weights,
985  biases,
986  isFastMathEnabled,
987  nullptr);
988 }
989 
991  const TensorInfo& output,
992  const DepthToSpaceDescriptor& descriptor,
993  Optional<std::string&> reasonIfUnsupported) const
994 {
997  input,
998  output,
999  descriptor);
1000 }
1001 
1003  const TensorInfo& output,
1004  const DepthwiseConvolution2dDescriptor& descriptor,
1005  const TensorInfo& weights,
1006  const Optional<TensorInfo>& biases,
1007  Optional<std::string&> reasonIfUnsupported) const
1008 {
1011  input,
1012  output,
1013  descriptor,
1014  weights,
1015  biases,
1016  nullptr);
1017 }
1018 
1020  const TensorInfo& output,
1021  Optional<std::string&> reasonIfUnsupported) const
1022 {
1025  input,
1026  output);
1027 }
1028 
1030  const TensorInfo& output,
1031  const DepthwiseConvolution2dDescriptor& descriptor,
1032  const TensorInfo& weights,
1033  const Optional<TensorInfo>& biases,
1034  Optional<std::string&> reasonIfUnsupported) const
1035 {
1038  input,
1039  output,
1040  descriptor,
1041  weights,
1042  biases,
1043  nullptr);
1044 }
1045 
1047  const TensorInfo& output,
1048  const ElementwiseUnaryDescriptor& descriptor,
1049  Optional<std::string&> reasonIfUnsupported) const
1050 {
1051  switch(descriptor.m_Operation)
1052  {
1053  case UnaryOperation::Abs:
1056  input,
1057  output);
1058  case UnaryOperation::Exp:
1061  input,
1062  output);
1066  input,
1067  output);
1068  case UnaryOperation::Log:
1071  input,
1072  output);
1073  case UnaryOperation::Neg:
1076  input,
1077  output);
1078  case UnaryOperation::Rsqrt:
1081  input,
1082  output);
1083  case UnaryOperation::Sin:
1086  input,
1087  output);
1088  case UnaryOperation::Sqrt:
1091  input,
1092  output);
1093  default:
1094  return false;
1095  }
1096 }
1097 
1099  const TensorInfo& output,
1100  const FillDescriptor& descriptor,
1101  Optional<std::string&> reasonIfUnsupported) const
1102 {
1103  armnn::IgnoreUnused(input);
1106 
1107  return IsNeonBackendSupported(reasonIfUnsupported);
1108 }
1109 
1111  const TensorInfo& output,
1112  Optional<std::string&> reasonIfUnsupported) const
1113 {
1115  return IsNeonBackendSupported(reasonIfUnsupported) &&
1117  input.GetDataType(),
1118  &FalseFuncF16<>,
1119  &TrueFunc<>,
1120  &FalseFuncU8<>,
1121  &FalseFuncI32<>,
1122  &FalseFuncU8<>);
1123 }
1124 
1126  const TensorInfo& output,
1127  const TensorInfo& weights,
1128  const TensorInfo& biases,
1129  const FullyConnectedDescriptor& descriptor,
1130  Optional<std::string&> reasonIfUnsupported) const
1131 {
1134  input,
1135  output,
1136  weights,
1137  biases,
1138  descriptor,
1139  nullptr);
1140 }
1141 
1143  const TensorInfo& input1,
1144  const TensorInfo& output,
1145  const GatherDescriptor& descriptor,
1146  Optional<std::string&> reasonIfUnsupported) const
1147 {
1150  input0,
1151  input1,
1152  output,
1153  descriptor);
1154 }
1155 
1157  const TensorInfo& input1,
1158  const TensorInfo& output,
1159  Optional<std::string&> reasonIfUnsupported) const
1160 {
1163  input0,
1164  input1,
1165  output);
1166 }
1167 
1169  Optional<std::string&> reasonIfUnsupported) const
1170 {
1171  return IsNeonBackendSupported(reasonIfUnsupported, input);
1172 }
1173 
1175  const TensorInfo& output,
1176  const InstanceNormalizationDescriptor& descriptor,
1177  Optional<std::string&> reasonIfUnsupported) const
1178 {
1181  input,
1182  output,
1183  descriptor);
1184 }
1185 
1187  const TensorInfo& output,
1188  const L2NormalizationDescriptor& descriptor,
1189  Optional<std::string&> reasonIfUnsupported) const
1190 {
1192 }
1193 
1195  const TensorInfo& input1,
1196  const TensorInfo& output,
1197  const LogicalBinaryDescriptor& descriptor,
1198  Optional<std::string&> reasonIfUnsupported) const
1199 {
1200  switch(descriptor.m_Operation)
1201  {
1205  input0,
1206  input1,
1207  output);
1211  input0,
1212  input1,
1213  output);
1214  default:
1215  return false;
1216  }
1217 }
1218 
1220  const TensorInfo& output,
1221  const LogSoftmaxDescriptor& descriptor,
1222  Optional<std::string&> reasonIfUnsupported) const
1223 {
1225 }
1226 
1228  const TensorInfo& outputStateIn,
1229  const TensorInfo& cellStateIn,
1230  const TensorInfo& scratchBuffer,
1231  const TensorInfo& outputStateOut,
1232  const TensorInfo& cellStateOut,
1233  const TensorInfo& output,
1234  const LstmDescriptor& descriptor,
1235  const LstmInputParamsInfo& paramsInfo,
1236  Optional<std::string&> reasonIfUnsupported) const
1237 {
1240  input,
1241  outputStateIn,
1242  cellStateIn,
1243  scratchBuffer,
1245  cellStateOut,
1246  output,
1247  descriptor,
1248  paramsInfo);
1249 }
1250 
1252  const TensorInfo& input1,
1253  const TensorInfo& output,
1254  Optional<std::string&> reasonIfUnsupported) const
1255 {
1258  input0,
1259  input1,
1260  output);
1261 }
1262 
1264  const TensorInfo& output,
1265  const MeanDescriptor& descriptor,
1266  Optional<std::string&> reasonIfUnsupported) const
1267 {
1270  input,
1271  output,
1272  descriptor);
1273 }
1274 
1276  const TensorInfo& input1,
1277  const TensorInfo& output,
1278  Optional<std::string&> reasonIfUnsupported) const
1279 {
1282  input0,
1283  input1,
1284  output);
1285 }
1286 
1288  const TensorInfo& input1,
1289  const TensorInfo& output,
1290  Optional<std::string&> reasonIfUnsupported) const
1291 {
1294  input0,
1295  input1,
1296  output,
1297  nullptr);
1298 }
1299 
1301  const TensorInfo& input1,
1302  const TensorInfo& output,
1303  Optional<std::string&> reasonIfUnsupported) const
1304 {
1307  input0,
1308  input1,
1309  output,
1310  nullptr);
1311 }
1312 
1314  const TensorInfo& output,
1315  const NormalizationDescriptor& descriptor,
1316  Optional<std::string&> reasonIfUnsupported) const
1317 {
1320  input,
1321  output,
1322  descriptor);
1323 }
1324 
1326  Optional<std::string&> reasonIfUnsupported) const
1327 {
1328  return IsNeonBackendSupported(reasonIfUnsupported, output);
1329 }
1330 
1332  const TensorInfo& output,
1333  const PadDescriptor& descriptor,
1334  Optional<std::string&> reasonIfUnsupported) const
1335 {
1338  input,
1339  output,
1340  descriptor);
1341 }
1342 
1344  const TensorInfo& output,
1345  const PermuteDescriptor& descriptor,
1346  Optional<std::string&> reasonIfUnsupported) const
1347 {
1349 }
1350 
1352  const TensorInfo& output,
1353  const Pooling2dDescriptor& descriptor,
1354  Optional<std::string&> reasonIfUnsupported) const
1355 {
1357 }
1358 
1360  const TensorInfo& output,
1361  const Pooling3dDescriptor& descriptor,
1362  Optional<std::string&> reasonIfUnsupported) const
1363 {
1365 }
1366 
1368  const armnn::TensorInfo &alpha,
1369  const armnn::TensorInfo &output,
1370  armnn::Optional<std::string &> reasonIfUnsupported) const
1371 {
1373 }
1374 
1376  const TensorInfo& previousOutputIn,
1377  const TensorInfo& previousCellStateIn,
1378  const TensorInfo& outputStateOut,
1379  const TensorInfo& cellStateOut,
1380  const TensorInfo& output,
1381  const QLstmDescriptor& descriptor,
1382  const LstmInputParamsInfo& paramsInfo,
1383  Optional<std::string&> reasonIfUnsupported) const
1384 {
1385  // Check required here in order to pass IsLayerSupported for datatypes tests
1386  if (input.GetDataType() == armnn::DataType::QAsymmS8 &&
1392  {
1395  input,
1398  cellStateOut,
1400  output,
1401  descriptor,
1402  paramsInfo);
1403  }
1404  else
1405  {
1406  return false;
1407  }
1408 }
1409 
1411  const TensorInfo& output,
1412  Optional<std::string&> reasonIfUnsupported) const
1413 {
1416  input,
1417  output);
1418 }
1419 
1421  const TensorInfo& cellStateIn,
1422  const TensorInfo& outputStateIn,
1423  const TensorInfo& cellStateOut,
1424  const TensorInfo& outputStateOut,
1425  const QuantizedLstmInputParamsInfo& paramsInfo,
1426  Optional<std::string&> reasonIfUnsupported) const
1427 {
1430  input,
1431  cellStateIn,
1432  outputStateIn,
1433  cellStateOut,
1435  paramsInfo);
1436 }
1437 
1439  const TensorInfo& output,
1440  const ReduceDescriptor& descriptor,
1441  Optional<std::string&> reasonIfUnsupported) const
1442 {
1445  input,
1446  output,
1447  descriptor);
1448 }
1449 
1451  const TensorInfo& output,
1452  const ReshapeDescriptor& descriptor,
1453  Optional<std::string&> reasonIfUnsupported) const
1454 {
1458  input,
1459  output);
1460 }
1461 
1463  const TensorInfo& output,
1464  const ResizeDescriptor& descriptor,
1465  Optional<std::string&> reasonIfUnsupported) const
1466 {
1469  input,
1470  output,
1471  descriptor);
1472 }
1473 
1475  const TensorInfo& output,
1476  const SliceDescriptor& descriptor,
1477  Optional<std::string&> reasonIfUnsupported) const
1478 {
1481  input,
1482  output,
1483  descriptor);
1484 }
1485 
1487  const TensorInfo& output,
1488  const SoftmaxDescriptor& descriptor,
1489  Optional<std::string&> reasonIfUnsupported) const
1490 {
1492 }
1493 
1495  const TensorInfo& output,
1496  const SpaceToBatchNdDescriptor& descriptor,
1497  Optional<std::string&> reasonIfUnsupported) const
1498 {
1501  input,
1502  output,
1503  descriptor);
1504 }
1505 
1507  const TensorInfo& output,
1508  const SpaceToDepthDescriptor& descriptor,
1509  Optional<std::string&> reasonIfUnsupported) const
1510 {
1513  input,
1514  output,
1515  descriptor);
1516 }
1517 
1519  const std::vector<std::reference_wrapper<TensorInfo>>& outputs,
1520  const ViewsDescriptor& descriptor,
1521  Optional<std::string&> reasonIfUnsupported) const
1522 {
1523 #if defined(ARMCOMPUTENEON_ENABLED)
1524  // Split along the last dimension, cannot use sub-tensors
1525  // as width and height of the sub-tensors do not match
1526  // the width and height of the parent tensor
1527  // in case of input with more than 2D.
1528  std::set<unsigned int> splitAxis = ComputeSplitAxis(descriptor, input.GetShape());
1529  if (descriptor.GetNumDimensions() > 2 && splitAxis.size() == 1 &&
1530  *splitAxis.begin() == descriptor.GetNumDimensions() - 1 )
1531  {
1534  input,
1535  outputs,
1536  *splitAxis.begin());
1537  }
1538 #endif
1540  for (auto output : outputs)
1541  {
1542  if (!input.IsTypeSpaceMatch(output)) // Cannot use sub-tensors if the types are not same space
1543  {
1544  SetValueChecked(reasonIfUnsupported, "Neon Splitter: Types and quantization parameters must match.");
1545  return false;
1546  }
1547  }
1548  return true;
1549 }
1550 
1551 bool NeonLayerSupport::IsStackSupported(const std::vector<const TensorInfo*>& inputs,
1552  const TensorInfo& output,
1553  const StackDescriptor& descriptor,
1554  Optional<std::string&> reasonIfUnsupported) const
1555 {
1558  inputs,
1559  output,
1560  descriptor);
1561 }
1562 
1564  const TensorInfo& output,
1565  const StridedSliceDescriptor& descriptor,
1566  Optional<std::string&> reasonIfUnsupported) const
1567 {
1570  input,
1571  output,
1572  descriptor);
1573 }
1574 
1576  const TensorInfo& input1,
1577  const TensorInfo& output,
1578  Optional<std::string&> reasonIfUnsupported) const
1579 {
1582  input0,
1583  input1,
1584  output,
1585  nullptr);
1586 }
1587 
1589  const TensorInfo& output,
1590  const TransposeConvolution2dDescriptor& descriptor,
1591  const TensorInfo& weights,
1592  const Optional<TensorInfo>& biases,
1593  Optional<std::string&> reasonIfUnsupported) const
1594 {
1597  input,
1598  output,
1599  descriptor,
1600  weights,
1601  biases);
1602 }
1603 
1605  const TensorInfo& output,
1606  const TransposeDescriptor& descriptor,
1607  Optional<std::string&> reasonIfUnsupported) const
1608 {
1610 }
1611 
1613  const TensorInfo& outputStateIn,
1614  const TensorInfo& cellStateIn,
1615  const TensorInfo& outputStateOut,
1616  const TensorInfo& cellStateOut,
1617  const TensorInfo& output,
1618  const UnidirectionalSequenceLstmDescriptor& descriptor,
1619  const LstmInputParamsInfo& paramsInfo,
1620  Optional<std::string&> reasonIfUnsupported) const
1621 {
1622  if (input.GetDataType() == armnn::DataType::QAsymmS8 &&
1628  {
1631  input,
1632  outputStateIn,
1633  cellStateIn,
1635  cellStateOut,
1636  output,
1637  descriptor,
1638  paramsInfo);
1639  }
1640  else
1641  {
1644  input,
1645  outputStateIn,
1646  cellStateIn,
1648  cellStateOut,
1649  output,
1650  descriptor,
1651  paramsInfo);
1652  }
1653 }
1654 
1655 } // namespace armnn
armnn::NeonLayerSupport::IsResizeSupported
bool IsResizeSupported(const TensorInfo &input, const TensorInfo &output, const ResizeDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: NeonLayerSupport.cpp:1462
armnn::LayerSupportBase::IsMemImportSupported
bool IsMemImportSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: LayerSupportBase.cpp:397
armnn::LayerType::Floor
@ Floor
armnn::NeonLayerSupport::IsBatchToSpaceNdSupported
bool IsBatchToSpaceNdSupported(const TensorInfo &input, const TensorInfo &output, const BatchToSpaceNdDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: NeonLayerSupport.cpp:808
armnn::NeonLayerSupport::IsPooling2dSupported
bool IsPooling2dSupported(const TensorInfo &input, const TensorInfo &output, const Pooling2dDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: NeonLayerSupport.cpp:1351
armnn::NeonArgMinMaxWorkloadValidate
arm_compute::Status NeonArgMinMaxWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const ArgMinMaxDescriptor &descriptor)
Definition: NeonArgMinMaxWorkload.cpp:31
armnn::NeonSoftmaxWorkloadValidate
arm_compute::Status NeonSoftmaxWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const SoftmaxDescriptor &descriptor)
Definition: NeonSoftmaxWorkload.cpp:19
NeonPooling2dWorkload.hpp
armnn::LayerType::MemCopy
@ MemCopy
armnn::NeonLayerSupport::IsElementwiseUnarySupported
bool IsElementwiseUnarySupported(const TensorInfo &input, const TensorInfo &output, const ElementwiseUnaryDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: NeonLayerSupport.cpp:1046
armnn::LayerType::Softmax
@ Softmax
armnn::LayerType::Pooling3d
@ Pooling3d
armnn::LayerType::FullyConnected
@ FullyConnected
NeonQuantizeWorkload.hpp
armnn::ILayerSupport::outputStateIn
const TensorInfo & outputStateIn
Definition: ILayerSupport.hpp:286
armnn::LayerType::Transpose
@ Transpose
armnn::NeonSplitterWorkloadValidate
arm_compute::Status NeonSplitterWorkloadValidate(const TensorInfo &input, const std::vector< std::reference_wrapper< TensorInfo >> &outputs, unsigned int splitAxis)
Definition: NeonSplitterWorkload.cpp:32
NeonAbsWorkload.hpp
armnn::LstmInputParamsInfo
Definition: LstmParams.hpp:63
armnn::LayerType::ChannelShuffle
@ ChannelShuffle
armnn::NeonStridedSliceWorkloadValidate
arm_compute::Status NeonStridedSliceWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const StridedSliceDescriptor &descriptor)
Definition: NeonStridedSliceWorkload.cpp:19
armnn::ILayerSupport::paramsInfo
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const LstmDescriptor const LstmInputParamsInfo & paramsInfo
Definition: ILayerSupport.hpp:293
NeonResizeWorkload.hpp
armnn::GatherDescriptor
A GatherDescriptor for the GatherLayer.
Definition: Descriptors.hpp:932
armnn::NeonLayerSupport::IsCastSupported
bool IsCastSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: NeonLayerSupport.cpp:820
armnn::NormalizationDescriptor
A NormalizationDescriptor for the NormalizationLayer.
Definition: Descriptors.hpp:757
armnn::TransposeDescriptor
A TransposeDescriptor for the TransposeLayer.
Definition: Descriptors.hpp:1457
NeonQLstmWorkload.hpp
armnn::NeonLayerSupport::IsSoftmaxSupported
bool IsSoftmaxSupported(const TensorInfo &input, const TensorInfo &output, const SoftmaxDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: NeonLayerSupport.cpp:1486
armnn::NeonLayerSupport::IsDepthToSpaceSupported
bool IsDepthToSpaceSupported(const TensorInfo &input, const TensorInfo &output, const DepthToSpaceDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: NeonLayerSupport.cpp:990
NeonActivationWorkload.hpp
NeonArgMinMaxWorkload.hpp
armnn::ElementwiseUnaryDescriptor
A ElementwiseUnaryDescriptor for the ElementwiseUnaryLayer.
Definition: Descriptors.hpp:129
armnn::NeonGatherNdWorkloadValidate
arm_compute::Status NeonGatherNdWorkloadValidate(const TensorInfo &paramsInfo, const TensorInfo &indicesInfo, const TensorInfo &outputInfo)
Definition: NeonGatherNdWorkload.cpp:14
armnn::PadDescriptor
A PadDescriptor for the PadLayer.
Definition: Descriptors.hpp:1163
armnn::SoftmaxDescriptor
A SoftmaxDescriptor for the SoftmaxLayer.
Definition: Descriptors.hpp:177
armnn::LayerType::ConvertFp32ToFp16
@ ConvertFp32ToFp16
armnn::LayerType::L2Normalization
@ L2Normalization
armnn::NeonLayerSupport::IsArgMinMaxSupported
bool IsArgMinMaxSupported(const TensorInfo &input, const TensorInfo &output, const ArgMinMaxDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: NeonLayerSupport.cpp:745
NeonGatherNdWorkload.hpp
armnn::LogicalBinaryOperation::LogicalAnd
@ LogicalAnd
armnn::StackDescriptor
A StackDescriptor for the StackLayer.
Definition: Descriptors.hpp:1218
armnn::LayerType::TransposeConvolution2d
@ TransposeConvolution2d
NeonBatchToSpaceNdWorkload.hpp
armnn::SliceDescriptor
A SliceDescriptor for the SliceLayer.
Definition: Descriptors.hpp:1195
armnn::NeonBackendModelContext
The NeonBackendModelContext is used to pass in Neon specific backend ModelOptions.
Definition: NeonBackendModelContext.hpp:19
armnn::ILayerSupport::scratchBuffer
const TensorInfo const TensorInfo const TensorInfo & scratchBuffer
Definition: ILayerSupport.hpp:288
armnn::LayerType::Map
@ Map
armnn::NeonLayerSupport::IsPermuteSupported
bool IsPermuteSupported(const TensorInfo &input, const TensorInfo &output, const PermuteDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: NeonLayerSupport.cpp:1343
NeonConvertFp16ToFp32Workload.hpp
NeonSplitterWorkload.hpp
armnn::NeonLayerSupport::IsSpaceToDepthSupported
bool IsSpaceToDepthSupported(const TensorInfo &input, const TensorInfo &output, const SpaceToDepthDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: NeonLayerSupport.cpp:1506
armnn::LayerType::Input
@ Input
NeonBackendModelContext.hpp
armnn::NeonQuantizeWorkloadValidate
arm_compute::Status NeonQuantizeWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: NeonQuantizeWorkload.cpp:18
armnn::LayerType::Slice
@ Slice
armnn::ActivationDescriptor
An ActivationDescriptor for the ActivationLayer.
Definition: Descriptors.hpp:36
armnn::ILayerSupport::reasonIfUnsupported
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
Definition: ILayerSupport.hpp:43
armnn::LstmDescriptor
An LstmDescriptor for the LstmLayer.
Definition: Descriptors.hpp:1069
armnn::NeonLayerSupport::IsTransposeSupported
bool IsTransposeSupported(const TensorInfo &input, const TensorInfo &output, const TransposeDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: NeonLayerSupport.cpp:1604
NeonConstantWorkload.hpp
NeonExpWorkload.hpp
armnn::LayerType::ElementwiseBinary
@ ElementwiseBinary
armnn::LayerType::Maximum
@ Maximum
armnn::FullyConnectedDescriptor
A FullyConnectedDescriptor for the FullyConnectedLayer.
Definition: Descriptors.hpp:495
NeonMaximumWorkload.hpp
armnn::LayerType::Quantize
@ Quantize
armnn::NeonPadWorkloadValidate
arm_compute::Status NeonPadWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const PadDescriptor &descriptor)
Definition: NeonPadWorkload.cpp:59
armnn::NeonLayerSupport::IsSplitterSupported
bool IsSplitterSupported(const TensorInfo &input, const std::vector< std::reference_wrapper< TensorInfo >> &outputs, const ViewsDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: NeonLayerSupport.cpp:1518
armnn::NeonAbsWorkloadValidate
arm_compute::Status NeonAbsWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: NeonAbsWorkload.cpp:17
armnn::NeonConvertFp16ToFp32WorkloadValidate
arm_compute::Status NeonConvertFp16ToFp32WorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: NeonConvertFp16ToFp32Workload.cpp:19
NeonInstanceNormalizationWorkload.hpp
NeonDequantizeWorkload.hpp
armnn::BatchMatMulDescriptor
A BatchMatMulDescriptor for the BatchMatMul operator.
Definition: Descriptors.hpp:1551
armnn::ResizeDescriptor
A ResizeDescriptor for the ResizeLayer.
Definition: Descriptors.hpp:952
armnn::LayerType::ArgMinMax
@ ArgMinMax
NeonConcatWorkload.hpp
armnn::StridedSliceDescriptor
A StridedSliceDescriptor for the StridedSliceLayer.
Definition: Descriptors.hpp:1270
armnn::LayerType::Subtraction
@ Subtraction
NeonL2NormalizationFloatWorkload.hpp
armnn::NeonLayerSupport::IsMinimumSupported
bool IsMinimumSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: NeonLayerSupport.cpp:1275
armnn::LayerType::SpaceToBatchNd
@ SpaceToBatchNd
armnn::LayerType::Convolution2d
@ Convolution2d
armnn::UnaryOperation::Exp
@ Exp
armnn::Pooling3dDescriptor
A Pooling3dDescriptor for the Pooling3dLayer.
Definition: Descriptors.hpp:419
armnn::ReduceDescriptor
A ReduceDescriptor for the REDUCE operators.
Definition: Descriptors.hpp:1505
armnn::NeonLogicalAndWorkloadValidate
arm_compute::Status NeonLogicalAndWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output)
Definition: NeonLogicalAndWorkload.cpp:18
PolymorphicDowncast.hpp
armnn::LayerType::Shape
@ Shape
armnn::ILayerSupport::previousOutputIn
const TensorInfo & previousOutputIn
Definition: ILayerSupport.hpp:405
NeonBatchMatMulWorkload.hpp
NeonMeanWorkload.hpp
armnn::ComparisonDescriptor
A ComparisonDescriptor for the ComparisonLayer.
Definition: Descriptors.hpp:89
armnn::IgnoreUnused
void IgnoreUnused(Ts &&...)
Definition: IgnoreUnused.hpp:14
armnn::UnaryOperation::Sqrt
@ Sqrt
armnn::UnaryOperation::Neg
@ Neg
NeonLogicalNotWorkload.hpp
armnn::NeonConvolution2dWorkloadValidate
arm_compute::Status NeonConvolution2dWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const Convolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, bool isFastMathEnabled, const ActivationDescriptor *activationDescriptor)
Definition: NeonConvolution2dWorkload.cpp:24
armnn::NeonTransposeConvolution2dWorkloadValidate
arm_compute::Status NeonTransposeConvolution2dWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const TransposeConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases)
Definition: NeonTransposeConvolution2dWorkload.cpp:25
armnn::ILayerSupport::mean
const TensorInfo const TensorInfo & mean
Definition: ILayerSupport.hpp:63
NeonMinimumWorkload.hpp
armnn::NeonPooling3dWorkloadValidate
arm_compute::Status NeonPooling3dWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const Pooling3dDescriptor &descriptor)
Definition: NeonPooling3dWorkload.cpp:15
armnn::LayerSupportBase::IsMergeSupported
bool IsMergeSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: LayerSupportBase.cpp:404
armnn::LayerType::Merge
@ Merge
armnn::LayerSupportBase::IsShapeSupported
bool IsShapeSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: LayerSupportBase.cpp:551
armnn::ViewsDescriptor
A ViewsDescriptor for the SplitterLayer.
Definition: Descriptors.hpp:244
armnn::NeonCastValidate
arm_compute::Status NeonCastValidate(const TensorInfo &input, const TensorInfo &output)
Definition: NeonCastWorkload.cpp:19
armnn::LayerType::Permute
@ Permute
armnn::NeonLayerSupport::IsComparisonSupported
bool IsComparisonSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ComparisonDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: NeonLayerSupport.cpp:842
NeonChannelShuffleWorkload.hpp
armnn::LayerType::ConvertFp16ToFp32
@ ConvertFp16ToFp32
armnn::NeonSpaceToBatchNdWorkloadValidate
arm_compute::Status NeonSpaceToBatchNdWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const SpaceToBatchNdDescriptor &descriptor)
Definition: NeonSpaceToBatchNdWorkload.cpp:20
armnn::LayerType::QLstm
@ QLstm
armnn::LayerType::Pad
@ Pad
armnn::NeonLogWorkloadValidate
arm_compute::Status NeonLogWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: NeonLogWorkload.cpp:17
armnn::LayerType::Addition
@ Addition
armnn::LayerType::QuantizedLstm
@ QuantizedLstm
armnn::DataType::QAsymmS8
@ QAsymmS8
armnn::LayerType::BatchNormalization
@ BatchNormalization
armnn::NeonLayerSupport::IsDivisionSupported
bool IsDivisionSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: NeonLayerSupport.cpp:1300
armnn::LayerType::Reduce
@ Reduce
armnn::NeonLayerSupport::IsStackSupported
bool IsStackSupported(const std::vector< const TensorInfo * > &inputs, const TensorInfo &output, const StackDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: NeonLayerSupport.cpp:1551
NeonSubtractionWorkload.hpp
armnn::NeonLayerSupport::IsMeanSupported
bool IsMeanSupported(const TensorInfo &input, const TensorInfo &output, const MeanDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: NeonLayerSupport.cpp:1263
armnn::NeonActivationWorkloadValidate
arm_compute::Status NeonActivationWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const ActivationDescriptor &descriptor)
Definition: NeonActivationWorkload.cpp:17
NeonLayerSupport.hpp
armnn::LayerType::Division
@ Division
armnn::NeonLayerSupport::IsMultiplicationSupported
bool IsMultiplicationSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: NeonLayerSupport.cpp:1287
NeonSpaceToBatchNdWorkload.hpp
armnn::NeonLayerSupport::IsQuantizeSupported
bool IsQuantizeSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: NeonLayerSupport.cpp:1410
armnn
Copyright (c) 2021 ARM Limited and Contributors.
Definition: 01_00_quick_start.dox:6
armnn::LayerType::InstanceNormalization
@ InstanceNormalization
armnn::NeonLayerSupport::IsSpaceToBatchNdSupported
bool IsSpaceToBatchNdSupported(const TensorInfo &input, const TensorInfo &output, const SpaceToBatchNdDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: NeonLayerSupport.cpp:1494
NeonSqrtWorkload.hpp
armnn::NeonAdditionWorkloadValidate
arm_compute::Status NeonAdditionWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ActivationDescriptor *activationDescriptor)
Definition: NeonAdditionWorkload.cpp:20
armnn::NeonFullyConnectedWorkloadValidate
arm_compute::Status NeonFullyConnectedWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const TensorInfo &weights, const Optional< TensorInfo > &biases, const FullyConnectedDescriptor &descriptor, const ActivationDescriptor *activationDescriptor)
Definition: NeonFullyConnectedWorkload.cpp:24
armnn::NeonLayerSupport::IsNormalizationSupported
bool IsNormalizationSupported(const TensorInfo &input, const TensorInfo &output, const NormalizationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: NeonLayerSupport.cpp:1313
NeonReduceWorkload.hpp
armnn::NeonLayerSupport::IsBatchNormalizationSupported
bool IsBatchNormalizationSupported(const TensorInfo &input, const TensorInfo &output, const TensorInfo &mean, const TensorInfo &var, const TensorInfo &beta, const TensorInfo &gamma, const BatchNormalizationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: NeonLayerSupport.cpp:787
armnn::SetValueChecked
void SetValueChecked(Optional< T & > optionalRef, V &&val)
Definition: LayerSupportCommon.hpp:17
NeonLogicalAndWorkload.hpp
armnn::BatchToSpaceNdDescriptor
A BatchToSpaceNdDescriptor for the BatchToSpaceNdLayer.
Definition: Descriptors.hpp:863
armnn::ComputeSplitAxis
std::set< unsigned int > ComputeSplitAxis(const armnn::SplitterDescriptor &desc, const TensorShape &input)
Definition: ArmComputeUtils.hpp:244
NeonUnidirectionalSequenceLstmFloatWorkload.hpp
armnn::OptionalReferenceSwitch< std::is_reference< T >::value, T >::value
const T & value() const
Definition: Optional.hpp:146
armnn::SpaceToDepthDescriptor
A SpaceToDepthDescriptor for the SpaceToDepthLayer.
Definition: Descriptors.hpp:1042
NeonUnidirectionalSequenceLstmWorkload.hpp
NeonConvolution3dWorkload.hpp
armnn::NeonLayerSupport::IsDepthwiseConvolutionSupported
bool IsDepthwiseConvolutionSupported(const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: NeonLayerSupport.cpp:1002
armnn::LayerType::Activation
@ Activation
armnn::BinaryOperation::Sub
@ Sub
armnn::NeonLayerSupport::IsTransposeConvolution2dSupported
bool IsTransposeConvolution2dSupported(const TensorInfo &input, const TensorInfo &output, const TransposeConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: NeonLayerSupport.cpp:1588
NeonMultiplicationWorkload.hpp
armnn::LayerType::Normalization
@ Normalization
armnn::NeonConstantWorkloadValidate
arm_compute::Status NeonConstantWorkloadValidate(const TensorInfo &output)
Definition: NeonConstantWorkload.cpp:20
NeonStridedSliceWorkload.hpp
armnn::NeonL2NormalizationWorkloadValidate
arm_compute::Status NeonL2NormalizationWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const L2NormalizationDescriptor &descriptor)
Definition: NeonL2NormalizationFloatWorkload.cpp:19
armnn::NeonLayerSupport::IsBatchMatMulSupported
bool IsBatchMatMulSupported(const TensorInfo &inputX, const TensorInfo &inputY, const TensorInfo &output, const BatchMatMulDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:757
NeonDivisionWorkload.hpp
NeonBatchNormalizationWorkload.hpp
armnn::NeonDequantizeWorkloadValidate
arm_compute::Status NeonDequantizeWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: NeonDequantizeWorkload.cpp:22
armnn::NeonBackendModelContext::IsFastMathEnabled
bool IsFastMathEnabled() const
Definition: NeonBackendModelContext.cpp:53
armnn::NeonLayerSupport::IsInstanceNormalizationSupported
bool IsInstanceNormalizationSupported(const TensorInfo &input, const TensorInfo &output, const InstanceNormalizationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: NeonLayerSupport.cpp:1174
armnn::LayerType::Comparison
@ Comparison
armnn::LayerType::Stack
@ Stack
armnn::ILayerSupport::descriptor
const TensorInfo const ActivationDescriptor & descriptor
Definition: ILayerSupport.hpp:42
armnn::NeonLayerSupport::IsConstantSupported
bool IsConstantSupported(const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: NeonLayerSupport.cpp:896
armnn::LayerSupportBase::IsDetectionPostProcessSupported
bool IsDetectionPostProcessSupported(const TensorInfo &boxEncodings, const TensorInfo &scores, const TensorInfo &anchors, const TensorInfo &detectionBoxes, const TensorInfo &detectionClasses, const TensorInfo &detectionScores, const TensorInfo &numDetections, const DetectionPostProcessDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: LayerSupportBase.cpp:233
armnn::FillDescriptor
A FillDescriptor for the FillLayer.
Definition: Descriptors.hpp:913
NeonReshapeWorkload.hpp
armnn::NeonDepthToSpaceWorkloadValidate
arm_compute::Status NeonDepthToSpaceWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const DepthToSpaceDescriptor &descriptor)
Definition: NeonDepthToSpaceWorkload.cpp:19
armnn::LayerType
LayerType
When adding a new layer, adapt also the LastLayer enum value in the enum class LayerType below.
Definition: Types.hpp:479
armnn::LayerType::Reshape
@ Reshape
armnn::NeonLayerSupport::IsOutputSupported
bool IsOutputSupported(const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: NeonLayerSupport.cpp:1325
armnn::NeonLayerSupport::IsSliceSupported
bool IsSliceSupported(const TensorInfo &input, const TensorInfo &output, const SliceDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: NeonLayerSupport.cpp:1474
armnn::IBackendInternal::IBackendSpecificModelContextPtr
std::shared_ptr< IBackendModelContext > IBackendSpecificModelContextPtr
Definition: IBackendInternal.hpp:96
armnn::ILayerSupport::previousCellStateIn
const TensorInfo const TensorInfo & previousCellStateIn
Definition: ILayerSupport.hpp:406
armnn::LayerType::Gather
@ Gather
armnn::LayerType::DepthwiseConvolution2d
@ DepthwiseConvolution2d
armnn::LogicalBinaryOperation::LogicalOr
@ LogicalOr
armnn::NeonResizeWorkloadValidate
arm_compute::Status NeonResizeWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const ResizeDescriptor &descriptor)
Definition: NeonResizeWorkload.cpp:22
armnn::LayerType::Fill
@ Fill
armnn::NeonBatchMatMulValidate
arm_compute::Status NeonBatchMatMulValidate(const TensorInfo &inputInfoX, const TensorInfo &inputInfoY, const TensorInfo &outputInfo, const BatchMatMulDescriptor &descriptor, const bool isFastMathEnabled, const ActivationDescriptor *activationDescriptor)
Definition: NeonBatchMatMulWorkload.cpp:17
armnn::LayerType::Resize
@ Resize
NeonFullyConnectedWorkload.hpp
armnn::NeonBatchNormalizationValidate
arm_compute::Status NeonBatchNormalizationValidate(const TensorInfo &input, const TensorInfo &output, const TensorInfo &mean, const TensorInfo &var, const TensorInfo &beta, const TensorInfo &gamma, const BatchNormalizationDescriptor &descriptor, const ActivationDescriptor *activationDescriptor)
Definition: NeonBatchNormalizationWorkload.cpp:24
NeonGatherWorkload.hpp
armnn::ILayerSupport::alpha
const TensorInfo & alpha
Definition: ILayerSupport.hpp:392
NeonNegWorkload.hpp
armnn::NeonMeanWorkloadValidate
arm_compute::Status NeonMeanWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const MeanDescriptor &descriptor)
Definition: NeonMeanWorkload.cpp:18
armnn::NeonLayerSupport::IsReduceSupported
bool IsReduceSupported(const TensorInfo &input, const TensorInfo &output, const ReduceDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: NeonLayerSupport.cpp:1438
armnn::NeonNegWorkloadValidate
arm_compute::Status NeonNegWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: NeonNegWorkload.cpp:17
armnn::LayerType::Rank
@ Rank
armnn::DepthwiseConvolution2dDescriptor
A DepthwiseConvolution2dDescriptor for the DepthwiseConvolution2dLayer.
Definition: Descriptors.hpp:647
armnn::MeanDescriptor
A MeanDescriptor for the MeanLayer.
Definition: Descriptors.hpp:1139
armnn::TensorInfo::GetNumDimensions
unsigned int GetNumDimensions() const
Definition: Tensor.hpp:195
armnn::NeonInstanceNormalizationWorkloadValidate
arm_compute::Status NeonInstanceNormalizationWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const InstanceNormalizationDescriptor &descriptor)
Definition: NeonInstanceNormalizationWorkload.cpp:19
armnn::BinaryOperation::Mul
@ Mul
NeonQuantizedLstmWorkload.hpp
armnn::NeonLayerSupport::IsChannelShuffleSupported
bool IsChannelShuffleSupported(const TensorInfo &input, const TensorInfo &output, const ChannelShuffleDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: NeonLayerSupport.cpp:830
armnn::NeonConcatWorkloadValidate
arm_compute::Status NeonConcatWorkloadValidate(const std::vector< const TensorInfo * > &inputs, const TensorInfo &output, const OriginsDescriptor &descriptor)
Definition: NeonConcatWorkload.cpp:27
NeonBackendId.hpp
armnn::TensorInfo::IsTypeSpaceMatch
bool IsTypeSpaceMatch(const TensorInfo &other) const
Check that the types are the same and, if quantize, that the quantization parameters are the same.
Definition: Tensor.cpp:432
ArmComputeTensorUtils.hpp
armnn::LayerType::LogicalBinary
@ LogicalBinary
armnn::NeonLayerSupport::IsLstmSupported
bool IsLstmSupported(const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &scratchBuffer, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const LstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: NeonLayerSupport.cpp:1227
armnn::LayerType::UnidirectionalSequenceLstm
@ UnidirectionalSequenceLstm
armnn::NeonLayerSupport::IsReshapeSupported
bool IsReshapeSupported(const TensorInfo &input, const TensorInfo &output, const ReshapeDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: NeonLayerSupport.cpp:1450
armnn::NeonSliceWorkloadValidate
arm_compute::Status NeonSliceWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const SliceDescriptor &descriptor)
Definition: NeonSliceWorkload.cpp:21
armnn::LayerType::Pooling2d
@ Pooling2d
InternalTypes.hpp
armnn::NeonReduceWorkloadValidate
arm_compute::Status NeonReduceWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const ReduceDescriptor &descriptor)
Definition: NeonReduceWorkload.cpp:19
armnn::L2NormalizationDescriptor
A L2NormalizationDescriptor for the L2NormalizationLayer.
Definition: Descriptors.hpp:797
armnn::NeonLayerSupport::IsL2NormalizationSupported
bool IsL2NormalizationSupported(const TensorInfo &input, const TensorInfo &output, const L2NormalizationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: NeonLayerSupport.cpp:1186
armnn::DataType::Float32
@ Float32
NeonNormalizationFloatWorkload.hpp
armnn::ILayerSupport::input1
const TensorInfo & input1
Definition: ILayerSupport.hpp:48
armnn::NeonBatchToSpaceNdWorkloadValidate
arm_compute::Status NeonBatchToSpaceNdWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const BatchToSpaceNdDescriptor &descriptor)
Definition: NeonBatchToSpaceNdWorkload.cpp:20
armnn::ChannelShuffleDescriptor
A ChannelShuffleDescriptor for the ChannelShuffle operator.
Definition: Descriptors.hpp:1529
armnn::NeonQuantizedLstmWorkloadValidate
arm_compute::Status NeonQuantizedLstmWorkloadValidate(const TensorInfo &input, const TensorInfo &cellStateIn, const TensorInfo &outputStateIn, const TensorInfo &cellStateOut, const TensorInfo &outputStateOut, const QuantizedLstmInputParamsInfo &paramsInfo)
Definition: NeonQuantizedLstmWorkload.cpp:131
armnn::Convolution3dDescriptor
A Convolution3dDescriptor for the Convolution3dLayer.
Definition: Descriptors.hpp:576
armnn::LayerType::GatherNd
@ GatherNd
armnn::NeonGatherWorkloadValidate
arm_compute::Status NeonGatherWorkloadValidate(const TensorInfo &input, const TensorInfo &indices, const TensorInfo &output, const GatherDescriptor &descriptor)
Definition: NeonGatherWorkload.cpp:13
ArmComputeUtils.hpp
armnn::TensorInfo
Definition: Tensor.hpp:152
armnn::ILayerSupport::gamma
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo & gamma
Definition: ILayerSupport.hpp:66
armnn::LayerType::Minimum
@ Minimum
NeonSinWorkload.hpp
armnn::LayerType::Constant
@ Constant
armnn::ILayerSupport::var
const TensorInfo const TensorInfo const TensorInfo & var
Definition: ILayerSupport.hpp:64
NeonDepthToSpaceWorkload.hpp
NeonSoftmaxWorkload.hpp
armnn::NeonMinimumWorkloadValidate
arm_compute::Status NeonMinimumWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output)
Validate function for validating the inputs and output.
Definition: NeonMinimumWorkload.cpp:15
armnn::Convolution2dDescriptor
A Convolution2dDescriptor for the Convolution2dLayer.
Definition: Descriptors.hpp:522
armnn::LayerType::Lstm
@ Lstm
armnn::NeonUnidirectionalSequenceLstmFloatWorkloadValidate
arm_compute::Status NeonUnidirectionalSequenceLstmFloatWorkloadValidate(const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const UnidirectionalSequenceLstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo)
Definition: NeonUnidirectionalSequenceLstmFloatWorkload.cpp:510
armnn::NeonReshapeWorkloadValidate
arm_compute::Status NeonReshapeWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: NeonReshapeWorkload.cpp:17
armnn::NeonLayerSupport::IsPreluSupported
bool IsPreluSupported(const TensorInfo &input, const TensorInfo &alpha, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: NeonLayerSupport.cpp:1367
armnn::BatchNormalizationDescriptor
A BatchNormalizationDescriptor for the BatchNormalizationLayer.
Definition: Descriptors.hpp:816
armnn::NeonPreluWorkloadValidate
arm_compute::Status NeonPreluWorkloadValidate(const TensorInfo &input, const TensorInfo &alpha, const TensorInfo &output)
Definition: NeonPreluWorkload.cpp:17
Tensor.hpp
armnn::QLstmDescriptor
A QLstmDescriptor for the QLstmLayer.
Definition: Descriptors.hpp:1347
LayerSupportCommon.hpp
armnn::LayerType::ElementwiseUnary
@ ElementwiseUnary
armnn::NeonLayerSupport::IsAdditionSupported
bool IsAdditionSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: NeonLayerSupport.cpp:732
NeonPermuteWorkload.hpp
NeonLstmFloatWorkload.hpp
armnn::NeonUnidirectionalSequenceLstmWorkloadValidate
arm_compute::Status NeonUnidirectionalSequenceLstmWorkloadValidate(const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const UnidirectionalSequenceLstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo)
Definition: NeonUnidirectionalSequenceLstmWorkload.cpp:491
armnn::EmptyOptional
EmptyOptional is used to initialize the Optional class in case we want to have default value for an O...
Definition: Optional.hpp:32
armnn::NeonLayerSupport::IsInputSupported
bool IsInputSupported(const TensorInfo &input, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: NeonLayerSupport.cpp:1168
armnn::Status
Status
Definition: Types.hpp:42
NeonConvolution2dWorkload.hpp
armnn::LayerType::SpaceToDepth
@ SpaceToDepth
armnn::NeonLayerSupport::IsConvertFp16ToFp32Supported
bool IsConvertFp16ToFp32Supported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: NeonLayerSupport.cpp:904
armnn::BinaryOperation::Minimum
@ Minimum
armnn::TensorInfo::GetShape
const TensorShape & GetShape() const
Definition: Tensor.hpp:191
NeonConvertFp32ToFp16Workload.hpp
armnn::ILayerSupport::beta
const TensorInfo const TensorInfo const TensorInfo const TensorInfo & beta
Definition: ILayerSupport.hpp:65
armnn::NeonSubtractionWorkloadValidate
arm_compute::Status NeonSubtractionWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ActivationDescriptor *activationDescriptor)
Definition: NeonSubtractionWorkload.cpp:22
NeonLogicalOrWorkload.hpp
armnn::NeonRsqrtWorkloadValidate
arm_compute::Status NeonRsqrtWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: NeonRsqrtWorkload.cpp:18
armnn::NeonLayerSupport::IsConcatSupported
bool IsConcatSupported(const std::vector< const TensorInfo * > inputs, const TensorInfo &output, const OriginsDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: NeonLayerSupport.cpp:857
NeonSliceWorkload.hpp
armnn::NeonLayerSupport::IsActivationSupported
bool IsActivationSupported(const TensorInfo &input, const TensorInfo &output, const ActivationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: NeonLayerSupport.cpp:719
NeonLogSoftmaxWorkload.hpp
BackendRegistry.hpp
armnn::NeonLayerSupport::IsConvertFp32ToFp16Supported
bool IsConvertFp32ToFp16Supported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: NeonLayerSupport.cpp:914
armnn::NeonLayerSupport::IsLogSoftmaxSupported
bool IsLogSoftmaxSupported(const TensorInfo &input, const TensorInfo &output, const LogSoftmaxDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: NeonLayerSupport.cpp:1219
NeonTransposeWorkload.hpp
NeonTransposeConvolution2dWorkload.hpp
armnn::ILayerSupport::weights
const TensorInfo const Convolution2dDescriptor const TensorInfo & weights
Definition: ILayerSupport.hpp:127
armnn::BinaryOperation::Maximum
@ Maximum
armnn::UnaryOperation::Abs
@ Abs
armnn::LayerType::StridedSlice
@ StridedSlice
armnn::ILayerSupport::cellStateIn
const TensorInfo const TensorInfo & cellStateIn
Definition: ILayerSupport.hpp:287
armnn::LayerType::Unmap
@ Unmap
NeonDepthwiseConvolutionWorkload.hpp
armnn::NeonLayerSupport::IsFillSupported
bool IsFillSupported(const TensorInfo &input, const TensorInfo &output, const FillDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: NeonLayerSupport.cpp:1098
NeonAdditionWorkload.hpp
armnn::NeonChannelShuffleValidate
arm_compute::Status NeonChannelShuffleValidate(const TensorInfo &input, const TensorInfo &output, const ChannelShuffleDescriptor &descriptor)
Definition: NeonChannelShuffleWorkload.cpp:17
armnn::LayerType::DetectionPostProcess
@ DetectionPostProcess
armnn::ILayerSupport::biases
const TensorInfo const Convolution2dDescriptor const TensorInfo const Optional< TensorInfo > & biases
Definition: ILayerSupport.hpp:128
armnn::NeonLayerSupport::IsFloorSupported
bool IsFloorSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: NeonLayerSupport.cpp:1110
armnn::LayerType::Mean
@ Mean
armnn::NeonConvertFp32ToFp16WorkloadValidate
arm_compute::Status NeonConvertFp32ToFp16WorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: NeonConvertFp32ToFp16Workload.cpp:21
armnn::NeonSpaceToDepthWorkloadValidate
arm_compute::Status NeonSpaceToDepthWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const SpaceToDepthDescriptor &descriptor)
Definition: NeonSpaceToDepthWorkload.cpp:19
armnn::NeonLayerSupport::IsGatherNdSupported
bool IsGatherNdSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported) const
Definition: NeonLayerSupport.cpp:1156
NeonCastWorkload.hpp
armnn::BaseDescriptor
Base class for all descriptors.
Definition: Descriptors.hpp:22
NeonSpaceToDepthWorkload.hpp
armnn::OriginsDescriptor
An OriginsDescriptor for the ConcatLayer.
Definition: Descriptors.hpp:201
armnn::NeonPermuteWorkloadValidate
arm_compute::Status NeonPermuteWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const PermuteDescriptor &descriptor)
Definition: NeonPermuteWorkload.cpp:15
armnn::ReshapeDescriptor
A ReshapeDescriptor for the ReshapeLayer.
Definition: Descriptors.hpp:990
armnn::IsLayerTypeSupported
bool IsLayerTypeSupported(const LayerType &type, const std::vector< TensorInfo > &infos, const BaseDescriptor &descriptor, const Optional< LstmInputParamsInfo > &lstmParamsInfo, const Optional< QuantizedLstmInputParamsInfo > &quantizedLstmParamsInfo, Optional< std::string & > reasonIfUnsupported, const NeonLayerSupport &support)
Definition: NeonLayerSupport.cpp:169
armnn::BinaryOperation::Add
@ Add
armnn::LayerType::BatchToSpaceNd
@ BatchToSpaceNd
armnn::NeonLayerSupport::IsConvolution3dSupported
bool IsConvolution3dSupported(const TensorInfo &input, const TensorInfo &output, const Convolution3dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: NeonLayerSupport.cpp:957
armnn::DataType
DataType
Definition: Types.hpp:48
armnn::PermuteDescriptor
A PermuteDescriptor for the PermuteLayer.
Definition: Descriptors.hpp:149
armnn::NeonLayerSupport::IsStridedSliceSupported
bool IsStridedSliceSupported(const TensorInfo &input, const TensorInfo &output, const StridedSliceDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: NeonLayerSupport.cpp:1563
armnn::TransposeConvolution2dDescriptor
A TransposeConvolution2dDescriptor for the TransposeConvolution2dLayer.
Definition: Descriptors.hpp:1407
NeonPreluWorkload.hpp
armnn::IsSupportedForDataTypeGeneric
bool IsSupportedForDataTypeGeneric(Optional< std::string & > reasonIfUnsupported, DataType dataType, Float16Func float16FuncPtr, Float32Func float32FuncPtr, Uint8Func uint8FuncPtr, Int32Func int32FuncPtr, BooleanFunc booleanFuncPtr, Params &&... params)
Definition: LayerSupportCommon.hpp:27
armnn::LayerType::DepthToSpace
@ DepthToSpace
armnn::NeonExpWorkloadValidate
arm_compute::Status NeonExpWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: NeonExpWorkload.cpp:17
armnn::NeonSinWorkloadValidate
arm_compute::Status NeonSinWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: NeonSinWorkload.cpp:17
armnn::NeonSqrtWorkloadValidate
arm_compute::Status NeonSqrtWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: NeonSqrtWorkload.cpp:18
armnn::NeonLayerSupport::IsUnidirectionalSequenceLstmSupported
bool IsUnidirectionalSequenceLstmSupported(const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const UnidirectionalSequenceLstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string & > reasonIfUnsupported) const override
Definition: NeonLayerSupport.cpp:1612
armnn::NeonTransposeWorkloadValidate
arm_compute::Status NeonTransposeWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const TransposeDescriptor &descriptor)
Definition: NeonTransposeWorkload.cpp:15
NeonRsqrtWorkload.hpp
armnn::ILayerSupport::outputs
const std::vector< std::reference_wrapper< TensorInfo > > & outputs
Definition: ILayerSupport.hpp:488
armnn::NeonLayerSupport::IsGatherSupported
bool IsGatherSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const GatherDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported) const override
Definition: NeonLayerSupport.cpp:1142
armnn::Pooling2dDescriptor
A Pooling2dDescriptor for the Pooling2dLayer.
Definition: Descriptors.hpp:359
armnn::LogicalBinaryDescriptor
A LogicalBinaryDescriptor for the LogicalBinaryLayer.
Definition: Descriptors.hpp:1485
armnn::UnaryOperation::LogicalNot
@ LogicalNot
armnn::UnaryOperation::Sin
@ Sin
Exceptions.hpp
armnn::Optional
Definition: Optional.hpp:270
NeonPooling3dWorkload.hpp
armnn::NeonQLstmWorkloadValidate
arm_compute::Status NeonQLstmWorkloadValidate(const TensorInfo &input, const TensorInfo &cellStateIn, const TensorInfo &outputStateIn, const TensorInfo &cellStateOut, const TensorInfo &outputStateOut, const TensorInfo &output, const QLstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo)
Definition: NeonQLstmWorkload.cpp:243
armnn::NeonLayerSupport::IsLogicalBinarySupported
bool IsLogicalBinarySupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const LogicalBinaryDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported) const override
Definition: NeonLayerSupport.cpp:1194
armnn::NeonStackWorkloadValidate
arm_compute::Status NeonStackWorkloadValidate(const std::vector< const TensorInfo * > &inputs, const TensorInfo &output, const StackDescriptor &descriptor)
Definition: NeonStackWorkload.cpp:27
armnn::NeonDivisionWorkloadValidate
arm_compute::Status NeonDivisionWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ActivationDescriptor *activationDescriptor)
Definition: NeonDivisionWorkload.cpp:18
armnn::PolymorphicDowncast
DestType PolymorphicDowncast(SourceType *value)
Polymorphic downcast for build in pointers only.
Definition: PolymorphicDowncast.hpp:74
armnn::NeonLayerSupport::IsSubtractionSupported
bool IsSubtractionSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: NeonLayerSupport.cpp:1575
armnn::NeonLayerSupport::IsDequantizeSupported
bool IsDequantizeSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: NeonLayerSupport.cpp:1019
FORWARD_WORKLOAD_VALIDATE_FUNC
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported,...)
Definition: NeonLayerSupport.cpp:151
armnn::NeonConvolution3dWorkloadValidate
arm_compute::Status NeonConvolution3dWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const Convolution3dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, bool isFastMathEnabled, const ActivationDescriptor *activationDescriptor)
Definition: NeonConvolution3dWorkload.cpp:24
armnn::LayerType::Concat
@ Concat
armnn::NeonLayerSupport::IsDilatedDepthwiseConvolutionSupported
bool IsDilatedDepthwiseConvolutionSupported(const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reason=EmptyOptional()) const override
Definition: NeonLayerSupport.cpp:1029
armnn::ArgMinMaxDescriptor
An ArgMinMaxDescriptor for ArgMinMaxLayer.
Definition: Descriptors.hpp:67
armnn::UnaryOperation::Rsqrt
@ Rsqrt
armnn::DataType::QSymmS16
@ QSymmS16
armnn::NeonMultiplicationWorkloadValidate
arm_compute::Status NeonMultiplicationWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ActivationDescriptor *activationDescriptor)
Definition: NeonMultiplicationWorkload.cpp:19
armnn::NeonLayerSupport::IsQLstmSupported
bool IsQLstmSupported(const TensorInfo &input, const TensorInfo &previousOutputIn, const TensorInfo &previousCellStateIn, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const QLstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: NeonLayerSupport.cpp:1375
armnn::LayerType::Cast
@ Cast
IgnoreUnused.hpp
armnn::LayerType::BatchMatMul
@ BatchMatMul
NeonPadWorkload.hpp
armnn::LayerType::Convolution3d
@ Convolution3d
armnn::LayerType::Splitter
@ Splitter
armnn::NeonLayerSupport::IsPooling3dSupported
bool IsPooling3dSupported(const TensorInfo &input, const TensorInfo &output, const Pooling3dDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: NeonLayerSupport.cpp:1359
armnn::QuantizedLstmInputParamsInfo
Definition: QuantizedLstmParams.hpp:119
armnn::NeonLayerSupport::IsQuantizedLstmSupported
bool IsQuantizedLstmSupported(const TensorInfo &input, const TensorInfo &cellStateIn, const TensorInfo &outputStateIn, const TensorInfo &cellStateOut, const TensorInfo &outputStateOut, const QuantizedLstmInputParamsInfo &paramsInfo, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: NeonLayerSupport.cpp:1420
armnn::NeonMaximumWorkloadValidate
arm_compute::Status NeonMaximumWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output)
Definition: NeonMaximumWorkload.cpp:14
armnn::ILayerSupport::output
const TensorInfo & output
Definition: ILayerSupport.hpp:41
armnn::LayerType::LogSoftmax
@ LogSoftmax
NeonComparisonWorkload.hpp
armnn::NeonLogicalOrWorkloadValidate
arm_compute::Status NeonLogicalOrWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output)
Definition: NeonLogicalOrWorkload.cpp:18
Types.hpp
armnn::NeonLayerSupport::IsMaximumSupported
bool IsMaximumSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: NeonLayerSupport.cpp:1251
armnn::NeonComparisonWorkloadValidate
arm_compute::Status NeonComparisonWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ComparisonDescriptor &descriptor)
Definition: NeonComparisonWorkload.cpp:16
armnn::LayerType::Output
@ Output
armnn::NeonLayerSupport::IsFullyConnectedSupported
bool IsFullyConnectedSupported(const TensorInfo &input, const TensorInfo &output, const TensorInfo &weights, const TensorInfo &biases, const FullyConnectedDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: NeonLayerSupport.cpp:1125
armnn::NeonNormalizationWorkloadValidate
arm_compute::Status NeonNormalizationWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const NormalizationDescriptor &descriptor)
Definition: NeonNormalizationFloatWorkload.cpp:49
armnn::InvalidArgumentException
Definition: Exceptions.hpp:80
armnn::LayerType::Multiplication
@ Multiplication
armnn::LayerType::MemImport
@ MemImport
armnn::BinaryOperation::Div
@ Div
armnn::LayerType::Prelu
@ Prelu
armnn::ILayerSupport::outputStateOut
const TensorInfo const TensorInfo const TensorInfo const TensorInfo & outputStateOut
Definition: ILayerSupport.hpp:289
armnn::InstanceNormalizationDescriptor
An InstanceNormalizationDescriptor for InstanceNormalizationLayer.
Definition: Descriptors.hpp:835
armnn::NeonPooling2dWorkloadValidate
arm_compute::Status NeonPooling2dWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const Pooling2dDescriptor &descriptor)
Definition: NeonPooling2dWorkload.cpp:22
armnn::NeonDepthwiseConvolutionWorkloadValidate
arm_compute::Status NeonDepthwiseConvolutionWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, const ActivationDescriptor *activationDescriptor)
Definition: NeonDepthwiseConvolutionWorkload.cpp:29
armnn::NeonLayerSupport::NeonLayerSupport
NeonLayerSupport()
Definition: NeonLayerSupport.cpp:164
armnn::NeonLayerSupport
Definition: NeonLayerSupport.hpp:14
armnn::ILayerSupport::cellStateOut
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo & cellStateOut
Definition: ILayerSupport.hpp:290
armnn::NeonLayerSupport::IsPadSupported
bool IsPadSupported(const TensorInfo &input, const TensorInfo &output, const PadDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: NeonLayerSupport.cpp:1331
armnn::LayerType::Dequantize
@ Dequantize
armnn::LayerSupportBase::IsMemCopySupported
bool IsMemCopySupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: LayerSupportBase.cpp:390
armnn::TensorInfo::GetDataType
DataType GetDataType() const
Definition: Tensor.hpp:198
armnn::NeonLayerSupport::IsLayerSupported
bool IsLayerSupported(const LayerType &type, const std::vector< TensorInfo > &infos, const BaseDescriptor &descriptor, const Optional< LstmInputParamsInfo > &lstmParamsInfo, const Optional< QuantizedLstmInputParamsInfo > &quantizedLstmParamsInfo, Optional< std::string & > reasonIfUnsupported) const override
Definition: NeonLayerSupport.cpp:671
NeonStackWorkload.hpp
armnn::NeonLogSoftmaxWorkloadValidate
arm_compute::Status NeonLogSoftmaxWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const LogSoftmaxDescriptor &descriptor)
Definition: NeonLogSoftmaxWorkload.cpp:19
armnn::UnaryOperation::Log
@ Log
armnn::NeonLogicalNotWorkloadValidate
arm_compute::Status NeonLogicalNotWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: NeonLogicalNotWorkload.cpp:19
armnn::NeonLstmFloatWorkloadValidate
arm_compute::Status NeonLstmFloatWorkloadValidate(const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &scratchBuffer, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const LstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo)
Definition: NeonLstmFloatWorkload.cpp:253
armnn::BoostLogSeverityMapping::info
@ info
armnn::SpaceToBatchNdDescriptor
A SpaceToBatchNdDescriptor for the SpaceToBatchNdLayer.
Definition: Descriptors.hpp:1010
armnn::NeonLayerSupport::IsConvolution2dSupported
bool IsConvolution2dSupported(const TensorInfo &input, const TensorInfo &output, const Convolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: NeonLayerSupport.cpp:924
NeonLogWorkload.hpp