ArmNN
 23.02
NeonLayerSupport.cpp
Go to the documentation of this file.
1 //
2 // Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
6 #include "NeonLayerSupport.hpp"
7 #include "NeonBackendId.hpp"
9 
10 #include <armnn/Exceptions.hpp>
11 #include <armnn/Tensor.hpp>
12 #include <armnn/Types.hpp>
14 
15 #include <InternalTypes.hpp>
16 #include <LayerSupportCommon.hpp>
19 
20 #if defined(ARMCOMPUTENEON_ENABLED)
85 #endif
86 
87 namespace armnn
88 {
89 
90 namespace
91 {
92 
93 const TensorInfo OverrideDataType(const TensorInfo& info, Optional<DataType> type)
94 {
95  if (!type)
96  {
97  return info;
98  }
99  return TensorInfo(info.GetShape(),
100  type.value(),
101  info.GetQuantizationScale(),
102  info.GetQuantizationOffset(),
103  info.IsConstant());
104 }
105 
// Reports whether the Neon (CpuAcc) backend is available in this build.
// When ArmNN is compiled without ARMCOMPUTENEON_ENABLED every query fails
// here with an explanatory reason string. The variadic arguments are accepted
// so callers can forward tensors/descriptors uniformly; they are deliberately
// unused in both build configurations.
template< typename ... Args>
bool IsNeonBackendSupported(Optional<std::string&> reasonIfUnsupported, Args... args)
{
 IgnoreUnused(reasonIfUnsupported, (args)...);
#if defined(ARMCOMPUTENEON_ENABLED)
 return true;
#else
 // reasonIfUnsupported is only written if the caller actually supplied one.
 SetValueChecked(reasonIfUnsupported, "The armnn library has been built without NEON support");
 return false;
#endif
}
117 
118 template<typename FloatFunc, typename Uint8Func, typename ... Params>
119 bool IsSupportedForDataTypeNeon(Optional<std::string&> reasonIfUnsupported,
120  DataType dataType,
121  FloatFunc floatFuncPtr,
122  Uint8Func uint8FuncPtr,
123  Params&&... params)
124 {
125  return IsNeonBackendSupported(reasonIfUnsupported) &&
126  IsSupportedForDataTypeGeneric(reasonIfUnsupported,
127  dataType,
128  floatFuncPtr,
129  floatFuncPtr,
130  uint8FuncPtr,
131  &FalseFunc<>,
132  &FalseFunc<>,
133  std::forward<Params>(params)...);
134 }
135 
136 #if defined(ARMCOMPUTENEON_ENABLED)
137 template<class FuncType, class... Args>
138 inline bool IsWorkloadSupported(FuncType& func, Optional<std::string&> reasonIfUnsupported, Args&&... args)
139 {
140  arm_compute::Status aclStatus = func(std::forward<Args>(args)...);
141  const bool supported = (aclStatus.error_code() == arm_compute::ErrorCode::OK);
142  if (!supported && reasonIfUnsupported)
143  {
144  reasonIfUnsupported.value() = aclStatus.error_description();
145  }
146  return supported;
147 }
148 
// Dispatches a layer-support query to the given ACL validate function when the
// Neon backend is compiled in; otherwise falls back to the generic
// "library built without NEON" failure path. Note: the macro expands to a
// 'return' statement, so it must be the tail of the enclosing function.
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported, ...) \
 return IsWorkloadSupported(func, reasonIfUnsupported, __VA_ARGS__);
#else
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported, ...) \
 return IsNeonBackendSupported(reasonIfUnsupported, __VA_ARGS__);
#endif
155 } // anonymous namespace
156 
158  : m_ModelContextPtr(modelContextPtr)
159 {
160 }
161 
163  : m_ModelContextPtr(nullptr)
164 {
165 }
166 
168  const std::vector<TensorInfo>& infos,
169  const BaseDescriptor& descriptor,
170  const Optional<LstmInputParamsInfo>& lstmParamsInfo,
171  const Optional<QuantizedLstmInputParamsInfo>& quantizedLstmParamsInfo,
172  Optional<std::string&> reasonIfUnsupported,
173  const NeonLayerSupport& support)
174 {
175  switch (type)
176  {
178  return support.IsActivationSupported(infos[0],
179  infos[1],
180  *(PolymorphicDowncast<const ActivationDescriptor*>(&descriptor)),
181  reasonIfUnsupported);
182  case LayerType::Addition:
183  return support.IsAdditionSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
185  return support.IsArgMinMaxSupported(infos[0],
186  infos[1],
187  *(PolymorphicDowncast<const ArgMinMaxDescriptor*>(&descriptor)),
188  reasonIfUnsupported);
190  return support.IsBatchMatMulSupported(infos[0],
191  infos[1],
192  infos[2],
193  *(PolymorphicDowncast<const BatchMatMulDescriptor*>(&descriptor)),
194  reasonIfUnsupported);
196  return support.IsBatchNormalizationSupported(infos[0],
197  infos[1],
198  infos[2],
199  infos[3],
200  infos[4],
201  infos[5],
202  *(PolymorphicDowncast<const
203  BatchNormalizationDescriptor*>(&descriptor)),
204  reasonIfUnsupported);
206  return support.IsBatchToSpaceNdSupported(infos[0],
207  infos[1],
208  *(PolymorphicDowncast<const
209  BatchToSpaceNdDescriptor*>(&descriptor)),
210  reasonIfUnsupported);
211  case LayerType::Cast:
212  return support.IsCastSupported(infos[0], infos[1], reasonIfUnsupported);
214  return support.IsChannelShuffleSupported(infos[0],
215  infos[1],
216  *(PolymorphicDowncast<const
217  ChannelShuffleDescriptor*>(&descriptor)),
218  reasonIfUnsupported);
220  return support.IsComparisonSupported(infos[0],
221  infos[1],
222  infos[2],
223  *(PolymorphicDowncast<const ComparisonDescriptor*>(&descriptor)),
224  reasonIfUnsupported);
225  case LayerType::Concat:
226  {
227  std::vector<const TensorInfo*> inputInfos;
228  for (uint32_t i = 0; i < (infos.size() - 1); i++)
229  {
230  inputInfos.push_back(&infos[i]);
231  }
232  return support.IsConcatSupported(inputInfos,
233  infos[infos.size() - 1],
234  *(PolymorphicDowncast<const OriginsDescriptor*>(&descriptor)),
235  reasonIfUnsupported);
236  }
237  case LayerType::Constant:
238  return support.IsConstantSupported(infos[0], reasonIfUnsupported);
240  return support.IsConvertFp16ToFp32Supported(infos[0], infos[1], reasonIfUnsupported);
242  return support.IsConvertFp32ToFp16Supported(infos[0], infos[1], reasonIfUnsupported);
244  {
245  if (infos.size() != 4)
246  {
247  throw InvalidArgumentException("Invalid number of TransposeConvolution2d TensorInfos. "
248  "TensorInfos should be of format: {input, output, weights, biases}.");
249  }
250 
251  auto desc = *(PolymorphicDowncast<const Convolution2dDescriptor*>(&descriptor));
252  if (infos[3] == TensorInfo())
253  {
254  return support.IsConvolution2dSupported(infos[0],
255  infos[1],
256  desc,
257  infos[2],
258  EmptyOptional(),
259  reasonIfUnsupported);
260  }
261  else
262  {
263  return support.IsConvolution2dSupported(infos[0],
264  infos[1],
265  desc,
266  infos[2],
267  infos[3],
268  reasonIfUnsupported);
269  }
270  }
272  {
273  if (infos.size() != 4)
274  {
275  throw InvalidArgumentException("Invalid number of Convolution3d TensorInfos. "
276  "TensorInfos should be of format: {input, output, weights, biases}.");
277  }
278 
279  auto desc = *(PolymorphicDowncast<const Convolution3dDescriptor*>(&descriptor));
280  if (infos[3] == TensorInfo())
281  {
282  return support.IsConvolution3dSupported(infos[0],
283  infos[1],
284  desc,
285  infos[2],
286  EmptyOptional(),
287  reasonIfUnsupported);
288  }
289  else
290  {
291  return support.IsConvolution3dSupported(infos[0],
292  infos[1],
293  desc,
294  infos[2],
295  infos[3],
296  reasonIfUnsupported);
297  }
298  }
300  return support.IsDepthToSpaceSupported(infos[0],
301  infos[1],
302  *(PolymorphicDowncast<const DepthToSpaceDescriptor*>(&descriptor)),
303  reasonIfUnsupported);
305  {
306  if (infos.size() != 4)
307  {
308  throw InvalidArgumentException("Invalid number of DepthwiseConvolution2d TensorInfos. "
309  "TensorInfos should be of format: {input, output, weights, biases}.");
310  }
311 
312  auto desc = *(PolymorphicDowncast<const DepthwiseConvolution2dDescriptor*>(&descriptor));
313  if (infos[3] == TensorInfo())
314  {
315  return support.IsDepthwiseConvolutionSupported(infos[0],
316  infos[1],
317  desc,
318  infos[2],
319  EmptyOptional(),
320  reasonIfUnsupported);
321  }
322  else
323  {
324  return support.IsDepthwiseConvolutionSupported(infos[0],
325  infos[1],
326  desc,
327  infos[2],
328  infos[3],
329  reasonIfUnsupported);
330  }
331  }
333  return support.IsDequantizeSupported(infos[0], infos[1], reasonIfUnsupported);
335  {
336  auto desc = *(PolymorphicDowncast<const DetectionPostProcessDescriptor*>(&descriptor));
337  return support.IsDetectionPostProcessSupported(infos[0],
338  infos[1],
339  infos[2],
340  infos[3],
341  infos[4],
342  infos[5],
343  infos[6],
344  desc,
345  reasonIfUnsupported);
346  }
347  case LayerType::Division:
348  return support.IsDivisionSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
350  return support.IsElementwiseUnarySupported(infos[0],
351  infos[1],
352  *(PolymorphicDowncast<const
353  ElementwiseUnaryDescriptor*>(&descriptor)),
354  reasonIfUnsupported);
355  case LayerType::Fill:
356  return support.IsFillSupported(infos[0],
357  infos[1],
358  *(PolymorphicDowncast<const FillDescriptor*>(&descriptor)),
359  reasonIfUnsupported);
360  case LayerType::Floor:
361  return support.IsFloorSupported(infos[0], infos[1], reasonIfUnsupported);
363  return support.IsFullyConnectedSupported(infos[0],
364  infos[1],
365  infos[2],
366  infos[3],
367  *(PolymorphicDowncast<const
368  FullyConnectedDescriptor*>(&descriptor)),
369  reasonIfUnsupported);
370  case LayerType::Gather:
371  return support.IsGatherSupported(infos[0],
372  infos[1],
373  infos[2],
374  *(PolymorphicDowncast<const GatherDescriptor*>(&descriptor)),
375  reasonIfUnsupported);
376  case LayerType::GatherNd:
377  return support.IsGatherNdSupported(infos[0],
378  infos[1],
379  infos[2],
380  reasonIfUnsupported);
381  case LayerType::Input:
382  return support.IsInputSupported(infos[0], reasonIfUnsupported);
384  return support.IsInstanceNormalizationSupported(infos[0],
385  infos[1],
386  *(PolymorphicDowncast<const
387  InstanceNormalizationDescriptor*>(&descriptor)),
388  reasonIfUnsupported);
390  return support.IsL2NormalizationSupported(infos[0],
391  infos[1],
392  *(PolymorphicDowncast<const
393  L2NormalizationDescriptor*>(&descriptor)),
394  reasonIfUnsupported);
396  return support.IsLogicalBinarySupported(infos[0],
397  infos[1],
398  infos[2],
399  *(PolymorphicDowncast<const
400  LogicalBinaryDescriptor*>(&descriptor)),
401  reasonIfUnsupported);
403  return support.IsLogSoftmaxSupported(infos[0],
404  infos[1],
405  *(PolymorphicDowncast<const LogSoftmaxDescriptor*>(&descriptor)),
406  reasonIfUnsupported);
407  case LayerType::Lstm:
408  return support.IsLstmSupported(infos[0],
409  infos[1],
410  infos[2],
411  infos[3],
412  infos[4],
413  infos[5],
414  infos[6],
415  *(PolymorphicDowncast<const LstmDescriptor*>(&descriptor)),
416  lstmParamsInfo.value(),
417  reasonIfUnsupported);
418  case LayerType::Map:
419  return true;
420  case LayerType::Maximum:
421  return support.IsMaximumSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
422  case LayerType::Mean:
423  return support.IsMeanSupported(infos[0],
424  infos[1],
425  *(PolymorphicDowncast<const MeanDescriptor*>(&descriptor)),
426  reasonIfUnsupported);
427  case LayerType::MemCopy:
428  return support.IsMemCopySupported(infos[0], infos[1], reasonIfUnsupported);
430  return support.IsMemImportSupported(infos[0], infos[1], reasonIfUnsupported);
431  case LayerType::Merge:
432  return support.IsMergeSupported(infos[0],
433  infos[1],
434  infos[2],
435  reasonIfUnsupported);
436  case LayerType::Minimum:
437  return support.IsMinimumSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
439  return support.IsMultiplicationSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
441  return support.IsNormalizationSupported(infos[0],
442  infos[1],
443  *(PolymorphicDowncast<const
444  NormalizationDescriptor*>(&descriptor)),
445  reasonIfUnsupported);
446  case LayerType::Output:
447  return support.IsOutputSupported(infos[0], reasonIfUnsupported);
448  case LayerType::Pad:
449  return support.IsPadSupported(infos[0],
450  infos[1],
451  *(PolymorphicDowncast<const PadDescriptor*>(&descriptor)),
452  reasonIfUnsupported);
453  case LayerType::Permute:
454  return support.IsPermuteSupported(infos[0],
455  infos[1],
456  *(PolymorphicDowncast<const PermuteDescriptor*>(&descriptor)),
457  reasonIfUnsupported);
459  return support.IsPooling2dSupported(infos[0],
460  infos[1],
461  *(PolymorphicDowncast<const Pooling2dDescriptor*>(&descriptor)),
462  reasonIfUnsupported);
464  return support.IsPooling3dSupported(infos[0],
465  infos[1],
466  *(PolymorphicDowncast<const Pooling3dDescriptor*>(&descriptor)),
467  reasonIfUnsupported);
468  case LayerType::Prelu:
469  return support.IsPreluSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
470  case LayerType::QLstm:
471  return support.IsQLstmSupported(infos[0],
472  infos[1],
473  infos[2],
474  infos[3],
475  infos[4],
476  infos[5],
477  *(PolymorphicDowncast<const QLstmDescriptor*>(&descriptor)),
478  lstmParamsInfo.value(),
479  reasonIfUnsupported);
480  case LayerType::Quantize:
481  return support.IsQuantizeSupported(infos[0], infos[1], reasonIfUnsupported);
483  return support.IsQuantizedLstmSupported(infos[0],
484  infos[1],
485  infos[2],
486  infos[3],
487  infos[4],
488  quantizedLstmParamsInfo.value(),
489  reasonIfUnsupported);
490  case LayerType::Rank:
491  return true;
492  case LayerType::Reshape:
493  return support.IsReshapeSupported(infos[0],
494  infos[1],
495  *(PolymorphicDowncast<const ReshapeDescriptor*>(&descriptor)),
496  reasonIfUnsupported);
497  case LayerType::Resize:
498  return support.IsResizeSupported(infos[0],
499  infos[1],
500  *(PolymorphicDowncast<const ResizeDescriptor*>(&descriptor)),
501  reasonIfUnsupported);
502  case LayerType::Reduce:
503  return support.IsReduceSupported(infos[0],
504  infos[1],
505  *(PolymorphicDowncast<const ReduceDescriptor*>(&descriptor)),
506  reasonIfUnsupported);
507  case LayerType::Shape:
508  return support.IsShapeSupported(infos[0],
509  infos[1],
510  reasonIfUnsupported);
511  case LayerType::Slice:
512  return support.IsSliceSupported(infos[0],
513  infos[1],
514  *(PolymorphicDowncast<const SliceDescriptor*>(&descriptor)),
515  reasonIfUnsupported);
516  case LayerType::Softmax:
517  return support.IsSoftmaxSupported(infos[0],
518  infos[1],
519  *(PolymorphicDowncast<const SoftmaxDescriptor*>(&descriptor)),
520  reasonIfUnsupported);
522  return support.IsSpaceToBatchNdSupported(infos[0],
523  infos[1],
524  *(PolymorphicDowncast<const
525  SpaceToBatchNdDescriptor*>(&descriptor)),
526  reasonIfUnsupported);
528  return support.IsSpaceToDepthSupported(infos[0],
529  infos[1],
530  *(PolymorphicDowncast<const SpaceToDepthDescriptor*>(&descriptor)),
531  reasonIfUnsupported);
532  case LayerType::Splitter:
533  {
534  std::vector<TensorInfo> outputInfos;
535  for (uint32_t i = 1; i < infos.size(); i++)
536  {
537  outputInfos.push_back(infos[i]);
538  }
539  return support.IsSplitterSupported(infos[0],
540  {outputInfos.begin(), outputInfos.end()},
541  *(PolymorphicDowncast<const ViewsDescriptor*>(&descriptor)),
542  reasonIfUnsupported);
543  }
544  case LayerType::Stack:
545  {
546  std::vector<const TensorInfo*> inputInfos;
547  for (uint32_t i = 0; i < infos.size() - 1; i++)
548  {
549  inputInfos.push_back(&infos[i]);
550  }
551  return support.IsStackSupported(inputInfos,
552  infos[infos.size() - 1],
553  *(PolymorphicDowncast<const StackDescriptor*>(&descriptor)),
554  reasonIfUnsupported);
555  }
557  return support.IsStridedSliceSupported(infos[0],
558  infos[1],
559  *(PolymorphicDowncast<const StridedSliceDescriptor*>(&descriptor)),
560  reasonIfUnsupported);
562  return support.IsSubtractionSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
564  return support.IsTransposeSupported(infos[0],
565  infos[1],
566  *(PolymorphicDowncast<const TransposeDescriptor*>(&descriptor)),
567  reasonIfUnsupported);
569  {
570  if (infos.size() != 4)
571  {
572  throw InvalidArgumentException("Invalid number of TransposeConvolution2d TensorInfos. "
573  "TensorInfos should be of format: {input, output, weights, biases}.");
574  }
575 
576  auto desc = *(PolymorphicDowncast<const TransposeConvolution2dDescriptor*>(&descriptor));
577  if (infos[3] == TensorInfo())
578  {
579  return support.IsTransposeConvolution2dSupported(infos[0],
580  infos[1],
581  desc,
582  infos[2],
583  EmptyOptional(),
584  reasonIfUnsupported);
585  }
586  else
587  {
588  return support.IsTransposeConvolution2dSupported(infos[0],
589  infos[1],
590  desc,
591  infos[2],
592  infos[3],
593  reasonIfUnsupported);
594  }
595  }
597  {
598  auto desc = *(PolymorphicDowncast<const UnidirectionalSequenceLstmDescriptor*>(&descriptor));
599  return support.IsUnidirectionalSequenceLstmSupported(infos[0],
600  infos[1],
601  infos[2],
602  infos[3],
603  infos[4],
604  infos[5],
605  desc,
606  lstmParamsInfo.value(),
607  reasonIfUnsupported);
608  }
609  case LayerType::Unmap:
610  return true;
611  default:
612  // layers not supported in neon by default:
613  // debug, fakequantization, precompiled,
614  // standin, switch
615  return false;
616  }
617 }
618 
620  const std::vector<TensorInfo>& infos,
621  const BaseDescriptor& descriptor,
622  const Optional<LstmInputParamsInfo>& lstmParamsInfo,
623  const Optional<QuantizedLstmInputParamsInfo>& quantizedLstmParamsInfo,
624  Optional<std::string&> reasonIfUnsupported) const
625 {
626  bool isSupported = IsLayerTypeSupported(type,
627  infos,
628  descriptor,
629  lstmParamsInfo,
630  quantizedLstmParamsInfo,
632  *this);
633 
634  // For android-nn-driver and support library, to run FP16 operations on CpuAcc we need at least v8.2
635  // architecture. If the available architecture is older than v8.2, we can check if the operator is
636  // supported by changing operator inputs & outputs to be FP32.
637  // This does not change the operator datatype in the above parsers to be FP32. We are simply reporting
638  // to the parsers if the operator can supported in ArmNN. We will then re-enter ArmNN (Network.cpp)
639  // where we will recheck IsLayerSupported() on the FP16 datatype, update the operator to be FP32,
640  // and, insert convert layers around the FP32 operator.
641  if (reasonIfUnsupported.has_value())
642  {
643  std::string checkStr = "This CPU architecture does not support F16 data type, you need v8.2 or above";
644  if (!isSupported
645  && reasonIfUnsupported.value().find(checkStr) != std::string::npos)
646  {
647  std::vector<TensorInfo> newInfos;
648  for (auto info: infos)
649  {
650  newInfos.emplace_back(OverrideDataType(info, DataType::Float32));
651  }
652 
653  std::string tmpString;
654  return IsLayerTypeSupported(type,
655  newInfos,
656  descriptor,
657  lstmParamsInfo,
658  quantizedLstmParamsInfo,
659  tmpString,
660  *this);
661  }
662  }
663 
664  return isSupported;
665 }
666 
668  const TensorInfo& output,
669  const ActivationDescriptor& descriptor,
670  Optional<std::string&> reasonIfUnsupported) const
671 {
675  input,
676  output,
677  descriptor);
678 }
679 
681  const TensorInfo& input1,
682  const TensorInfo& output,
683  Optional<std::string&> reasonIfUnsupported) const
684 {
687  input0,
688  input1,
689  output,
690  nullptr);
691 }
692 
694  const TensorInfo& output,
695  const ArgMinMaxDescriptor& descriptor,
696  Optional<std::string&> reasonIfUnsupported) const
697 {
700  input,
701  output,
702  descriptor);
703 }
704 
706  const TensorInfo& inputY,
707  const TensorInfo& output,
708  const BatchMatMulDescriptor& descriptor,
709  Optional<std::string&> reasonIfUnsupported) const
710 {
713  inputX,
714  inputY,
715  output,
716  descriptor);
717 }
718 
720  const TensorInfo& output,
721  const TensorInfo& mean,
722  const TensorInfo& var,
723  const TensorInfo& beta,
724  const TensorInfo& gamma,
725  const BatchNormalizationDescriptor& descriptor,
726  Optional<std::string&> reasonIfUnsupported) const
727 {
730  input,
731  output,
732  mean,
733  var,
734  beta,
735  gamma,
736  descriptor,
737  nullptr);
738 }
739 
741  const TensorInfo& output,
742  const BatchToSpaceNdDescriptor& descriptor,
743  Optional<std::string&> reasonIfUnsupported) const
744 {
747  input,
748  output,
749  descriptor);
750 }
751 
753  const TensorInfo& output,
754  Optional<std::string&> reasonIfUnsupported) const
755 {
758  input,
759  output);
760 }
761 
763  const TensorInfo& output,
764  const ChannelShuffleDescriptor& descriptor,
765  Optional<std::string&> reasonIfUnsupported) const
766 {
769  input,
770  output,
771  descriptor);
772 }
773 
775  const TensorInfo& input1,
776  const TensorInfo& output,
777  const ComparisonDescriptor& descriptor,
778  Optional<std::string&> reasonIfUnsupported) const
779 {
780 
783  input0,
784  input1,
785  output,
786  descriptor);
787 }
788 
789 bool NeonLayerSupport::IsConcatSupported(const std::vector<const TensorInfo*> inputs,
790  const TensorInfo& output,
791  const OriginsDescriptor& descriptor,
792  Optional<std::string&> reasonIfUnsupported) const
793 {
794  if (descriptor.GetNumDimensions() <= descriptor.GetConcatAxis())
795  {
796  SetValueChecked(reasonIfUnsupported, "Neon Concat: Concat axis > Number of dimensions.");
797  return false;
798  }
799 
800  unsigned int concatInnerAxis = (descriptor.GetNumDimensions() - descriptor.GetConcatAxis()) - 1;
801  if(concatInnerAxis < 3) // Width, height, or channels
802  {
805  inputs,
806  output,
807  descriptor);
808  }
809  else if (concatInnerAxis == 3)
810  {
811  for (auto& input : inputs)
812  {
813  if (input && !output.IsTypeSpaceMatch(*input)) // Cannot use sub-tensors if the types are not same space
814  {
815  SetValueChecked(reasonIfUnsupported, "Neon Concat: Types and quantization parameters must match.");
816  return false;
817  }
818  }
819  return true; // Sub-tensors support concat along batch
820  }
821  else // > 4 dimensions not supported.
822  {
823  SetValueChecked(reasonIfUnsupported, "Neon Concat: Maximum of 4 dimensions supported.");
824  return false;
825  }
826 }
827 
829  Optional<std::string&> reasonIfUnsupported) const
830 {
833  output);
834 }
835 
837  const TensorInfo& output,
838  Optional<std::string&> reasonIfUnsupported) const
839 {
840  armnn::IgnoreUnused(input);
843  return true;
844 }
845 
847  const TensorInfo& output,
848  Optional<std::string&> reasonIfUnsupported) const
849 {
850  armnn::IgnoreUnused(input);
853  return true;
854 }
855 
857  const TensorInfo& output,
858  const Convolution2dDescriptor& descriptor,
859  const TensorInfo& weights,
860  const Optional<TensorInfo>& biases,
861  Optional<std::string&> reasonIfUnsupported) const
862 {
863  bool isFastMathEnabled = false;
864 #if defined(ARMCOMPUTENEON_ENABLED)
865  if (m_ModelContextPtr)
866  {
867  if (m_ModelContextPtr.get() != nullptr)
868  {
869  auto modelOptions = dynamic_cast<NeonBackendModelContext*>(m_ModelContextPtr.get());
870  if (modelOptions)
871  {
872  isFastMathEnabled = modelOptions->IsFastMathEnabled();
873  }
874  }
875  }
876 #endif
877 
880  input,
881  output,
882  descriptor,
883  weights,
884  biases,
885  isFastMathEnabled,
886  nullptr);
887 }
888 
890  const TensorInfo& output,
891  const Convolution3dDescriptor& descriptor,
892  const TensorInfo& weights,
893  const Optional<TensorInfo>& biases,
894  Optional<std::string&> reasonIfUnsupported) const
895 {
896  bool isFastMathEnabled = false;
897 #if defined(ARMCOMPUTENEON_ENABLED)
898  if (m_ModelContextPtr)
899  {
900  if (m_ModelContextPtr.get() != nullptr)
901  {
902  auto modelOptions = dynamic_cast<NeonBackendModelContext*>(m_ModelContextPtr.get());
903  if (modelOptions)
904  {
905  isFastMathEnabled = modelOptions->IsFastMathEnabled();
906  }
907  }
908  }
909 #endif
910 
913  input,
914  output,
915  descriptor,
916  weights,
917  biases,
918  isFastMathEnabled,
919  nullptr);
920 }
921 
923  const TensorInfo& output,
924  const DepthToSpaceDescriptor& descriptor,
925  Optional<std::string&> reasonIfUnsupported) const
926 {
929  input,
930  output,
931  descriptor);
932 }
933 
935  const TensorInfo& output,
936  const DepthwiseConvolution2dDescriptor& descriptor,
937  const TensorInfo& weights,
938  const Optional<TensorInfo>& biases,
939  Optional<std::string&> reasonIfUnsupported) const
940 {
943  input,
944  output,
945  descriptor,
946  weights,
947  biases,
948  nullptr);
949 }
950 
952  const TensorInfo& output,
953  Optional<std::string&> reasonIfUnsupported) const
954 {
957  input,
958  output);
959 }
960 
962  const TensorInfo& output,
963  const DepthwiseConvolution2dDescriptor& descriptor,
964  const TensorInfo& weights,
965  const Optional<TensorInfo>& biases,
966  Optional<std::string&> reasonIfUnsupported) const
967 {
970  input,
971  output,
972  descriptor,
973  weights,
974  biases,
975  nullptr);
976 }
977 
979  const TensorInfo& output,
980  const ElementwiseUnaryDescriptor& descriptor,
981  Optional<std::string&> reasonIfUnsupported) const
982 {
983  switch(descriptor.m_Operation)
984  {
985  case UnaryOperation::Abs:
988  input,
989  output);
990  case UnaryOperation::Exp:
993  input,
994  output);
998  input,
999  output);
1000  case UnaryOperation::Log:
1003  input,
1004  output);
1005  case UnaryOperation::Neg:
1008  input,
1009  output);
1010  case UnaryOperation::Rsqrt:
1013  input,
1014  output);
1015  case UnaryOperation::Sin:
1018  input,
1019  output);
1020  case UnaryOperation::Sqrt:
1023  input,
1024  output);
1025  default:
1026  return false;
1027  }
1028 }
1029 
1031  const TensorInfo& output,
1032  const FillDescriptor& descriptor,
1033  Optional<std::string&> reasonIfUnsupported) const
1034 {
1035  armnn::IgnoreUnused(input);
1038 
1039  return IsNeonBackendSupported(reasonIfUnsupported);
1040 }
1041 
1043  const TensorInfo& output,
1044  Optional<std::string&> reasonIfUnsupported) const
1045 {
1047  return IsNeonBackendSupported(reasonIfUnsupported) &&
1049  input.GetDataType(),
1050  &FalseFuncF16<>,
1051  &TrueFunc<>,
1052  &FalseFuncU8<>,
1053  &FalseFuncI32<>,
1054  &FalseFuncU8<>);
1055 }
1056 
1058  const TensorInfo& output,
1059  const TensorInfo& weights,
1060  const TensorInfo& biases,
1061  const FullyConnectedDescriptor& descriptor,
1062  Optional<std::string&> reasonIfUnsupported) const
1063 {
1066  input,
1067  output,
1068  weights,
1069  biases,
1070  descriptor,
1071  nullptr);
1072 }
1073 
1075  const TensorInfo& input1,
1076  const TensorInfo& output,
1077  const GatherDescriptor& descriptor,
1078  Optional<std::string&> reasonIfUnsupported) const
1079 {
1082  input0,
1083  input1,
1084  output,
1085  descriptor);
1086 }
1087 
1089  const TensorInfo& input1,
1090  const TensorInfo& output,
1091  Optional<std::string&> reasonIfUnsupported) const
1092 {
1095  input0,
1096  input1,
1097  output);
1098 }
1099 
1101  Optional<std::string&> reasonIfUnsupported) const
1102 {
1103  return IsNeonBackendSupported(reasonIfUnsupported, input);
1104 }
1105 
1107  const TensorInfo& output,
1108  const InstanceNormalizationDescriptor& descriptor,
1109  Optional<std::string&> reasonIfUnsupported) const
1110 {
1113  input,
1114  output,
1115  descriptor);
1116 }
1117 
1119  const TensorInfo& output,
1120  const L2NormalizationDescriptor& descriptor,
1121  Optional<std::string&> reasonIfUnsupported) const
1122 {
1124 }
1125 
1127  const TensorInfo& input1,
1128  const TensorInfo& output,
1129  const LogicalBinaryDescriptor& descriptor,
1130  Optional<std::string&> reasonIfUnsupported) const
1131 {
1132  switch(descriptor.m_Operation)
1133  {
1137  input0,
1138  input1,
1139  output);
1143  input0,
1144  input1,
1145  output);
1146  default:
1147  return false;
1148  }
1149 }
1150 
1152  const TensorInfo& output,
1153  const LogSoftmaxDescriptor& descriptor,
1154  Optional<std::string&> reasonIfUnsupported) const
1155 {
1157 }
1158 
1160  const TensorInfo& outputStateIn,
1161  const TensorInfo& cellStateIn,
1162  const TensorInfo& scratchBuffer,
1163  const TensorInfo& outputStateOut,
1164  const TensorInfo& cellStateOut,
1165  const TensorInfo& output,
1166  const LstmDescriptor& descriptor,
1167  const LstmInputParamsInfo& paramsInfo,
1168  Optional<std::string&> reasonIfUnsupported) const
1169 {
1172  input,
1173  outputStateIn,
1174  cellStateIn,
1175  scratchBuffer,
1177  cellStateOut,
1178  output,
1179  descriptor,
1180  paramsInfo);
1181 }
1182 
1184  const TensorInfo& input1,
1185  const TensorInfo& output,
1186  Optional<std::string&> reasonIfUnsupported) const
1187 {
1190  input0,
1191  input1,
1192  output);
1193 }
1194 
1196  const TensorInfo& output,
1197  const MeanDescriptor& descriptor,
1198  Optional<std::string&> reasonIfUnsupported) const
1199 {
1202  input,
1203  output,
1204  descriptor);
1205 }
1206 
1208  const TensorInfo& input1,
1209  const TensorInfo& output,
1210  Optional<std::string&> reasonIfUnsupported) const
1211 {
1214  input0,
1215  input1,
1216  output);
1217 }
1218 
1220  const TensorInfo& input1,
1221  const TensorInfo& output,
1222  Optional<std::string&> reasonIfUnsupported) const
1223 {
1226  input0,
1227  input1,
1228  output,
1229  nullptr);
1230 }
1231 
1233  const TensorInfo& input1,
1234  const TensorInfo& output,
1235  Optional<std::string&> reasonIfUnsupported) const
1236 {
1239  input0,
1240  input1,
1241  output,
1242  nullptr);
1243 }
1244 
1246  const TensorInfo& output,
1247  const NormalizationDescriptor& descriptor,
1248  Optional<std::string&> reasonIfUnsupported) const
1249 {
1252  input,
1253  output,
1254  descriptor);
1255 }
1256 
1258  Optional<std::string&> reasonIfUnsupported) const
1259 {
1260  return IsNeonBackendSupported(reasonIfUnsupported, output);
1261 }
1262 
1264  const TensorInfo& output,
1265  const PadDescriptor& descriptor,
1266  Optional<std::string&> reasonIfUnsupported) const
1267 {
1270  input,
1271  output,
1272  descriptor);
1273 }
1274 
1276  const TensorInfo& output,
1277  const PermuteDescriptor& descriptor,
1278  Optional<std::string&> reasonIfUnsupported) const
1279 {
1281 }
1282 
1284  const TensorInfo& output,
1285  const Pooling2dDescriptor& descriptor,
1286  Optional<std::string&> reasonIfUnsupported) const
1287 {
1289 }
1290 
1292  const TensorInfo& output,
1293  const Pooling3dDescriptor& descriptor,
1294  Optional<std::string&> reasonIfUnsupported) const
1295 {
1297 }
1298 
1300  const armnn::TensorInfo &alpha,
1301  const armnn::TensorInfo &output,
1302  armnn::Optional<std::string &> reasonIfUnsupported) const
1303 {
1305 }
1306 
1308  const TensorInfo& previousOutputIn,
1309  const TensorInfo& previousCellStateIn,
1310  const TensorInfo& outputStateOut,
1311  const TensorInfo& cellStateOut,
1312  const TensorInfo& output,
1313  const QLstmDescriptor& descriptor,
1314  const LstmInputParamsInfo& paramsInfo,
1315  Optional<std::string&> reasonIfUnsupported) const
1316 {
1317  // Check required here in order to pass IsLayerSupported for datatypes tests
1318  if (input.GetDataType() == armnn::DataType::QAsymmS8 &&
1324  {
1327  input,
1330  cellStateOut,
1332  output,
1333  descriptor,
1334  paramsInfo);
1335  }
1336  else
1337  {
1338  return false;
1339  }
1340 }
1341 
1343  const TensorInfo& output,
1344  Optional<std::string&> reasonIfUnsupported) const
1345 {
1348  input,
1349  output);
1350 }
1351 
1353  const TensorInfo& cellStateIn,
1354  const TensorInfo& outputStateIn,
1355  const TensorInfo& cellStateOut,
1356  const TensorInfo& outputStateOut,
1357  const QuantizedLstmInputParamsInfo& paramsInfo,
1358  Optional<std::string&> reasonIfUnsupported) const
1359 {
1362  input,
1363  cellStateIn,
1364  outputStateIn,
1365  cellStateOut,
1367  paramsInfo);
1368 }
1369 
1371  const TensorInfo& output,
1372  const ReduceDescriptor& descriptor,
1373  Optional<std::string&> reasonIfUnsupported) const
1374 {
1377  input,
1378  output,
1379  descriptor);
1380 }
1381 
1383  const TensorInfo& output,
1384  const ReshapeDescriptor& descriptor,
1385  Optional<std::string&> reasonIfUnsupported) const
1386 {
1390  input,
1391  output);
1392 }
1393 
1395  const TensorInfo& output,
1396  const ResizeDescriptor& descriptor,
1397  Optional<std::string&> reasonIfUnsupported) const
1398 {
1401  input,
1402  output,
1403  descriptor);
1404 }
1405 
1407  const TensorInfo& output,
1408  const SliceDescriptor& descriptor,
1409  Optional<std::string&> reasonIfUnsupported) const
1410 {
1413  input,
1414  output,
1415  descriptor);
1416 }
1417 
1419  const TensorInfo& output,
1420  const SoftmaxDescriptor& descriptor,
1421  Optional<std::string&> reasonIfUnsupported) const
1422 {
1424 }
1425 
1427  const TensorInfo& output,
1428  const SpaceToBatchNdDescriptor& descriptor,
1429  Optional<std::string&> reasonIfUnsupported) const
1430 {
1433  input,
1434  output,
1435  descriptor);
1436 }
1437 
1439  const TensorInfo& output,
1440  const SpaceToDepthDescriptor& descriptor,
1441  Optional<std::string&> reasonIfUnsupported) const
1442 {
1445  input,
1446  output,
1447  descriptor);
1448 }
1449 
1451  const std::vector<std::reference_wrapper<TensorInfo>>& outputs,
1452  const ViewsDescriptor& descriptor,
1453  Optional<std::string&> reasonIfUnsupported) const
1454 {
1455 #if defined(ARMCOMPUTENEON_ENABLED)
1456  // Split along the last dimension, cannot use sub-tensors
1457  // as width and height of the sub-tensors do not match
1458  // the width and height of the parent tensor
1459  // in case of input with more than 2D.
1460  std::set<unsigned int> splitAxis = ComputeSplitAxis(descriptor, input.GetShape());
1461  if (descriptor.GetNumDimensions() > 2 && splitAxis.size() == 1 &&
1462  *splitAxis.begin() == descriptor.GetNumDimensions() - 1 )
1463  {
1466  input,
1467  outputs,
1468  *splitAxis.begin());
1469  }
1470 #endif
1472  for (auto output : outputs)
1473  {
1474  if (!input.IsTypeSpaceMatch(output)) // Cannot use sub-tensors if the types are not same space
1475  {
1476  SetValueChecked(reasonIfUnsupported, "Neon Splitter: Types and quantization parameters must match.");
1477  return false;
1478  }
1479  }
1480  return true;
1481 }
1482 
1483 bool NeonLayerSupport::IsStackSupported(const std::vector<const TensorInfo*>& inputs,
1484  const TensorInfo& output,
1485  const StackDescriptor& descriptor,
1486  Optional<std::string&> reasonIfUnsupported) const
1487 {
1490  inputs,
1491  output,
1492  descriptor);
1493 }
1494 
1496  const TensorInfo& output,
1497  const StridedSliceDescriptor& descriptor,
1498  Optional<std::string&> reasonIfUnsupported) const
1499 {
1502  input,
1503  output,
1504  descriptor);
1505 }
1506 
1508  const TensorInfo& input1,
1509  const TensorInfo& output,
1510  Optional<std::string&> reasonIfUnsupported) const
1511 {
1514  input0,
1515  input1,
1516  output,
1517  nullptr);
1518 }
1519 
1521  const TensorInfo& output,
1522  const TransposeConvolution2dDescriptor& descriptor,
1523  const TensorInfo& weights,
1524  const Optional<TensorInfo>& biases,
1525  Optional<std::string&> reasonIfUnsupported) const
1526 {
1529  input,
1530  output,
1531  descriptor,
1532  weights,
1533  biases);
1534 }
1535 
1537  const TensorInfo& output,
1538  const TransposeDescriptor& descriptor,
1539  Optional<std::string&> reasonIfUnsupported) const
1540 {
1542 }
1543 
1545  const TensorInfo& outputStateIn,
1546  const TensorInfo& cellStateIn,
1547  const TensorInfo& outputStateOut,
1548  const TensorInfo& cellStateOut,
1549  const TensorInfo& output,
1550  const UnidirectionalSequenceLstmDescriptor& descriptor,
1551  const LstmInputParamsInfo& paramsInfo,
1552  Optional<std::string&> reasonIfUnsupported) const
1553 {
1554  if (input.GetDataType() == armnn::DataType::QAsymmS8 &&
1560  {
1563  input,
1564  outputStateIn,
1565  cellStateIn,
1567  cellStateOut,
1568  output,
1569  descriptor,
1570  paramsInfo);
1571  }
1572  else
1573  {
1576  input,
1577  outputStateIn,
1578  cellStateIn,
1580  cellStateOut,
1581  output,
1582  descriptor,
1583  paramsInfo);
1584  }
1585 }
1586 
1587 } // namespace armnn
armnn::NeonLayerSupport::IsResizeSupported
bool IsResizeSupported(const TensorInfo &input, const TensorInfo &output, const ResizeDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: NeonLayerSupport.cpp:1394
armnn::NeonBatchMatMulValidate
arm_compute::Status NeonBatchMatMulValidate(const TensorInfo &inputX, const TensorInfo &inputY, const TensorInfo &output, const BatchMatMulDescriptor &descriptor)
Definition: NeonBatchMatMulWorkload.cpp:23
armnn::LayerSupportBase::IsMemImportSupported
bool IsMemImportSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: LayerSupportBase.cpp:397
armnn::LayerType::Floor
@ Floor
armnn::NeonLayerSupport::IsBatchToSpaceNdSupported
bool IsBatchToSpaceNdSupported(const TensorInfo &input, const TensorInfo &output, const BatchToSpaceNdDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: NeonLayerSupport.cpp:740
armnn::NeonLayerSupport::IsPooling2dSupported
bool IsPooling2dSupported(const TensorInfo &input, const TensorInfo &output, const Pooling2dDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: NeonLayerSupport.cpp:1283
armnn::NeonArgMinMaxWorkloadValidate
arm_compute::Status NeonArgMinMaxWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const ArgMinMaxDescriptor &descriptor)
Definition: NeonArgMinMaxWorkload.cpp:31
armnn::NeonSoftmaxWorkloadValidate
arm_compute::Status NeonSoftmaxWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const SoftmaxDescriptor &descriptor)
Definition: NeonSoftmaxWorkload.cpp:19
NeonPooling2dWorkload.hpp
armnn::LayerType::MemCopy
@ MemCopy
armnn::NeonLayerSupport::IsElementwiseUnarySupported
bool IsElementwiseUnarySupported(const TensorInfo &input, const TensorInfo &output, const ElementwiseUnaryDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: NeonLayerSupport.cpp:978
armnn::LayerType::Softmax
@ Softmax
armnn::LayerType::Pooling3d
@ Pooling3d
armnn::LayerType::FullyConnected
@ FullyConnected
NeonQuantizeWorkload.hpp
armnn::ILayerSupport::outputStateIn
const TensorInfo & outputStateIn
Definition: ILayerSupport.hpp:286
armnn::LayerType::Transpose
@ Transpose
armnn::NeonSplitterWorkloadValidate
arm_compute::Status NeonSplitterWorkloadValidate(const TensorInfo &input, const std::vector< std::reference_wrapper< TensorInfo >> &outputs, unsigned int splitAxis)
Definition: NeonSplitterWorkload.cpp:32
NeonAbsWorkload.hpp
armnn::LstmInputParamsInfo
Definition: LstmParams.hpp:63
armnn::LayerType::ChannelShuffle
@ ChannelShuffle
armnn::NeonStridedSliceWorkloadValidate
arm_compute::Status NeonStridedSliceWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const StridedSliceDescriptor &descriptor)
Definition: NeonStridedSliceWorkload.cpp:19
armnn::ILayerSupport::paramsInfo
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const LstmDescriptor const LstmInputParamsInfo & paramsInfo
Definition: ILayerSupport.hpp:293
NeonResizeWorkload.hpp
armnn::GatherDescriptor
A GatherDescriptor for the GatherLayer.
Definition: Descriptors.hpp:912
armnn::NeonLayerSupport::IsCastSupported
bool IsCastSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: NeonLayerSupport.cpp:752
armnn::NormalizationDescriptor
A NormalizationDescriptor for the NormalizationLayer.
Definition: Descriptors.hpp:737
armnn::TransposeDescriptor
A TransposeDescriptor for the TransposeLayer.
Definition: Descriptors.hpp:1437
NeonQLstmWorkload.hpp
armnn::NeonLayerSupport::IsSoftmaxSupported
bool IsSoftmaxSupported(const TensorInfo &input, const TensorInfo &output, const SoftmaxDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: NeonLayerSupport.cpp:1418
armnn::NeonLayerSupport::IsDepthToSpaceSupported
bool IsDepthToSpaceSupported(const TensorInfo &input, const TensorInfo &output, const DepthToSpaceDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: NeonLayerSupport.cpp:922
NeonActivationWorkload.hpp
NeonArgMinMaxWorkload.hpp
armnn::ElementwiseUnaryDescriptor
A ElementwiseUnaryDescriptor for the ElementwiseUnaryLayer.
Definition: Descriptors.hpp:109
armnn::NeonGatherNdWorkloadValidate
arm_compute::Status NeonGatherNdWorkloadValidate(const TensorInfo &paramsInfo, const TensorInfo &indicesInfo, const TensorInfo &outputInfo)
Definition: NeonGatherNdWorkload.cpp:14
armnn::PadDescriptor
A PadDescriptor for the PadLayer.
Definition: Descriptors.hpp:1143
armnn::SoftmaxDescriptor
A SoftmaxDescriptor for the SoftmaxLayer.
Definition: Descriptors.hpp:157
armnn::LayerType::ConvertFp32ToFp16
@ ConvertFp32ToFp16
armnn::LayerType::L2Normalization
@ L2Normalization
armnn::NeonLayerSupport::IsArgMinMaxSupported
bool IsArgMinMaxSupported(const TensorInfo &input, const TensorInfo &output, const ArgMinMaxDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: NeonLayerSupport.cpp:693
NeonGatherNdWorkload.hpp
armnn::LogicalBinaryOperation::LogicalAnd
@ LogicalAnd
armnn::StackDescriptor
A StackDescriptor for the StackLayer.
Definition: Descriptors.hpp:1198
armnn::LayerType::TransposeConvolution2d
@ TransposeConvolution2d
NeonBatchToSpaceNdWorkload.hpp
armnn::SliceDescriptor
A SliceDescriptor for the SliceLayer.
Definition: Descriptors.hpp:1175
armnn::NeonBackendModelContext
The NeonBackendModelContext is used to pass in Neon specific backend ModelOptions.
Definition: NeonBackendModelContext.hpp:19
armnn::ILayerSupport::scratchBuffer
const TensorInfo const TensorInfo const TensorInfo & scratchBuffer
Definition: ILayerSupport.hpp:288
armnn::LayerType::Map
@ Map
armnn::NeonLayerSupport::IsPermuteSupported
bool IsPermuteSupported(const TensorInfo &input, const TensorInfo &output, const PermuteDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: NeonLayerSupport.cpp:1275
NeonSplitterWorkload.hpp
armnn::NeonLayerSupport::IsSpaceToDepthSupported
bool IsSpaceToDepthSupported(const TensorInfo &input, const TensorInfo &output, const SpaceToDepthDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: NeonLayerSupport.cpp:1438
armnn::LayerType::Input
@ Input
NeonBackendModelContext.hpp
armnn::NeonQuantizeWorkloadValidate
arm_compute::Status NeonQuantizeWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: NeonQuantizeWorkload.cpp:18
armnn::LayerType::Slice
@ Slice
armnn::ActivationDescriptor
An ActivationDescriptor for the ActivationLayer.
Definition: Descriptors.hpp:36
armnn::ILayerSupport::reasonIfUnsupported
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
Definition: ILayerSupport.hpp:43
armnn::LstmDescriptor
An LstmDescriptor for the LstmLayer.
Definition: Descriptors.hpp:1049
armnn::NeonLayerSupport::IsTransposeSupported
bool IsTransposeSupported(const TensorInfo &input, const TensorInfo &output, const TransposeDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: NeonLayerSupport.cpp:1536
NeonConstantWorkload.hpp
NeonExpWorkload.hpp
armnn::LayerType::Maximum
@ Maximum
armnn::FullyConnectedDescriptor
A FullyConnectedDescriptor for the FullyConnectedLayer.
Definition: Descriptors.hpp:475
NeonMaximumWorkload.hpp
armnn::LayerType::Quantize
@ Quantize
armnn::NeonPadWorkloadValidate
arm_compute::Status NeonPadWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const PadDescriptor &descriptor)
Definition: NeonPadWorkload.cpp:59
armnn::NeonLayerSupport::IsSplitterSupported
bool IsSplitterSupported(const TensorInfo &input, const std::vector< std::reference_wrapper< TensorInfo >> &outputs, const ViewsDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: NeonLayerSupport.cpp:1450
armnn::NeonAbsWorkloadValidate
arm_compute::Status NeonAbsWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: NeonAbsWorkload.cpp:17
NeonInstanceNormalizationWorkload.hpp
NeonDequantizeWorkload.hpp
armnn::BatchMatMulDescriptor
A BatchMatMulDescriptor for the BatchMatMul operator.
Definition: Descriptors.hpp:1531
armnn::ResizeDescriptor
A ResizeDescriptor for the ResizeLayer.
Definition: Descriptors.hpp:932
armnn::LayerType::ArgMinMax
@ ArgMinMax
NeonConcatWorkload.hpp
armnn::StridedSliceDescriptor
A StridedSliceDescriptor for the StridedSliceLayer.
Definition: Descriptors.hpp:1250
armnn::LayerType::Subtraction
@ Subtraction
NeonL2NormalizationFloatWorkload.hpp
armnn::NeonLayerSupport::IsMinimumSupported
bool IsMinimumSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: NeonLayerSupport.cpp:1207
armnn::LayerType::SpaceToBatchNd
@ SpaceToBatchNd
armnn::LayerType::Convolution2d
@ Convolution2d
armnn::UnaryOperation::Exp
@ Exp
armnn::Pooling3dDescriptor
A Pooling3dDescriptor for the Pooling3dLayer.
Definition: Descriptors.hpp:399
armnn::ReduceDescriptor
A ReduceDescriptor for the REDUCE operators.
Definition: Descriptors.hpp:1485
armnn::NeonLogicalAndWorkloadValidate
arm_compute::Status NeonLogicalAndWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output)
Definition: NeonLogicalAndWorkload.cpp:18
PolymorphicDowncast.hpp
armnn::LayerType::Shape
@ Shape
armnn::ILayerSupport::previousOutputIn
const TensorInfo & previousOutputIn
Definition: ILayerSupport.hpp:405
NeonBatchMatMulWorkload.hpp
NeonMeanWorkload.hpp
armnn::ComparisonDescriptor
A ComparisonDescriptor for the ComparisonLayer.
Definition: Descriptors.hpp:89
armnn::IgnoreUnused
void IgnoreUnused(Ts &&...)
Definition: IgnoreUnused.hpp:14
armnn::UnaryOperation::Sqrt
@ Sqrt
armnn::UnaryOperation::Neg
@ Neg
NeonLogicalNotWorkload.hpp
armnn::NeonConvolution2dWorkloadValidate
arm_compute::Status NeonConvolution2dWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const Convolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, bool isFastMathEnabled, const ActivationDescriptor *activationDescriptor)
Definition: NeonConvolution2dWorkload.cpp:24
armnn::NeonTransposeConvolution2dWorkloadValidate
arm_compute::Status NeonTransposeConvolution2dWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const TransposeConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases)
Definition: NeonTransposeConvolution2dWorkload.cpp:25
armnn::ILayerSupport::mean
const TensorInfo const TensorInfo & mean
Definition: ILayerSupport.hpp:63
NeonMinimumWorkload.hpp
armnn::NeonPooling3dWorkloadValidate
arm_compute::Status NeonPooling3dWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const Pooling3dDescriptor &descriptor)
Definition: NeonPooling3dWorkload.cpp:15
armnn::LayerSupportBase::IsMergeSupported
bool IsMergeSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: LayerSupportBase.cpp:404
armnn::LayerType::Merge
@ Merge
armnn::LayerSupportBase::IsShapeSupported
bool IsShapeSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: LayerSupportBase.cpp:551
armnn::ViewsDescriptor
A ViewsDescriptor for the SplitterLayer.
Definition: Descriptors.hpp:224
armnn::NeonCastValidate
arm_compute::Status NeonCastValidate(const TensorInfo &input, const TensorInfo &output)
Definition: NeonCastWorkload.cpp:19
armnn::LayerType::Permute
@ Permute
armnn::NeonLayerSupport::IsComparisonSupported
bool IsComparisonSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ComparisonDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: NeonLayerSupport.cpp:774
NeonChannelShuffleWorkload.hpp
armnn::LayerType::ConvertFp16ToFp32
@ ConvertFp16ToFp32
armnn::NeonSpaceToBatchNdWorkloadValidate
arm_compute::Status NeonSpaceToBatchNdWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const SpaceToBatchNdDescriptor &descriptor)
Definition: NeonSpaceToBatchNdWorkload.cpp:20
armnn::LayerType::QLstm
@ QLstm
armnn::LayerType::Pad
@ Pad
armnn::NeonLogWorkloadValidate
arm_compute::Status NeonLogWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: NeonLogWorkload.cpp:17
armnn::LayerType::Addition
@ Addition
armnn::LayerType::QuantizedLstm
@ QuantizedLstm
armnn::DataType::QAsymmS8
@ QAsymmS8
armnn::LayerType::BatchNormalization
@ BatchNormalization
armnn::NeonLayerSupport::IsDivisionSupported
bool IsDivisionSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: NeonLayerSupport.cpp:1232
armnn::LayerType::Reduce
@ Reduce
armnn::NeonLayerSupport::IsStackSupported
bool IsStackSupported(const std::vector< const TensorInfo * > &inputs, const TensorInfo &output, const StackDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: NeonLayerSupport.cpp:1483
NeonSubtractionWorkload.hpp
armnn::NeonLayerSupport::IsMeanSupported
bool IsMeanSupported(const TensorInfo &input, const TensorInfo &output, const MeanDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: NeonLayerSupport.cpp:1195
armnn::NeonActivationWorkloadValidate
arm_compute::Status NeonActivationWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const ActivationDescriptor &descriptor)
Definition: NeonActivationWorkload.cpp:17
NeonLayerSupport.hpp
armnn::LayerType::Division
@ Division
armnn::NeonLayerSupport::IsMultiplicationSupported
bool IsMultiplicationSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: NeonLayerSupport.cpp:1219
NeonSpaceToBatchNdWorkload.hpp
armnn::NeonLayerSupport::IsQuantizeSupported
bool IsQuantizeSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: NeonLayerSupport.cpp:1342
armnn
Copyright (c) 2021 ARM Limited and Contributors.
Definition: 01_00_quick_start.dox:6
armnn::LayerType::InstanceNormalization
@ InstanceNormalization
armnn::NeonLayerSupport::IsSpaceToBatchNdSupported
bool IsSpaceToBatchNdSupported(const TensorInfo &input, const TensorInfo &output, const SpaceToBatchNdDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: NeonLayerSupport.cpp:1426
NeonSqrtWorkload.hpp
armnn::NeonAdditionWorkloadValidate
arm_compute::Status NeonAdditionWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ActivationDescriptor *activationDescriptor)
Definition: NeonAdditionWorkload.cpp:20
armnn::NeonFullyConnectedWorkloadValidate
arm_compute::Status NeonFullyConnectedWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const TensorInfo &weights, const Optional< TensorInfo > &biases, const FullyConnectedDescriptor &descriptor, const ActivationDescriptor *activationDescriptor)
Definition: NeonFullyConnectedWorkload.cpp:24
armnn::NeonLayerSupport::IsNormalizationSupported
bool IsNormalizationSupported(const TensorInfo &input, const TensorInfo &output, const NormalizationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: NeonLayerSupport.cpp:1245
NeonReduceWorkload.hpp
armnn::NeonLayerSupport::IsBatchNormalizationSupported
bool IsBatchNormalizationSupported(const TensorInfo &input, const TensorInfo &output, const TensorInfo &mean, const TensorInfo &var, const TensorInfo &beta, const TensorInfo &gamma, const BatchNormalizationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: NeonLayerSupport.cpp:719
armnn::SetValueChecked
void SetValueChecked(Optional< T & > optionalRef, V &&val)
Definition: LayerSupportCommon.hpp:17
NeonLogicalAndWorkload.hpp
armnn::BatchToSpaceNdDescriptor
A BatchToSpaceNdDescriptor for the BatchToSpaceNdLayer.
Definition: Descriptors.hpp:843
armnn::ComputeSplitAxis
std::set< unsigned int > ComputeSplitAxis(const armnn::SplitterDescriptor &desc, const TensorShape &input)
Definition: ArmComputeUtils.hpp:244
NeonUnidirectionalSequenceLstmFloatWorkload.hpp
armnn::OptionalReferenceSwitch< std::is_reference< T >::value, T >::value
const T & value() const
Definition: Optional.hpp:146
armnn::SpaceToDepthDescriptor
A SpaceToDepthDescriptor for the SpaceToDepthLayer.
Definition: Descriptors.hpp:1022
NeonUnidirectionalSequenceLstmWorkload.hpp
NeonConvolution3dWorkload.hpp
armnn::NeonLayerSupport::IsDepthwiseConvolutionSupported
bool IsDepthwiseConvolutionSupported(const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: NeonLayerSupport.cpp:934
armnn::LayerType::Activation
@ Activation
armnn::NeonLayerSupport::IsTransposeConvolution2dSupported
bool IsTransposeConvolution2dSupported(const TensorInfo &input, const TensorInfo &output, const TransposeConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: NeonLayerSupport.cpp:1520
NeonMultiplicationWorkload.hpp
armnn::LayerType::Normalization
@ Normalization
armnn::NeonConstantWorkloadValidate
arm_compute::Status NeonConstantWorkloadValidate(const TensorInfo &output)
Definition: NeonConstantWorkload.cpp:20
NeonStridedSliceWorkload.hpp
armnn::NeonL2NormalizationWorkloadValidate
arm_compute::Status NeonL2NormalizationWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const L2NormalizationDescriptor &descriptor)
Definition: NeonL2NormalizationFloatWorkload.cpp:19
armnn::NeonLayerSupport::IsBatchMatMulSupported
bool IsBatchMatMulSupported(const TensorInfo &inputX, const TensorInfo &inputY, const TensorInfo &output, const BatchMatMulDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:705
NeonDivisionWorkload.hpp
NeonBatchNormalizationWorkload.hpp
armnn::NeonDequantizeWorkloadValidate
arm_compute::Status NeonDequantizeWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: NeonDequantizeWorkload.cpp:22
armnn::NeonBackendModelContext::IsFastMathEnabled
bool IsFastMathEnabled() const
Definition: NeonBackendModelContext.cpp:53
armnn::NeonLayerSupport::IsInstanceNormalizationSupported
bool IsInstanceNormalizationSupported(const TensorInfo &input, const TensorInfo &output, const InstanceNormalizationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: NeonLayerSupport.cpp:1106
armnn::LayerType::Comparison
@ Comparison
armnn::LayerType::Stack
@ Stack
armnn::ILayerSupport::descriptor
const TensorInfo const ActivationDescriptor & descriptor
Definition: ILayerSupport.hpp:42
armnn::NeonLayerSupport::IsConstantSupported
bool IsConstantSupported(const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: NeonLayerSupport.cpp:828
armnn::LayerSupportBase::IsDetectionPostProcessSupported
bool IsDetectionPostProcessSupported(const TensorInfo &boxEncodings, const TensorInfo &scores, const TensorInfo &anchors, const TensorInfo &detectionBoxes, const TensorInfo &detectionClasses, const TensorInfo &detectionScores, const TensorInfo &numDetections, const DetectionPostProcessDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: LayerSupportBase.cpp:233
armnn::FillDescriptor
A FillDescriptor for the FillLayer.
Definition: Descriptors.hpp:893
NeonReshapeWorkload.hpp
armnn::NeonDepthToSpaceWorkloadValidate
arm_compute::Status NeonDepthToSpaceWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const DepthToSpaceDescriptor &descriptor)
Definition: NeonDepthToSpaceWorkload.cpp:19
armnn::LayerType
LayerType
When adding a new layer, adapt also the LastLayer enum value in the enum class LayerType below.
Definition: Types.hpp:466
armnn::LayerType::Reshape
@ Reshape
armnn::NeonLayerSupport::IsOutputSupported
bool IsOutputSupported(const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: NeonLayerSupport.cpp:1257
armnn::NeonLayerSupport::IsSliceSupported
bool IsSliceSupported(const TensorInfo &input, const TensorInfo &output, const SliceDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: NeonLayerSupport.cpp:1406
armnn::IBackendInternal::IBackendSpecificModelContextPtr
std::shared_ptr< IBackendModelContext > IBackendSpecificModelContextPtr
Definition: IBackendInternal.hpp:96
armnn::ILayerSupport::previousCellStateIn
const TensorInfo const TensorInfo & previousCellStateIn
Definition: ILayerSupport.hpp:406
armnn::LayerType::Gather
@ Gather
armnn::LayerType::DepthwiseConvolution2d
@ DepthwiseConvolution2d
armnn::LogicalBinaryOperation::LogicalOr
@ LogicalOr
armnn::NeonResizeWorkloadValidate
arm_compute::Status NeonResizeWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const ResizeDescriptor &descriptor)
Definition: NeonResizeWorkload.cpp:22
armnn::LayerType::Fill
@ Fill
armnn::LayerType::Resize
@ Resize
NeonFullyConnectedWorkload.hpp
armnn::NeonBatchNormalizationValidate
arm_compute::Status NeonBatchNormalizationValidate(const TensorInfo &input, const TensorInfo &output, const TensorInfo &mean, const TensorInfo &var, const TensorInfo &beta, const TensorInfo &gamma, const BatchNormalizationDescriptor &descriptor, const ActivationDescriptor *activationDescriptor)
Definition: NeonBatchNormalizationWorkload.cpp:24
NeonGatherWorkload.hpp
armnn::ILayerSupport::alpha
const TensorInfo & alpha
Definition: ILayerSupport.hpp:392
NeonNegWorkload.hpp
armnn::NeonMeanWorkloadValidate
arm_compute::Status NeonMeanWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const MeanDescriptor &descriptor)
Definition: NeonMeanWorkload.cpp:18
armnn::NeonLayerSupport::IsReduceSupported
bool IsReduceSupported(const TensorInfo &input, const TensorInfo &output, const ReduceDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: NeonLayerSupport.cpp:1370
armnn::NeonNegWorkloadValidate
arm_compute::Status NeonNegWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: NeonNegWorkload.cpp:17
armnn::LayerType::Rank
@ Rank
armnn::DepthwiseConvolution2dDescriptor
A DepthwiseConvolution2dDescriptor for the DepthwiseConvolution2dLayer.
Definition: Descriptors.hpp:627
armnn::MeanDescriptor
A MeanDescriptor for the MeanLayer.
Definition: Descriptors.hpp:1119
armnn::TensorInfo::GetNumDimensions
unsigned int GetNumDimensions() const
Definition: Tensor.hpp:195
armnn::NeonInstanceNormalizationWorkloadValidate
arm_compute::Status NeonInstanceNormalizationWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const InstanceNormalizationDescriptor &descriptor)
Definition: NeonInstanceNormalizationWorkload.cpp:19
NeonQuantizedLstmWorkload.hpp
armnn::NeonLayerSupport::IsChannelShuffleSupported
bool IsChannelShuffleSupported(const TensorInfo &input, const TensorInfo &output, const ChannelShuffleDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: NeonLayerSupport.cpp:762
armnn::NeonConcatWorkloadValidate
arm_compute::Status NeonConcatWorkloadValidate(const std::vector< const TensorInfo * > &inputs, const TensorInfo &output, const OriginsDescriptor &descriptor)
Definition: NeonConcatWorkload.cpp:27
NeonBackendId.hpp
armnn::TensorInfo::IsTypeSpaceMatch
bool IsTypeSpaceMatch(const TensorInfo &other) const
Check that the types are the same and, if quantize, that the quantization parameters are the same.
Definition: Tensor.cpp:432
ArmComputeTensorUtils.hpp
armnn::LayerType::LogicalBinary
@ LogicalBinary
armnn::NeonLayerSupport::IsLstmSupported
bool IsLstmSupported(const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &scratchBuffer, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const LstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: NeonLayerSupport.cpp:1159
armnn::LayerType::UnidirectionalSequenceLstm
@ UnidirectionalSequenceLstm
armnn::NeonLayerSupport::IsReshapeSupported
bool IsReshapeSupported(const TensorInfo &input, const TensorInfo &output, const ReshapeDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: NeonLayerSupport.cpp:1382
armnn::NeonSliceWorkloadValidate
arm_compute::Status NeonSliceWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const SliceDescriptor &descriptor)
Definition: NeonSliceWorkload.cpp:21
armnn::LayerType::Pooling2d
@ Pooling2d
InternalTypes.hpp
armnn::NeonReduceWorkloadValidate
arm_compute::Status NeonReduceWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const ReduceDescriptor &descriptor)
Definition: NeonReduceWorkload.cpp:19
armnn::L2NormalizationDescriptor
A L2NormalizationDescriptor for the L2NormalizationLayer.
Definition: Descriptors.hpp:777
armnn::NeonLayerSupport::IsL2NormalizationSupported
bool IsL2NormalizationSupported(const TensorInfo &input, const TensorInfo &output, const L2NormalizationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: NeonLayerSupport.cpp:1118
armnn::DataType::Float32
@ Float32
NeonNormalizationFloatWorkload.hpp
armnn::ILayerSupport::input1
const TensorInfo & input1
Definition: ILayerSupport.hpp:48
armnn::NeonBatchToSpaceNdWorkloadValidate
arm_compute::Status NeonBatchToSpaceNdWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const BatchToSpaceNdDescriptor &descriptor)
Definition: NeonBatchToSpaceNdWorkload.cpp:20
armnn::ChannelShuffleDescriptor
A ChannelShuffleDescriptor for the ChannelShuffle operator.
Definition: Descriptors.hpp:1509
armnn::NeonQuantizedLstmWorkloadValidate
arm_compute::Status NeonQuantizedLstmWorkloadValidate(const TensorInfo &input, const TensorInfo &cellStateIn, const TensorInfo &outputStateIn, const TensorInfo &cellStateOut, const TensorInfo &outputStateOut, const QuantizedLstmInputParamsInfo &paramsInfo)
Definition: NeonQuantizedLstmWorkload.cpp:131
armnn::Convolution3dDescriptor
A Convolution3dDescriptor for the Convolution3dLayer.
Definition: Descriptors.hpp:556
armnn::LayerType::GatherNd
@ GatherNd
armnn::NeonGatherWorkloadValidate
arm_compute::Status NeonGatherWorkloadValidate(const TensorInfo &input, const TensorInfo &indices, const TensorInfo &output, const GatherDescriptor &descriptor)
Definition: NeonGatherWorkload.cpp:13
ArmComputeUtils.hpp
armnn::TensorInfo
Definition: Tensor.hpp:152
armnn::ILayerSupport::gamma
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo & gamma
Definition: ILayerSupport.hpp:66
armnn::LayerType::Minimum
@ Minimum
NeonSinWorkload.hpp
armnn::LayerType::Constant
@ Constant
armnn::ILayerSupport::var
const TensorInfo const TensorInfo const TensorInfo & var
Definition: ILayerSupport.hpp:64
NeonDepthToSpaceWorkload.hpp
NeonSoftmaxWorkload.hpp
armnn::NeonMinimumWorkloadValidate
arm_compute::Status NeonMinimumWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output)
Validate function for validating the inputs and output.
Definition: NeonMinimumWorkload.cpp:15
armnn::Convolution2dDescriptor
A Convolution2dDescriptor for the Convolution2dLayer.
Definition: Descriptors.hpp:502
armnn::LayerType::Lstm
@ Lstm
armnn::NeonUnidirectionalSequenceLstmFloatWorkloadValidate
arm_compute::Status NeonUnidirectionalSequenceLstmFloatWorkloadValidate(const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const UnidirectionalSequenceLstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo)
Definition: NeonUnidirectionalSequenceLstmFloatWorkload.cpp:510
armnn::NeonReshapeWorkloadValidate
arm_compute::Status NeonReshapeWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: NeonReshapeWorkload.cpp:17
armnn::NeonLayerSupport::IsPreluSupported
bool IsPreluSupported(const TensorInfo &input, const TensorInfo &alpha, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: NeonLayerSupport.cpp:1299
armnn::BatchNormalizationDescriptor
A BatchNormalizationDescriptor for the BatchNormalizationLayer.
Definition: Descriptors.hpp:796
armnn::NeonPreluWorkloadValidate
arm_compute::Status NeonPreluWorkloadValidate(const TensorInfo &input, const TensorInfo &alpha, const TensorInfo &output)
Definition: NeonPreluWorkload.cpp:17
Tensor.hpp
armnn::QLstmDescriptor
A QLstmDescriptor for the QLstmLayer.
Definition: Descriptors.hpp:1327
LayerSupportCommon.hpp
armnn::LayerType::ElementwiseUnary
@ ElementwiseUnary
armnn::NeonLayerSupport::IsAdditionSupported
bool IsAdditionSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: NeonLayerSupport.cpp:680
NeonPermuteWorkload.hpp
NeonLstmFloatWorkload.hpp
armnn::NeonUnidirectionalSequenceLstmWorkloadValidate
arm_compute::Status NeonUnidirectionalSequenceLstmWorkloadValidate(const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const UnidirectionalSequenceLstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo)
Definition: NeonUnidirectionalSequenceLstmWorkload.cpp:491
armnn::EmptyOptional
EmptyOptional is used to initialize the Optional class in case we want to have default value for an O...
Definition: Optional.hpp:32
armnn::NeonLayerSupport::IsInputSupported
bool IsInputSupported(const TensorInfo &input, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: NeonLayerSupport.cpp:1100
armnn::Status
Status
Definition: Types.hpp:42
NeonConvolution2dWorkload.hpp
armnn::LayerType::SpaceToDepth
@ SpaceToDepth
armnn::NeonLayerSupport::IsConvertFp16ToFp32Supported
bool IsConvertFp16ToFp32Supported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: NeonLayerSupport.cpp:836
armnn::TensorInfo::GetShape
const TensorShape & GetShape() const
Definition: Tensor.hpp:191
armnn::ILayerSupport::beta
const TensorInfo const TensorInfo const TensorInfo const TensorInfo & beta
Definition: ILayerSupport.hpp:65
armnn::NeonSubtractionWorkloadValidate
arm_compute::Status NeonSubtractionWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ActivationDescriptor *activationDescriptor)
Definition: NeonSubtractionWorkload.cpp:22
NeonLogicalOrWorkload.hpp
armnn::NeonRsqrtWorkloadValidate
arm_compute::Status NeonRsqrtWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: NeonRsqrtWorkload.cpp:18
armnn::NeonLayerSupport::IsConcatSupported
bool IsConcatSupported(const std::vector< const TensorInfo * > inputs, const TensorInfo &output, const OriginsDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: NeonLayerSupport.cpp:789
NeonSliceWorkload.hpp
armnn::NeonLayerSupport::IsActivationSupported
bool IsActivationSupported(const TensorInfo &input, const TensorInfo &output, const ActivationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: NeonLayerSupport.cpp:667
NeonLogSoftmaxWorkload.hpp
BackendRegistry.hpp
armnn::NeonLayerSupport::IsConvertFp32ToFp16Supported
bool IsConvertFp32ToFp16Supported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: NeonLayerSupport.cpp:846
armnn::NeonLayerSupport::IsLogSoftmaxSupported
bool IsLogSoftmaxSupported(const TensorInfo &input, const TensorInfo &output, const LogSoftmaxDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: NeonLayerSupport.cpp:1151
NeonTransposeWorkload.hpp
NeonTransposeConvolution2dWorkload.hpp
armnn::ILayerSupport::weights
const TensorInfo const Convolution2dDescriptor const TensorInfo & weights
Definition: ILayerSupport.hpp:127
armnn::UnaryOperation::Abs
@ Abs
armnn::LayerType::StridedSlice
@ StridedSlice
armnn::ILayerSupport::cellStateIn
const TensorInfo const TensorInfo & cellStateIn
Definition: ILayerSupport.hpp:287
armnn::LayerType::Unmap
@ Unmap
NeonDepthwiseConvolutionWorkload.hpp
armnn::NeonLayerSupport::IsFillSupported
bool IsFillSupported(const TensorInfo &input, const TensorInfo &output, const FillDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: NeonLayerSupport.cpp:1030
NeonAdditionWorkload.hpp
armnn::NeonChannelShuffleValidate
arm_compute::Status NeonChannelShuffleValidate(const TensorInfo &input, const TensorInfo &output, const ChannelShuffleDescriptor &descriptor)
Definition: NeonChannelShuffleWorkload.cpp:17
armnn::LayerType::DetectionPostProcess
@ DetectionPostProcess
armnn::ILayerSupport::biases
const TensorInfo const Convolution2dDescriptor const TensorInfo const Optional< TensorInfo > & biases
Definition: ILayerSupport.hpp:128
armnn::NeonLayerSupport::IsFloorSupported
bool IsFloorSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: NeonLayerSupport.cpp:1042
armnn::LayerType::Mean
@ Mean
armnn::NeonSpaceToDepthWorkloadValidate
arm_compute::Status NeonSpaceToDepthWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const SpaceToDepthDescriptor &descriptor)
Definition: NeonSpaceToDepthWorkload.cpp:19
armnn::NeonLayerSupport::IsGatherNdSupported
bool IsGatherNdSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported) const
Definition: NeonLayerSupport.cpp:1088
NeonCastWorkload.hpp
armnn::BaseDescriptor
Base class for all descriptors.
Definition: Descriptors.hpp:22
NeonSpaceToDepthWorkload.hpp
armnn::OriginsDescriptor
An OriginsDescriptor for the ConcatLayer.
Definition: Descriptors.hpp:181
armnn::NeonPermuteWorkloadValidate
arm_compute::Status NeonPermuteWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const PermuteDescriptor &descriptor)
Definition: NeonPermuteWorkload.cpp:15
armnn::ReshapeDescriptor
A ReshapeDescriptor for the ReshapeLayer.
Definition: Descriptors.hpp:970
armnn::IsLayerTypeSupported
bool IsLayerTypeSupported(const LayerType &type, const std::vector< TensorInfo > &infos, const BaseDescriptor &descriptor, const Optional< LstmInputParamsInfo > &lstmParamsInfo, const Optional< QuantizedLstmInputParamsInfo > &quantizedLstmParamsInfo, Optional< std::string & > reasonIfUnsupported, const NeonLayerSupport &support)
Definition: NeonLayerSupport.cpp:167
armnn::LayerType::BatchToSpaceNd
@ BatchToSpaceNd
armnn::NeonLayerSupport::IsConvolution3dSupported
bool IsConvolution3dSupported(const TensorInfo &input, const TensorInfo &output, const Convolution3dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: NeonLayerSupport.cpp:889
armnn::DataType
DataType
Definition: Types.hpp:48
armnn::PermuteDescriptor
A PermuteDescriptor for the PermuteLayer.
Definition: Descriptors.hpp:129
armnn::NeonLayerSupport::IsStridedSliceSupported
bool IsStridedSliceSupported(const TensorInfo &input, const TensorInfo &output, const StridedSliceDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: NeonLayerSupport.cpp:1495
armnn::TransposeConvolution2dDescriptor
A TransposeConvolution2dDescriptor for the TransposeConvolution2dLayer.
Definition: Descriptors.hpp:1387
NeonPreluWorkload.hpp
armnn::IsSupportedForDataTypeGeneric
bool IsSupportedForDataTypeGeneric(Optional< std::string & > reasonIfUnsupported, DataType dataType, Float16Func float16FuncPtr, Float32Func float32FuncPtr, Uint8Func uint8FuncPtr, Int32Func int32FuncPtr, BooleanFunc booleanFuncPtr, Params &&... params)
Definition: LayerSupportCommon.hpp:27
armnn::LayerType::DepthToSpace
@ DepthToSpace
armnn::NeonExpWorkloadValidate
arm_compute::Status NeonExpWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: NeonExpWorkload.cpp:17
armnn::NeonSinWorkloadValidate
arm_compute::Status NeonSinWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: NeonSinWorkload.cpp:17
armnn::NeonSqrtWorkloadValidate
arm_compute::Status NeonSqrtWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: NeonSqrtWorkload.cpp:18
armnn::NeonLayerSupport::IsUnidirectionalSequenceLstmSupported
bool IsUnidirectionalSequenceLstmSupported(const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const UnidirectionalSequenceLstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string & > reasonIfUnsupported) const override
Definition: NeonLayerSupport.cpp:1544
armnn::NeonTransposeWorkloadValidate
arm_compute::Status NeonTransposeWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const TransposeDescriptor &descriptor)
Definition: NeonTransposeWorkload.cpp:15
NeonRsqrtWorkload.hpp
armnn::ILayerSupport::outputs
const std::vector< std::reference_wrapper< TensorInfo > > & outputs
Definition: ILayerSupport.hpp:488
armnn::NeonLayerSupport::IsGatherSupported
bool IsGatherSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const GatherDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported) const override
Definition: NeonLayerSupport.cpp:1074
armnn::Pooling2dDescriptor
A Pooling2dDescriptor for the Pooling2dLayer.
Definition: Descriptors.hpp:339
armnn::LogicalBinaryDescriptor
A LogicalBinaryDescriptor for the LogicalBinaryLayer.
Definition: Descriptors.hpp:1465
armnn::UnaryOperation::LogicalNot
@ LogicalNot
armnn::UnaryOperation::Sin
@ Sin
Exceptions.hpp
armnn::Optional
Definition: Optional.hpp:270
NeonPooling3dWorkload.hpp
armnn::NeonQLstmWorkloadValidate
arm_compute::Status NeonQLstmWorkloadValidate(const TensorInfo &input, const TensorInfo &cellStateIn, const TensorInfo &outputStateIn, const TensorInfo &cellStateOut, const TensorInfo &outputStateOut, const TensorInfo &output, const QLstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo)
Definition: NeonQLstmWorkload.cpp:243
armnn::NeonLayerSupport::IsLogicalBinarySupported
bool IsLogicalBinarySupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const LogicalBinaryDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported) const override
Definition: NeonLayerSupport.cpp:1126
armnn::NeonStackWorkloadValidate
arm_compute::Status NeonStackWorkloadValidate(const std::vector< const TensorInfo * > &inputs, const TensorInfo &output, const StackDescriptor &descriptor)
Definition: NeonStackWorkload.cpp:27
armnn::NeonDivisionWorkloadValidate
arm_compute::Status NeonDivisionWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ActivationDescriptor *activationDescriptor)
Definition: NeonDivisionWorkload.cpp:18
armnn::PolymorphicDowncast
DestType PolymorphicDowncast(SourceType *value)
Polymorphic downcast for build in pointers only.
Definition: PolymorphicDowncast.hpp:74
armnn::NeonLayerSupport::IsSubtractionSupported
bool IsSubtractionSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: NeonLayerSupport.cpp:1507
armnn::NeonLayerSupport::IsDequantizeSupported
bool IsDequantizeSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: NeonLayerSupport.cpp:951
FORWARD_WORKLOAD_VALIDATE_FUNC
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported,...)
Definition: NeonLayerSupport.cpp:149
armnn::NeonConvolution3dWorkloadValidate
arm_compute::Status NeonConvolution3dWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const Convolution3dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, bool isFastMathEnabled, const ActivationDescriptor *activationDescriptor)
Definition: NeonConvolution3dWorkload.cpp:24
armnn::LayerType::Concat
@ Concat
armnn::NeonLayerSupport::IsDilatedDepthwiseConvolutionSupported
bool IsDilatedDepthwiseConvolutionSupported(const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reason=EmptyOptional()) const override
Definition: NeonLayerSupport.cpp:961
armnn::ArgMinMaxDescriptor
An ArgMinMaxDescriptor for ArgMinMaxLayer.
Definition: Descriptors.hpp:67
armnn::UnaryOperation::Rsqrt
@ Rsqrt
armnn::DataType::QSymmS16
@ QSymmS16
armnn::NeonMultiplicationWorkloadValidate
arm_compute::Status NeonMultiplicationWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ActivationDescriptor *activationDescriptor)
Definition: NeonMultiplicationWorkload.cpp:19
armnn::NeonLayerSupport::IsQLstmSupported
bool IsQLstmSupported(const TensorInfo &input, const TensorInfo &previousOutputIn, const TensorInfo &previousCellStateIn, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const QLstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: NeonLayerSupport.cpp:1307
armnn::LayerType::Cast
@ Cast
IgnoreUnused.hpp
armnn::LayerType::BatchMatMul
@ BatchMatMul
NeonPadWorkload.hpp
armnn::LayerType::Convolution3d
@ Convolution3d
armnn::LayerType::Splitter
@ Splitter
armnn::NeonLayerSupport::IsPooling3dSupported
bool IsPooling3dSupported(const TensorInfo &input, const TensorInfo &output, const Pooling3dDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: NeonLayerSupport.cpp:1291
armnn::QuantizedLstmInputParamsInfo
Definition: QuantizedLstmParams.hpp:119
armnn::NeonLayerSupport::IsQuantizedLstmSupported
bool IsQuantizedLstmSupported(const TensorInfo &input, const TensorInfo &cellStateIn, const TensorInfo &outputStateIn, const TensorInfo &cellStateOut, const TensorInfo &outputStateOut, const QuantizedLstmInputParamsInfo &paramsInfo, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: NeonLayerSupport.cpp:1352
armnn::NeonMaximumWorkloadValidate
arm_compute::Status NeonMaximumWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output)
Definition: NeonMaximumWorkload.cpp:14
armnn::ILayerSupport::output
const TensorInfo & output
Definition: ILayerSupport.hpp:41
armnn::LayerType::LogSoftmax
@ LogSoftmax
NeonComparisonWorkload.hpp
armnn::NeonLogicalOrWorkloadValidate
arm_compute::Status NeonLogicalOrWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output)
Definition: NeonLogicalOrWorkload.cpp:18
Types.hpp
armnn::NeonLayerSupport::IsMaximumSupported
bool IsMaximumSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: NeonLayerSupport.cpp:1183
armnn::NeonComparisonWorkloadValidate
arm_compute::Status NeonComparisonWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ComparisonDescriptor &descriptor)
Definition: NeonComparisonWorkload.cpp:16
armnn::LayerType::Output
@ Output
armnn::NeonLayerSupport::IsFullyConnectedSupported
bool IsFullyConnectedSupported(const TensorInfo &input, const TensorInfo &output, const TensorInfo &weights, const TensorInfo &biases, const FullyConnectedDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: NeonLayerSupport.cpp:1057
armnn::NeonNormalizationWorkloadValidate
arm_compute::Status NeonNormalizationWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const NormalizationDescriptor &descriptor)
Definition: NeonNormalizationFloatWorkload.cpp:49
armnn::InvalidArgumentException
Definition: Exceptions.hpp:80
armnn::LayerType::Multiplication
@ Multiplication
armnn::LayerType::MemImport
@ MemImport
armnn::LayerType::Prelu
@ Prelu
armnn::ILayerSupport::outputStateOut
const TensorInfo const TensorInfo const TensorInfo const TensorInfo & outputStateOut
Definition: ILayerSupport.hpp:289
armnn::InstanceNormalizationDescriptor
An InstanceNormalizationDescriptor for InstanceNormalizationLayer.
Definition: Descriptors.hpp:815
armnn::NeonPooling2dWorkloadValidate
arm_compute::Status NeonPooling2dWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const Pooling2dDescriptor &descriptor)
Definition: NeonPooling2dWorkload.cpp:22
armnn::NeonDepthwiseConvolutionWorkloadValidate
arm_compute::Status NeonDepthwiseConvolutionWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, const ActivationDescriptor *activationDescriptor)
Definition: NeonDepthwiseConvolutionWorkload.cpp:29
armnn::NeonLayerSupport::NeonLayerSupport
NeonLayerSupport()
Definition: NeonLayerSupport.cpp:162
armnn::NeonLayerSupport
Definition: NeonLayerSupport.hpp:14
armnn::ILayerSupport::cellStateOut
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo & cellStateOut
Definition: ILayerSupport.hpp:290
armnn::NeonLayerSupport::IsPadSupported
bool IsPadSupported(const TensorInfo &input, const TensorInfo &output, const PadDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: NeonLayerSupport.cpp:1263
armnn::LayerType::Dequantize
@ Dequantize
armnn::LayerSupportBase::IsMemCopySupported
bool IsMemCopySupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: LayerSupportBase.cpp:390
armnn::TensorInfo::GetDataType
DataType GetDataType() const
Definition: Tensor.hpp:198
armnn::NeonLayerSupport::IsLayerSupported
bool IsLayerSupported(const LayerType &type, const std::vector< TensorInfo > &infos, const BaseDescriptor &descriptor, const Optional< LstmInputParamsInfo > &lstmParamsInfo, const Optional< QuantizedLstmInputParamsInfo > &quantizedLstmParamsInfo, Optional< std::string & > reasonIfUnsupported) const override
Definition: NeonLayerSupport.cpp:619
NeonStackWorkload.hpp
armnn::NeonLogSoftmaxWorkloadValidate
arm_compute::Status NeonLogSoftmaxWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const LogSoftmaxDescriptor &descriptor)
Definition: NeonLogSoftmaxWorkload.cpp:19
armnn::UnaryOperation::Log
@ Log
armnn::NeonLogicalNotWorkloadValidate
arm_compute::Status NeonLogicalNotWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: NeonLogicalNotWorkload.cpp:19
armnn::NeonLstmFloatWorkloadValidate
arm_compute::Status NeonLstmFloatWorkloadValidate(const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &scratchBuffer, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const LstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo)
Definition: NeonLstmFloatWorkload.cpp:253
armnn::BoostLogSeverityMapping::info
@ info
armnn::SpaceToBatchNdDescriptor
A SpaceToBatchNdDescriptor for the SpaceToBatchNdLayer.
Definition: Descriptors.hpp:990
armnn::NeonLayerSupport::IsConvolution2dSupported
bool IsConvolution2dSupported(const TensorInfo &input, const TensorInfo &output, const Convolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: NeonLayerSupport.cpp:856
NeonLogWorkload.hpp