ArmNN
 23.02
ClLayerSupport.cpp
Go to the documentation of this file.
1 //
2 // Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
6 #include "ClLayerSupport.hpp"
7 #include "ClBackendId.hpp"
9 
11 
12 #include <InternalTypes.hpp>
13 #include <LayerSupportCommon.hpp>
14 
17 
18 #if defined(ARMCOMPUTECL_ENABLED)
86 #endif
87 
88 
89 namespace armnn
90 {
91 
92 namespace
93 {
94 
95 template<unsigned int FilterSize>
96 bool IsMatchingSize2d(const TensorInfo& weightInfo)
97 {
98  // Width & Height must match.
99  return (weightInfo.GetShape()[3] == FilterSize) && (weightInfo.GetShape()[2] == FilterSize);
100 }
101 
102 template<uint32_t ValidStride>
103 bool IsMatchingStride(uint32_t actualStride)
104 {
105  return ValidStride == actualStride;
106 }
107 
108 template<uint32_t FirstStride, uint32_t SecondStride, uint32_t... ValidStrides>
109 bool IsMatchingStride(uint32_t actualStride)
110 {
111  return IsMatchingStride<FirstStride>(actualStride) || IsMatchingStride<SecondStride, ValidStrides...>(actualStride);
112 }
113 
114 template<typename ... Args>
115 bool IsClBackendSupported(Optional<std::string&> reasonIfUnsupported, Args... args)
116 {
117  IgnoreUnused(reasonIfUnsupported, (args)...);
118 #if defined(ARMCOMPUTECL_ENABLED)
119  return true;
120 #else
121  if (reasonIfUnsupported)
122  {
123  reasonIfUnsupported.value() = "The armnn library has been built without CL support";
124  }
125  return false;
126 #endif
127 }
128 
#if defined(ARMCOMPUTECL_ENABLED)
// CL build: evaluate the wrapped support expression as-is.
#define FORWARD_CL_LAYER_SUPPORT_FUNC(expr) (expr)
#else
// Non-CL build: the expression is discarded and the call reports that CL
// support is absent. Relies on a variable named 'reasonIfUnsupported'
// being in scope at every expansion site.
#define FORWARD_CL_LAYER_SUPPORT_FUNC(expr) IsClBackendSupported(reasonIfUnsupported)
#endif
134 
135 #if defined(ARMCOMPUTECL_ENABLED)
136 template<class FuncType, class... Args>
137 inline bool IsWorkloadSupported(FuncType&& func, Optional<std::string&> reasonIfUnsupported, Args&&... args)
138 {
139  arm_compute::Status aclStatus = func(std::forward<Args>(args)...);
140  const bool supported = (aclStatus.error_code() == arm_compute::ErrorCode::OK);
141  if (!supported && reasonIfUnsupported)
142  {
143  reasonIfUnsupported.value() = aclStatus.error_description();
144  }
145  return supported;
146 }
147 
// CL build: expands to a return statement that runs the given validate
// function and converts its status via IsWorkloadSupported. Intended to be
// the tail of an IsXxxSupported method.
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported, ...) \
    return IsWorkloadSupported(func, reasonIfUnsupported, __VA_ARGS__);
#else
// Non-CL build: expands to a return statement that reports missing CL
// support instead of validating.
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported, ...) \
    return IsClBackendSupported(reasonIfUnsupported, __VA_ARGS__);
#endif
154 
155 template<typename FloatFunc, typename Uint8Func, typename ... Params>
156 bool IsSupportedForDataTypeCl(Optional<std::string&> reasonIfUnsupported,
157  DataType dataType,
158  FloatFunc floatFuncPtr,
159  Uint8Func uint8FuncPtr,
160  Params&&... params)
161 {
162  return IsClBackendSupported(reasonIfUnsupported) &&
163  IsSupportedForDataTypeGeneric(reasonIfUnsupported,
164  dataType,
165  floatFuncPtr,
166  floatFuncPtr,
167  uint8FuncPtr,
168  &FalseFunc<>,
169  &FalseFunc<>,
170  std::forward<Params>(params)...);
171 }
172 } // anonymous namespace
173 
175  : m_ModelContextPtr(modelContextPtr)
176 {
177 }
178 
180  : m_ModelContextPtr(nullptr)
181 {
182 }
183 
185  const std::vector<TensorInfo>& infos,
186  const BaseDescriptor& descriptor,
187  const Optional<LstmInputParamsInfo>& lstmParamsInfo,
188  const Optional<QuantizedLstmInputParamsInfo>& quantizedLstmParamsInfo,
189  Optional<std::string&> reasonIfUnsupported) const
190 {
191  switch (type)
192  {
194  return IsActivationSupported(infos[0],
195  infos[1],
196  *(PolymorphicDowncast<const ActivationDescriptor*>(&descriptor)),
198  case LayerType::Addition:
199  return IsAdditionSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
201  return IsArgMinMaxSupported(infos[0],
202  infos[1],
203  *(PolymorphicDowncast<const ArgMinMaxDescriptor*>(&descriptor)),
206  return IsBatchMatMulSupported(infos[0],
207  infos[1],
208  infos[2],
209  *(PolymorphicDowncast<const BatchMatMulDescriptor*>(&descriptor)),
212  return IsBatchNormalizationSupported(infos[0],
213  infos[1],
214  infos[2],
215  infos[3],
216  infos[4],
217  infos[5],
218  *(PolymorphicDowncast<const BatchNormalizationDescriptor*>
219  (&descriptor)),
222  return IsBatchToSpaceNdSupported(infos[0],
223  infos[1],
224  *(PolymorphicDowncast<const BatchToSpaceNdDescriptor*>(&descriptor)),
226  case LayerType::Cast:
227  return IsCastSupported(infos[0], infos[1], reasonIfUnsupported);
229  return IsChannelShuffleSupported(infos[0],
230  infos[1],
231  *(PolymorphicDowncast<const ChannelShuffleDescriptor*>(&descriptor)),
234  return IsComparisonSupported(infos[0],
235  infos[1],
236  infos[2],
237  *(PolymorphicDowncast<const ComparisonDescriptor*>(&descriptor)),
239  case LayerType::Concat:
240  {
241  std::vector<const TensorInfo*> inputInfos;
242  for (uint32_t i = 0; i < (infos.size() - 1); i++)
243  {
244  inputInfos.push_back(&infos[i]);
245  }
246  return IsConcatSupported(inputInfos,
247  infos[infos.size() - 1],
248  *(PolymorphicDowncast<const OriginsDescriptor*>(&descriptor)),
250  }
251  case LayerType::Constant:
252  return IsConstantSupported(infos[0], reasonIfUnsupported);
254  return IsConvertFp16ToFp32Supported(infos[0], infos[1], reasonIfUnsupported);
256  return IsConvertFp32ToFp16Supported(infos[0], infos[1], reasonIfUnsupported);
258  {
259  if (infos.size() != 4)
260  {
261  throw InvalidArgumentException("Invalid number of Convolution2d TensorInfos. "
262  "TensorInfos should be of format: {input, output, weights, biases}.");
263  }
264 
265  auto desc = *(PolymorphicDowncast<const Convolution2dDescriptor*>(&descriptor));
266  if (infos[3] == TensorInfo())
267  {
268  return IsConvolution2dSupported(infos[0],
269  infos[1],
270  desc,
271  infos[2],
272  EmptyOptional(),
274  }
275  else
276  {
277  return IsConvolution2dSupported(infos[0],
278  infos[1],
279  desc,
280  infos[2],
281  infos[3],
283  }
284  }
286  {
287  if (infos.size() != 4)
288  {
289  throw InvalidArgumentException("Invalid number of Convolution3d TensorInfos. "
290  "TensorInfos should be of format: {input, output, weights, biases}.");
291  }
292 
293  auto desc = *(PolymorphicDowncast<const Convolution3dDescriptor*>(&descriptor));
294  if (infos[3] == TensorInfo())
295  {
296  return IsConvolution3dSupported(infos[0],
297  infos[1],
298  desc,
299  infos[2],
300  EmptyOptional(),
302  }
303  else
304  {
305  return IsConvolution3dSupported(infos[0],
306  infos[1],
307  desc,
308  infos[2],
309  infos[3],
311  }
312  }
314  return IsDepthToSpaceSupported(infos[0],
315  infos[1],
316  *(PolymorphicDowncast<const DepthToSpaceDescriptor*>(&descriptor)),
319  {
320  if (infos.size() != 4)
321  {
322  throw InvalidArgumentException("Invalid number of DepthwiseConvolution2d TensorInfos. "
323  "TensorInfos should be of format: {input, output, weights, biases}.");
324  }
325 
326  auto desc = *(PolymorphicDowncast<const DepthwiseConvolution2dDescriptor*>(&descriptor));
327  if (infos[3] == TensorInfo())
328  {
329  return IsDepthwiseConvolutionSupported(infos[0],
330  infos[1],
331  desc,
332  infos[2],
333  EmptyOptional(),
335  }
336  else
337  {
338  return IsDepthwiseConvolutionSupported(infos[0],
339  infos[1],
340  desc,
341  infos[2],
342  infos[3],
344  }
345  }
347  return IsDequantizeSupported(infos[0], infos[1], reasonIfUnsupported);
348  case LayerType::Division:
349  return IsDivisionSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
351  return IsElementwiseUnarySupported(infos[0],
352  infos[1],
353  *(PolymorphicDowncast<const ElementwiseUnaryDescriptor*>(&descriptor)),
355  case LayerType::Fill:
356  return IsFillSupported(infos[0],
357  infos[1],
358  *(PolymorphicDowncast<const FillDescriptor*>(&descriptor)),
360  case LayerType::Floor:
361  return IsFloorSupported(infos[0], infos[1], reasonIfUnsupported);
363  return IsFullyConnectedSupported(infos[0],
364  infos[1],
365  infos[2],
366  infos[3],
367  *(PolymorphicDowncast<const FullyConnectedDescriptor*>(&descriptor)),
369  case LayerType::Gather:
370  return IsGatherSupported(infos[0],
371  infos[1],
372  infos[2],
373  *(PolymorphicDowncast<const GatherDescriptor*>(&descriptor)),
375  case LayerType::GatherNd:
376  return IsGatherNdSupported(infos[0],
377  infos[1],
378  infos[2],
380  case LayerType::Input:
381  return IsInputSupported(infos[0], reasonIfUnsupported);
383  return IsInstanceNormalizationSupported(infos[0],
384  infos[1],
385  *(PolymorphicDowncast<const InstanceNormalizationDescriptor*>
386  (&descriptor)),
389  return IsL2NormalizationSupported(infos[0],
390  infos[1],
391  *(PolymorphicDowncast<const L2NormalizationDescriptor*>(&descriptor)),
394  return IsLogicalBinarySupported(infos[0],
395  infos[1],
396  infos[2],
397  *(PolymorphicDowncast<const LogicalBinaryDescriptor*>(&descriptor)),
400  return IsLogSoftmaxSupported(infos[0],
401  infos[1],
402  *(PolymorphicDowncast<const LogSoftmaxDescriptor*>(&descriptor)),
404  case LayerType::Lstm:
405  return IsLstmSupported(infos[0],
406  infos[1],
407  infos[2],
408  infos[3],
409  infos[4],
410  infos[5],
411  infos[6],
412  *(PolymorphicDowncast<const LstmDescriptor*>(&descriptor)),
413  lstmParamsInfo.value(),
415  case LayerType::Map:
416  return true;
417  case LayerType::MemCopy:
418  return LayerSupportBase::IsMemCopySupported(infos[0], infos[1], reasonIfUnsupported);
421  case LayerType::Merge:
422  return LayerSupportBase::IsMergeSupported(infos[0],
423  infos[1],
424  infos[2],
426  case LayerType::Maximum:
427  return IsMaximumSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
428  case LayerType::Mean:
429  return IsMeanSupported(infos[0],
430  infos[1],
431  *(PolymorphicDowncast<const MeanDescriptor*>(&descriptor)),
433  case LayerType::Minimum:
434  return IsMinimumSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
436  return IsMultiplicationSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
438  return IsNormalizationSupported(infos[0],
439  infos[1],
440  *(PolymorphicDowncast<const NormalizationDescriptor*>(&descriptor)),
442  case LayerType::Output:
443  return IsOutputSupported(infos[0], reasonIfUnsupported);
444  case LayerType::Pad:
445  return IsPadSupported(infos[0],
446  infos[1],
447  *(PolymorphicDowncast<const PadDescriptor*>(&descriptor)),
449  case LayerType::Permute:
450  return IsPermuteSupported(infos[0],
451  infos[1],
452  *(PolymorphicDowncast<const PermuteDescriptor*>(&descriptor)),
455  return IsPooling2dSupported(infos[0],
456  infos[1],
457  *(PolymorphicDowncast<const Pooling2dDescriptor*>(&descriptor)),
460  return IsPooling3dSupported(infos[0],
461  infos[1],
462  *(PolymorphicDowncast<const Pooling3dDescriptor*>(&descriptor)),
464  case LayerType::Prelu:
465  return IsPreluSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
466  case LayerType::QLstm:
467  return IsQLstmSupported(infos[0],
468  infos[1],
469  infos[2],
470  infos[3],
471  infos[4],
472  infos[5],
473  *(PolymorphicDowncast<const QLstmDescriptor*>(&descriptor)),
474  lstmParamsInfo.value(),
476  case LayerType::Quantize:
477  return IsQuantizeSupported(infos[0], infos[1], reasonIfUnsupported);
479  return IsQuantizedLstmSupported(infos[0],
480  infos[1],
481  infos[2],
482  infos[3],
483  infos[4],
484  quantizedLstmParamsInfo.value(),
486  case LayerType::Rank:
487  return true;
488  case LayerType::Reduce:
489  return IsReduceSupported(infos[0],
490  infos[1],
491  *(PolymorphicDowncast<const ReduceDescriptor*>(&descriptor)),
493  case LayerType::Reshape:
494  return IsReshapeSupported(infos[0],
495  infos[1],
496  *(PolymorphicDowncast<const ReshapeDescriptor*>(&descriptor)),
498  case LayerType::Resize:
499  return IsResizeSupported(infos[0],
500  infos[1],
501  *(PolymorphicDowncast<const ResizeDescriptor*>(&descriptor)),
503  case LayerType::Shape:
504  return LayerSupportBase::IsShapeSupported(infos[0],
505  infos[1],
507  case LayerType::Slice:
508  return IsSliceSupported(infos[0],
509  infos[1],
510  *(PolymorphicDowncast<const SliceDescriptor*>(&descriptor)),
512  case LayerType::Softmax:
513  return IsSoftmaxSupported(infos[0],
514  infos[1],
515  *(PolymorphicDowncast<const SoftmaxDescriptor*>(&descriptor)),
518  return IsSpaceToBatchNdSupported(infos[0],
519  infos[1],
520  *(PolymorphicDowncast<const SpaceToBatchNdDescriptor*>(&descriptor)),
523  return IsSpaceToDepthSupported(infos[0],
524  infos[1],
525  *(PolymorphicDowncast<const SpaceToDepthDescriptor*>(&descriptor)),
527  case LayerType::Splitter:
528  {
529  std::vector<TensorInfo> outputInfos;
530  for (uint32_t i = 1; i < infos.size(); i++)
531  {
532  outputInfos.push_back(infos[i]);
533  }
534  return IsSplitterSupported(infos[0],
535  {outputInfos.begin(), outputInfos.end()},
536  *(PolymorphicDowncast<const ViewsDescriptor*>(&descriptor)),
538  }
539  case LayerType::Stack:
540  {
541  std::vector<const TensorInfo*> inputInfos;
542  for (uint32_t i = 0; i < infos.size() - 1; i++)
543  {
544  inputInfos.push_back(&infos[i]);
545  }
546  return IsStackSupported(inputInfos,
547  infos[infos.size() - 1],
548  *(PolymorphicDowncast<const StackDescriptor*>(&descriptor)),
550  }
552  return IsStridedSliceSupported(infos[0],
553  infos[1],
554  *(PolymorphicDowncast<const StridedSliceDescriptor*>(&descriptor)),
557  return IsSubtractionSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
559  return IsTransposeSupported(infos[0],
560  infos[1],
561  *(PolymorphicDowncast<const TransposeDescriptor*>(&descriptor)),
564  {
565  if (infos.size() != 4)
566  {
567  throw InvalidArgumentException("Invalid number of TransposeConvolution2d TensorInfos. "
568  "TensorInfos should be of format: {input, output, weights, biases}.");
569  }
570 
571  auto desc = *(PolymorphicDowncast<const TransposeConvolution2dDescriptor*>(&descriptor));
572  if (infos[3] == TensorInfo())
573  {
574  return IsTransposeConvolution2dSupported(infos[0],
575  infos[1],
576  desc,
577  infos[2],
578  EmptyOptional(),
580  }
581  else
582  {
583  return IsTransposeConvolution2dSupported(infos[0],
584  infos[1],
585  desc,
586  infos[2],
587  infos[3],
589  }
590  }
593  infos[1],
594  infos[2],
595  infos[3],
596  infos[4],
597  infos[5],
598  *(PolymorphicDowncast<const
600  lstmParamsInfo.value(),
602  case LayerType::Unmap:
603  return true;
604  default:
605  // layers not supported in cl by default:
606  // debug, detectionpostprocess, fakequantization,
607  // precompiled, standin, switch, pooling3d
608  return false;
609  }
610 }
611 
613  const TensorInfo& output,
614  const ActivationDescriptor& descriptor,
615  Optional<std::string&> reasonIfUnsupported) const
616 {
619  input,
620  output,
621  descriptor);
622 }
623 
625  const TensorInfo& input1,
626  const TensorInfo& output,
627  Optional<std::string&> reasonIfUnsupported) const
628 {
631  input0,
632  input1,
633  output,
634  nullptr);
635 }
636 
638  const TensorInfo& output,
639  const ArgMinMaxDescriptor& descriptor,
640  Optional<std::string&> reasonIfUnsupported) const
641 {
642 
645  input,
646  output,
647  descriptor);
648 }
649 
651  const TensorInfo& inputY,
652  const TensorInfo& output,
653  const BatchMatMulDescriptor& descriptor,
654  Optional<std::string&> reasonIfUnsupported) const
655 {
658  inputX,
659  inputY,
660  output,
661  descriptor);
662 }
663 
665  const TensorInfo& output,
666  const TensorInfo& mean,
667  const TensorInfo& var,
668  const TensorInfo& beta,
669  const TensorInfo& gamma,
670  const BatchNormalizationDescriptor& descriptor,
671  Optional<std::string&> reasonIfUnsupported) const
672 {
675  input,
676  output,
677  mean,
678  var,
679  beta,
680  gamma,
681  descriptor,
682  nullptr);
683 }
684 
686  const TensorInfo& output,
687  const BatchToSpaceNdDescriptor& descriptor,
688  Optional<std::string&> reasonIfUnsupported) const
689 {
692  input,
693  output,
694  descriptor);
695 }
696 
698  const TensorInfo& output,
699  Optional<std::string&> reasonIfUnsupported) const
700 {
703  input,
704  output);
705 }
706 
708  const TensorInfo& output,
709  const ChannelShuffleDescriptor& descriptor,
710  Optional<std::string&> reasonIfUnsupported) const
711 {
714  input,
715  output,
716  descriptor);
717 }
718 
720  const TensorInfo& input1,
721  const TensorInfo& output,
722  const ComparisonDescriptor& descriptor,
723  Optional<std::string&> reasonIfUnsupported) const
724 {
727  input0,
728  input1,
729  output,
730  descriptor);
731 }
732 
733 bool ClLayerSupport::IsConcatSupported(const std::vector<const TensorInfo*> inputs,
734  const TensorInfo& output,
735  const OriginsDescriptor& descriptor,
736  Optional<std::string&> reasonIfUnsupported) const
737 {
738  if (descriptor.GetNumDimensions() <= descriptor.GetConcatAxis())
739  {
740  SetValueChecked(reasonIfUnsupported, "Cl Concat: Concat axis > Number of dimensions.");
741  return false;
742  }
743 
744  unsigned int concatInnerAxis = (descriptor.GetNumDimensions() - descriptor.GetConcatAxis()) - 1;
745  if(concatInnerAxis < 3) // Width, height, or channels
746  {
749  inputs,
750  output,
751  descriptor);
752  }
753  else if (concatInnerAxis == 3)
754  {
755  // We rely on the sub-tensor optimization to handle the batch dimension for 4D tensors. If we can't use
756  // sub-tensors for this then we can't support it. Here is where we check that the sub-tensors will work.
757  for (auto& input : inputs)
758  {
759  if (input && !output.IsTypeSpaceMatch(*input)) // Cannot use sub-tensors if the types are not same space
760  {
761  SetValueChecked(reasonIfUnsupported, "Cl Concat: Types and quantization parameters must match.");
762  return false;
763  }
764  }
765  return true; // Sub-tensors support concat along batch
766  }
767  else // > 4 dimensions not supported.
768  {
769  SetValueChecked(reasonIfUnsupported, "Cl Concat: Maximum of 4 dimensions supported.");
770  return false;
771  }
772 }
773 
775  Optional<std::string&> reasonIfUnsupported) const
776 {
779  output);
780 }
781 
783  const TensorInfo& output,
784  Optional<std::string&> reasonIfUnsupported) const
785 {
788  input,
789  output);
790 }
791 
793  const TensorInfo& output,
794  Optional<std::string&> reasonIfUnsupported) const
795 {
798  input,
799  output);
800 }
801 
803  const TensorInfo& output,
804  const Convolution2dDescriptor& descriptor,
805  const TensorInfo& weights,
806  const Optional<TensorInfo>& biases,
807  Optional<std::string&> reasonIfUnsupported) const
808 {
809  bool isFastMathEnabled = false;
810 #if defined(ARMCOMPUTECL_ENABLED)
811  if (m_ModelContextPtr)
812  {
813  if (m_ModelContextPtr.get() != nullptr)
814  {
815  auto modelOptions = dynamic_cast<ClBackendModelContext*>(m_ModelContextPtr.get());
816  if (modelOptions)
817  {
818  isFastMathEnabled = modelOptions->IsFastMathEnabled();
819  }
820  }
821  }
822 #endif
823 
826  input,
827  output,
828  descriptor,
829  weights,
830  biases,
831  isFastMathEnabled,
832  nullptr);
833 }
834 
836  const TensorInfo& output,
837  const Convolution3dDescriptor& descriptor,
838  const TensorInfo& weights,
839  const Optional<TensorInfo>& biases,
840  Optional<std::string&> reasonIfUnsupported) const
841 {
842  bool isFastMathEnabled = false;
843 #if defined(ARMCOMPUTECL_ENABLED)
844  if (m_ModelContextPtr)
845 {
846  if (m_ModelContextPtr.get() != nullptr)
847  {
848  auto modelOptions = dynamic_cast<ClBackendModelContext*>(m_ModelContextPtr.get());
849  if (modelOptions)
850  {
851  isFastMathEnabled = modelOptions->IsFastMathEnabled();
852  }
853  }
854 }
855 #endif
856 
859  input,
860  output,
861  descriptor,
862  weights,
863  biases,
864  isFastMathEnabled,
865  nullptr);
866 }
867 
869  const TensorInfo& output,
870  Optional<std::string&> reasonIfUnsupported) const
871 {
874  input,
875  output);
876 }
877 
879  const TensorInfo& output,
880  const DepthToSpaceDescriptor& descriptor,
881  Optional<std::string&> reasonIfUnsupported) const
882 {
885  input,
886  output,
887  descriptor);
888 }
889 
891  const TensorInfo& output,
892  const DepthwiseConvolution2dDescriptor& descriptor,
893  const TensorInfo& weights,
894  const Optional<TensorInfo>& biases,
895  Optional<std::string&> reasonIfUnsupported) const
896 {
899  input,
900  output,
901  descriptor,
902  weights,
903  biases,
904  nullptr);
905 }
906 
908  const TensorInfo& output,
909  const DepthwiseConvolution2dDescriptor& descriptor,
910  const TensorInfo& weights,
911  const Optional<TensorInfo>& biases,
912  Optional<std::string&> reasonIfUnsupported) const
913 {
916  input,
917  output,
918  descriptor,
919  weights,
920  biases,
921  nullptr);
922 }
923 
924 
926  const TensorInfo& input1,
927  const TensorInfo& output,
928  Optional<std::string&> reasonIfUnsupported) const
929 {
932  input0,
933  input1,
934  output,
935  nullptr);
936 }
937 
939  const TensorInfo& output,
940  const ElementwiseUnaryDescriptor& descriptor,
941  Optional<std::string&> reasonIfUnsupported) const
942 {
943  switch(descriptor.m_Operation)
944  {
945  case UnaryOperation::Abs:
948  input,
949  output);
950  case UnaryOperation::Exp:
953  input,
954  output);
955  case UnaryOperation::Log:
958  input,
959  output);
963  input,
964  output);
965  case UnaryOperation::Neg:
968  input,
969  output);
973  input,
974  output);
975  case UnaryOperation::Sin:
978  input,
979  output);
983  input,
984  output);
985  default:
986  return false;
987  }
988 }
989 
991  const TensorInfo& output,
992  const FillDescriptor& descriptor,
993  Optional<std::string&> reasonIfUnsupported) const
994 {
995  armnn::IgnoreUnused(input);
998 
999  return IsClBackendSupported(reasonIfUnsupported);
1000 }
1001 
1003  const TensorInfo& output,
1004  Optional<std::string&> reasonIfUnsupported) const
1005 {
1008  input,
1009  output);
1010 }
1011 
1013  const TensorInfo& output,
1014  const TensorInfo& weights,
1015  const TensorInfo& biases,
1016  const FullyConnectedDescriptor& descriptor,
1017  Optional<std::string&> reasonIfUnsupported) const
1018 {
1021  input,
1022  output,
1023  weights,
1024  biases,
1025  descriptor,
1026  nullptr);
1027 }
1028 
1030  const TensorInfo& input1,
1031  const TensorInfo& output,
1032  const GatherDescriptor& descriptor,
1033  Optional<std::string&> reasonIfUnsupported) const
1034 {
1037  input0,
1038  input1,
1039  output,
1040  descriptor);
1041 }
1042 
1044  const TensorInfo& input1,
1045  const TensorInfo& output,
1046  Optional<std::string&> reasonIfUnsupported) const
1047 {
1050  input0,
1051  input1,
1052  output);
1053 }
1054 
1056  Optional<std::string&> reasonIfUnsupported) const
1057 {
1058  return IsClBackendSupported(reasonIfUnsupported, input);
1059 }
1060 
1062  const TensorInfo& output,
1063  const InstanceNormalizationDescriptor& descriptor,
1064  Optional<std::string&> reasonIfUnsupported) const
1065 {
1068  input,
1069  output,
1070  descriptor);
1071 }
1072 
1074  const TensorInfo& output,
1075  const L2NormalizationDescriptor& descriptor,
1076  Optional<std::string&> reasonIfUnsupported) const
1077 {
1080  input,
1081  output,
1082  descriptor);
1083 }
1084 
1086  const TensorInfo& input1,
1087  const TensorInfo& output,
1088  const LogicalBinaryDescriptor& descriptor,
1089  Optional<std::string&> reasonIfUnsupported) const
1090 {
1092 
1093  switch(descriptor.m_Operation)
1094  {
1098  input0,
1099  input1,
1100  output);
1104  input0,
1105  input1,
1106  output);
1107  default:
1108  return false;
1109  }
1110 }
1111 
1112 
1114  const TensorInfo& output,
1115  const LogSoftmaxDescriptor& descriptor,
1116  Optional<std::string&> reasonIfUnsupported) const
1117 {
1120  input,
1121  output,
1122  descriptor);
1123 }
1124 
1126  const TensorInfo& outputStateIn,
1127  const TensorInfo& cellStateIn,
1128  const TensorInfo& scratchBuffer,
1129  const TensorInfo& outputStateOut,
1130  const TensorInfo& cellStateOut,
1131  const TensorInfo& output,
1132  const LstmDescriptor& descriptor,
1133  const LstmInputParamsInfo& paramsInfo,
1134  Optional<std::string&> reasonIfUnsupported) const
1135 {
1138  input,
1139  outputStateIn,
1140  cellStateIn,
1141  scratchBuffer,
1143  cellStateOut,
1144  output,
1145  descriptor,
1146  paramsInfo);
1147 }
1148 
1150  const TensorInfo& input1,
1151  const TensorInfo& output,
1152  Optional<std::string&> reasonIfUnsupported) const
1153 {
1156  input0,
1157  input1,
1158  output);
1159 }
1160 
1162  const TensorInfo& output,
1163  const MeanDescriptor& descriptor,
1164  Optional<std::string&> reasonIfUnsupported) const
1165 {
1168  input,
1169  output,
1170  descriptor);
1171 }
1172 
1174  const TensorInfo& input1,
1175  const TensorInfo& output,
1176  Optional<std::string&> reasonIfUnsupported) const
1177 {
1180  input0,
1181  input1,
1182  output);
1183 }
1184 
1186  const TensorInfo& input1,
1187  const TensorInfo& output,
1188  Optional<std::string&> reasonIfUnsupported) const
1189 {
1192  input0,
1193  input1,
1194  output,
1195  nullptr);
1196 }
1197 
1199  const TensorInfo& output,
1200  const NormalizationDescriptor& descriptor,
1201  Optional<std::string&> reasonIfUnsupported) const
1202 {
1204 }
1205 
1207  Optional<std::string&> reasonIfUnsupported) const
1208 {
1209  return IsClBackendSupported(reasonIfUnsupported, output);
1210 }
1211 
1213  const TensorInfo& output,
1214  const PadDescriptor& descriptor,
1215  Optional<std::string&> reasonIfUnsupported) const
1216 {
1219  input,
1220  output,
1221  descriptor);
1222 }
1223 
1225  const TensorInfo& output,
1226  const PermuteDescriptor& descriptor,
1227  Optional<std::string&> reasonIfUnsupported) const
1228 {
1230 }
1231 
1233  const TensorInfo& output,
1234  const Pooling2dDescriptor& descriptor,
1235  Optional<std::string&> reasonIfUnsupported) const
1236 {
1238 }
1239 
1241  const TensorInfo& output,
1242  const Pooling3dDescriptor& descriptor,
1243  Optional<std::string&> reasonIfUnsupported) const
1244 {
1246 }
1247 
1249  const armnn::TensorInfo &alpha,
1250  const armnn::TensorInfo &output,
1251  armnn::Optional<std::string &> reasonIfUnsupported) const
1252 {
1254 }
1255 
1257  const TensorInfo& previousOutputIn,
1258  const TensorInfo& previousCellStateIn,
1259  const TensorInfo& outputStateOut,
1260  const TensorInfo& cellStateOut,
1261  const TensorInfo& output,
1262  const QLstmDescriptor& descriptor,
1263  const LstmInputParamsInfo& paramsInfo,
1264  Optional<std::string&> reasonIfUnsupported) const
1265 {
1266  if (input.GetDataType() == armnn::DataType::QAsymmS8 &&
1272  {
1275  input,
1278  cellStateOut,
1280  output,
1281  descriptor,
1282  paramsInfo);
1283  }
1284  else
1285  {
1286  return false;
1287  }
1288 }
1289 
1291  const TensorInfo& previousCellStateIn,
1292  const TensorInfo& previousOutputIn,
1293  const TensorInfo& cellStateOut,
1294  const TensorInfo& output,
1295  const QuantizedLstmInputParamsInfo& paramsInfo,
1296  Optional<std::string&> reasonIfUnsupported) const
1297 {
1300  input,
1303  cellStateOut,
1304  output,
1305  paramsInfo);
1306 }
1307 
1309  const TensorInfo& output,
1310  Optional<std::string&> reasonIfUnsupported) const
1311 {
1314  input,
1315  output);
1316 }
1317 
1319  const TensorInfo& output,
1320  const ReduceDescriptor& descriptor,
1321  Optional<std::string&> reasonIfUnsupported) const
1322 {
1325  input,
1326  output,
1327  descriptor);
1328 }
1329 
1331  const TensorInfo& output,
1332  const ReshapeDescriptor& descriptor,
1333  Optional<std::string&> reasonIfUnsupported) const
1334 {
1337 }
1338 
1340  const TensorInfo& output,
1341  const ResizeDescriptor& descriptor,
1342  Optional<std::string&> reasonIfUnsupported) const
1343 {
1345 }
1346 
1348  const TensorInfo& output,
1349  const SliceDescriptor& descriptor,
1350  Optional<std::string&> reasonIfUnsupported) const
1351 {
1353 }
1354 
1356  const TensorInfo& output,
1357  const SoftmaxDescriptor& descriptor,
1358  Optional<std::string&> reasonIfUnsupported) const
1359 {
1361 }
1362 
1364  const TensorInfo& output,
1365  const SpaceToBatchNdDescriptor& descriptor,
1366  Optional<std::string&> reasonIfUnsupported) const
1367 {
1370  input,
1371  output,
1372  descriptor);
1373 }
1374 
1376  const TensorInfo& output,
1377  const SpaceToDepthDescriptor& descriptor,
1378  Optional<std::string&> reasonIfUnsupported) const
1379 {
1382  input,
1383  output,
1384  descriptor);
1385 }
1386 
1388  const std::vector<std::reference_wrapper<TensorInfo>>& outputs,
1389  const ViewsDescriptor& descriptor,
1390  Optional<std::string&> reasonIfUnsupported) const
1391 {
1392 #if defined(ARMCOMPUTECL_ENABLED)
1393  // Split along the last dimension, cannot use sub-tensors
1394  // as width and height of the sub-tensors do not match
1395  // the width and height of the parent tensor
1396  // in case of input with more than 2D.
1397  std::set<unsigned int> splitAxis = ComputeSplitAxis(descriptor, input.GetShape());
1398  if (descriptor.GetNumDimensions() > 2 && splitAxis.size() == 1 &&
1399  *splitAxis.begin() == descriptor.GetNumDimensions() - 1 )
1400  {
1403  input,
1404  outputs,
1405  *splitAxis.begin());
1406  }
1407 #endif
1409  for (auto output : outputs)
1410  {
1411  if (!input.IsTypeSpaceMatch(output)) // Cannot use sub-tensors if the types are not same space
1412  {
1413  SetValueChecked(reasonIfUnsupported, "Cl Splitter: Types and quantization parameters must match.");
1414  return false;
1415  }
1416  }
1417  return true;
1418 }
1419 
1420 bool ClLayerSupport::IsStackSupported(const std::vector<const TensorInfo*>& inputs,
1421  const TensorInfo& output,
1422  const StackDescriptor& descriptor,
1423  Optional<std::string&> reasonIfUnsupported) const
1424 {
1427  inputs,
1428  output,
1429  descriptor);
1430 }
1431 
1433  const TensorInfo& output,
1434  const StridedSliceDescriptor& descriptor,
1435  Optional<std::string&> reasonIfUnsupported) const
1436 {
1439  input,
1440  output,
1441  descriptor);
1442 }
1443 
1445  const TensorInfo& input1,
1446  const TensorInfo& output,
1447  Optional<std::string&> reasonIfUnsupported) const
1448 {
1451  input0,
1452  input1,
1453  output,
1454  nullptr);
1455 }
1456 
1458  const TensorInfo& output,
1459  const TransposeConvolution2dDescriptor& descriptor,
1460  const TensorInfo& weights,
1461  const Optional<TensorInfo>& biases,
1462  Optional<std::string&> reasonIfUnsupported) const
1463 {
1466  input,
1467  output,
1468  descriptor,
1469  weights,
1470  biases);
1471 }
1472 
1474  const TensorInfo& output,
1475  const TransposeDescriptor& descriptor,
1476  Optional<std::string&> reasonIfUnsupported) const
1477 {
1479 }
1480 
1482  const TensorInfo& outputStateIn,
1483  const TensorInfo& cellStateIn,
1484  const TensorInfo& outputStateOut,
1485  const TensorInfo& cellStateOut,
1486  const TensorInfo& output,
1487  const UnidirectionalSequenceLstmDescriptor& descriptor,
1488  const LstmInputParamsInfo& paramsInfo,
1489  Optional<std::string&> reasonIfUnsupported) const
1490 {
1493  input,
1494  outputStateIn,
1495  cellStateIn,
1497  cellStateOut,
1498  output,
1499  descriptor,
1500  paramsInfo);
1501 }
1502 
1503 } // namespace armnn
armnn::ClLayerSupport::IsSubtractionSupported
bool IsSubtractionSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: ClLayerSupport.cpp:1444
armnn::ClDivisionWorkloadValidate
arm_compute::Status ClDivisionWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ActivationDescriptor *activationDescriptor)
Definition: ClDivisionWorkload.cpp:18
ClConstantWorkload.hpp
armnn::LayerSupportBase::IsMemImportSupported
bool IsMemImportSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: LayerSupportBase.cpp:397
armnn::LayerType::Floor
@ Floor
ClConcatWorkload.hpp
armnn::ClReduceWorkloadValidate
arm_compute::Status ClReduceWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const ReduceDescriptor &descriptor)
Definition: ClReduceWorkload.cpp:18
armnn::ClSoftmaxWorkloadValidate
arm_compute::Status ClSoftmaxWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const SoftmaxDescriptor &descriptor)
Definition: ClSoftmaxWorkload.cpp:17
armnn::ClLayerSupport::IsCastSupported
bool IsCastSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: ClLayerSupport.cpp:697
armnn::LayerType::MemCopy
@ MemCopy
ClDequantizeWorkload.hpp
armnn::ClLayerSupport::IsElementwiseUnarySupported
bool IsElementwiseUnarySupported(const TensorInfo &input, const TensorInfo &ouput, const ElementwiseUnaryDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: ClLayerSupport.cpp:938
ClSplitterWorkload.hpp
armnn::LayerType::Softmax
@ Softmax
armnn::LayerType::Pooling3d
@ Pooling3d
armnn::LayerType::FullyConnected
@ FullyConnected
armnn::ILayerSupport::outputStateIn
const TensorInfo & outputStateIn
Definition: ILayerSupport.hpp:286
armnn::LayerType::Transpose
@ Transpose
ClFillWorkload.hpp
armnn::ClLayerSupport::IsConcatSupported
bool IsConcatSupported(const std::vector< const TensorInfo * > inputs, const TensorInfo &output, const OriginsDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: ClLayerSupport.cpp:733
ClSoftmaxWorkload.hpp
armnn::LstmInputParamsInfo
Definition: LstmParams.hpp:63
armnn::ClDequantizeWorkloadValidate
arm_compute::Status ClDequantizeWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: ClDequantizeWorkload.cpp:22
armnn::LayerType::ChannelShuffle
@ ChannelShuffle
armnn::ILayerSupport::paramsInfo
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const LstmDescriptor const LstmInputParamsInfo & paramsInfo
Definition: ILayerSupport.hpp:293
armnn::ClLayerSupport::IsSliceSupported
bool IsSliceSupported(const TensorInfo &input, const TensorInfo &output, const SliceDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: ClLayerSupport.cpp:1347
armnn::GatherDescriptor
A GatherDescriptor for the GatherLayer.
Definition: Descriptors.hpp:912
armnn::NormalizationDescriptor
A NormalizationDescriptor for the NormalizationLayer.
Definition: Descriptors.hpp:737
armnn::TransposeDescriptor
A TransposeDescriptor for the TransposeLayer.
Definition: Descriptors.hpp:1437
ClLogicalNotWorkload.hpp
ClCastWorkload.hpp
ClBatchToSpaceNdWorkload.hpp
armnn::ClL2NormalizationWorkloadValidate
arm_compute::Status ClL2NormalizationWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const L2NormalizationDescriptor &descriptor)
Definition: ClL2NormalizationFloatWorkload.cpp:17
armnn::ClBatchMatMulValidate
arm_compute::Status ClBatchMatMulValidate(const TensorInfo &inputX, const TensorInfo &inputY, const TensorInfo &output, const BatchMatMulDescriptor &descriptor)
Definition: ClBatchMatMulWorkload.cpp:29
armnn::ClLayerSupport::IsPadSupported
bool IsPadSupported(const TensorInfo &input, const TensorInfo &output, const PadDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: ClLayerSupport.cpp:1212
armnn::ElementwiseUnaryDescriptor
A ElementwiseUnaryDescriptor for the ElementwiseUnaryLayer.
Definition: Descriptors.hpp:109
armnn::PadDescriptor
A PadDescriptor for the PadLayer.
Definition: Descriptors.hpp:1143
armnn::ClLayerSupport::IsTransposeSupported
bool IsTransposeSupported(const TensorInfo &input, const TensorInfo &output, const TransposeDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: ClLayerSupport.cpp:1473
armnn::SoftmaxDescriptor
A SoftmaxDescriptor for the SoftmaxLayer.
Definition: Descriptors.hpp:157
ClPooling3dWorkload.hpp
armnn::ClReshapeWorkloadValidate
arm_compute::Status ClReshapeWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: ClReshapeWorkload.cpp:15
armnn::ClLayerSupport::IsSoftmaxSupported
bool IsSoftmaxSupported(const TensorInfo &input, const TensorInfo &output, const SoftmaxDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: ClLayerSupport.cpp:1355
armnn::LayerType::ConvertFp32ToFp16
@ ConvertFp32ToFp16
armnn::LayerType::L2Normalization
@ L2Normalization
armnn::LogicalBinaryOperation::LogicalAnd
@ LogicalAnd
armnn::ClLayerSupport::IsFillSupported
bool IsFillSupported(const TensorInfo &input, const TensorInfo &output, const FillDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: ClLayerSupport.cpp:990
armnn::StackDescriptor
A StackDescriptor for the StackLayer.
Definition: Descriptors.hpp:1198
armnn::LayerType::TransposeConvolution2d
@ TransposeConvolution2d
armnn::ClAbsWorkloadValidate
arm_compute::Status ClAbsWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: ClAbsWorkload.cpp:19
armnn::SliceDescriptor
A SliceDescriptor for the SliceLayer.
Definition: Descriptors.hpp:1175
armnn::ILayerSupport::scratchBuffer
const TensorInfo const TensorInfo const TensorInfo & scratchBuffer
Definition: ILayerSupport.hpp:288
armnn::LayerType::Map
@ Map
armnn::LayerType::Input
@ Input
armnn::ClMeanValidate
arm_compute::Status ClMeanValidate(const TensorInfo &input, const TensorInfo &output, const MeanDescriptor &descriptor)
Definition: ClMeanWorkload.cpp:17
armnn::LayerType::Slice
@ Slice
armnn::ActivationDescriptor
An ActivationDescriptor for the ActivationLayer.
Definition: Descriptors.hpp:36
armnn::ILayerSupport::reasonIfUnsupported
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
Definition: ILayerSupport.hpp:43
FORWARD_WORKLOAD_VALIDATE_FUNC
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported,...)
Definition: ClLayerSupport.cpp:148
armnn::ClLayerSupport::IsLayerSupported
bool IsLayerSupported(const LayerType &type, const std::vector< TensorInfo > &infos, const BaseDescriptor &descriptor, const Optional< LstmInputParamsInfo > &lstmParamsInfo, const Optional< QuantizedLstmInputParamsInfo > &quantizedLstmParamsInfo, Optional< std::string & > reasonIfUnsupported) const override
Definition: ClLayerSupport.cpp:184
armnn::LstmDescriptor
An LstmDescriptor for the LstmLayer.
Definition: Descriptors.hpp:1049
armnn::ClBatchToSpaceNdWorkloadValidate
arm_compute::Status ClBatchToSpaceNdWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const BatchToSpaceNdDescriptor &descriptor)
Definition: ClBatchToSpaceNdWorkload.cpp:57
armnn::LayerType::Maximum
@ Maximum
ClSubtractionWorkload.hpp
armnn::FullyConnectedDescriptor
A FullyConnectedDescriptor for the FullyConnectedLayer.
Definition: Descriptors.hpp:475
ClLogicalOrWorkload.hpp
ClNormalizationFloatWorkload.hpp
ClMinimumWorkload.hpp
armnn::LayerType::Quantize
@ Quantize
ClConvertFp32ToFp16Workload.hpp
armnn::ClLayerSupport::IsActivationSupported
bool IsActivationSupported(const TensorInfo &input, const TensorInfo &output, const ActivationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: ClLayerSupport.cpp:612
armnn::BatchMatMulDescriptor
A BatchMatMulDescriptor for the BatchMatMul operator.
Definition: Descriptors.hpp:1531
armnn::ClLayerSupport::IsPooling3dSupported
bool IsPooling3dSupported(const TensorInfo &input, const TensorInfo &output, const Pooling3dDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: ClLayerSupport.cpp:1240
armnn::ClLayerSupport::IsDepthwiseConvolutionSupported
bool IsDepthwiseConvolutionSupported(const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: ClLayerSupport.cpp:890
armnn::ResizeDescriptor
A ResizeDescriptor for the ResizeLayer.
Definition: Descriptors.hpp:932
armnn::LayerType::ArgMinMax
@ ArgMinMax
armnn::StridedSliceDescriptor
A StridedSliceDescriptor for the StridedSliceLayer.
Definition: Descriptors.hpp:1250
armnn::LayerType::Subtraction
@ Subtraction
armnn::ClLogicalAndWorkloadValidate
arm_compute::Status ClLogicalAndWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output)
Definition: ClLogicalAndWorkload.cpp:20
armnn::ClLayerSupport::IsConstantSupported
bool IsConstantSupported(const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: ClLayerSupport.cpp:774
armnn::ClPreluWorkloadValidate
arm_compute::Status ClPreluWorkloadValidate(const TensorInfo &input, const TensorInfo &alpha, const TensorInfo &output)
Definition: ClPreluWorkload.cpp:16
armnn::ClLayerSupport::IsUnidirectionalSequenceLstmSupported
bool IsUnidirectionalSequenceLstmSupported(const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const UnidirectionalSequenceLstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string & > reasonIfUnsupported) const override
Definition: ClLayerSupport.cpp:1481
ClSqrtWorkload.hpp
ClLogSoftmaxWorkload.hpp
armnn::LayerType::SpaceToBatchNd
@ SpaceToBatchNd
armnn::LayerType::Convolution2d
@ Convolution2d
ClLstmFloatWorkload.hpp
armnn::UnaryOperation::Exp
@ Exp
armnn::Pooling3dDescriptor
A Pooling3dDescriptor for the Pooling3dLayer.
Definition: Descriptors.hpp:399
ClPreluWorkload.hpp
ClDepthwiseConvolutionWorkload.hpp
armnn::ReduceDescriptor
A ReduceDescriptor for the REDUCE operators.
Definition: Descriptors.hpp:1485
armnn::ClFloorWorkloadValidate
arm_compute::Status ClFloorWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: ClFloorFloatWorkload.cpp:14
ClTransposeWorkload.hpp
PolymorphicDowncast.hpp
armnn::LayerType::Shape
@ Shape
armnn::ILayerSupport::previousOutputIn
const TensorInfo & previousOutputIn
Definition: ILayerSupport.hpp:405
ClReshapeWorkload.hpp
armnn::ClSliceWorkloadValidate
arm_compute::Status ClSliceWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const SliceDescriptor &descriptor)
Definition: ClSliceWorkload.cpp:18
armnn::ComparisonDescriptor
A ComparisonDescriptor for the ComparisonLayer.
Definition: Descriptors.hpp:89
armnn::IgnoreUnused
void IgnoreUnused(Ts &&...)
Definition: IgnoreUnused.hpp:14
armnn::UnaryOperation::Sqrt
@ Sqrt
armnn::ClBatchNormalizationValidate
arm_compute::Status ClBatchNormalizationValidate(const TensorInfo &input, const TensorInfo &output, const TensorInfo &mean, const TensorInfo &var, const TensorInfo &beta, const TensorInfo &gamma, const BatchNormalizationDescriptor &descriptor, const ActivationDescriptor *activationDescriptor)
Definition: ClBatchNormalizationFloatWorkload.cpp:19
armnn::ClPermuteWorkloadValidate
arm_compute::Status ClPermuteWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const PermuteDescriptor &descriptor)
Definition: ClPermuteWorkload.cpp:17
armnn::UnaryOperation::Neg
@ Neg
armnn::ClGatherNdWorkloadValidate
arm_compute::Status ClGatherNdWorkloadValidate(const TensorInfo &paramsInfo, const TensorInfo &indicesInfo, const TensorInfo &outputInfo)
Definition: ClGatherNdWorkload.cpp:16
armnn::ILayerSupport::mean
const TensorInfo const TensorInfo & mean
Definition: ILayerSupport.hpp:63
armnn::LayerSupportBase::IsMergeSupported
bool IsMergeSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: LayerSupportBase.cpp:404
armnn::ClComparisonWorkloadValidate
arm_compute::Status ClComparisonWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ComparisonDescriptor &descriptor)
Definition: ClComparisonWorkload.cpp:24
armnn::LayerType::Merge
@ Merge
armnn::LayerSupportBase::IsShapeSupported
bool IsShapeSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: LayerSupportBase.cpp:551
armnn::ViewsDescriptor
A ViewsDescriptor for the SplitterLayer.
Definition: Descriptors.hpp:224
armnn::ClLayerSupport::IsDilatedDepthwiseConvolutionSupported
bool IsDilatedDepthwiseConvolutionSupported(const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reason=EmptyOptional()) const override
Definition: ClLayerSupport.cpp:907
armnn::LayerType::Permute
@ Permute
armnn::ClSqrtWorkloadValidate
arm_compute::Status ClSqrtWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: ClSqrtWorkload.cpp:19
armnn::ClSpaceToBatchNdWorkloadValidate
arm_compute::Status ClSpaceToBatchNdWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const SpaceToBatchNdDescriptor &descriptor)
Definition: ClSpaceToBatchNdWorkload.cpp:23
armnn::LayerType::ConvertFp16ToFp32
@ ConvertFp16ToFp32
ClAdditionWorkload.hpp
armnn::ClLayerSupport::IsGatherSupported
bool IsGatherSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const GatherDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported) const override
Definition: ClLayerSupport.cpp:1029
ClConvertFp16ToFp32Workload.hpp
armnn::LayerType::QLstm
@ QLstm
armnn::LayerType::Pad
@ Pad
armnn::LayerType::Addition
@ Addition
armnn::LayerType::QuantizedLstm
@ QuantizedLstm
armnn::DataType::QAsymmS8
@ QAsymmS8
armnn::LayerType::BatchNormalization
@ BatchNormalization
ClBackendModelContext.hpp
armnn::LayerType::Reduce
@ Reduce
ClFloorFloatWorkload.hpp
armnn::ClLayerSupport::IsLstmSupported
bool IsLstmSupported(const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &scratchBuffer, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const LstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: ClLayerSupport.cpp:1125
ClGatherWorkload.hpp
armnn::ClLayerSupport::ClLayerSupport
ClLayerSupport()
Definition: ClLayerSupport.cpp:179
ClFullyConnectedWorkload.hpp
armnn::ClConvertFp32ToFp16WorkloadValidate
arm_compute::Status ClConvertFp32ToFp16WorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: ClConvertFp32ToFp16Workload.cpp:44
armnn::LayerType::Division
@ Division
ClTransposeConvolution2dWorkload.hpp
armnn::ClLogicalNotWorkloadValidate
arm_compute::Status ClLogicalNotWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: ClLogicalNotWorkload.cpp:20
ClAbsWorkload.hpp
ClChannelShuffleWorkload.hpp
armnn::ClLayerSupport::IsResizeSupported
bool IsResizeSupported(const TensorInfo &input, const TensorInfo &output, const ResizeDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: ClLayerSupport.cpp:1339
armnn::ClChannelShuffleValidate
arm_compute::Status ClChannelShuffleValidate(const TensorInfo &input, const TensorInfo &output, const ChannelShuffleDescriptor &descriptor)
Definition: ClChannelShuffleWorkload.cpp:20
armnn::ClLayerSupport::IsSplitterSupported
bool IsSplitterSupported(const TensorInfo &input, const std::vector< std::reference_wrapper< TensorInfo >> &outputs, const ViewsDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: ClLayerSupport.cpp:1387
armnn
Copyright (c) 2021 ARM Limited and Contributors.
Definition: 01_00_quick_start.dox:6
armnn::ClTransposeWorkloadValidate
arm_compute::Status ClTransposeWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const TransposeDescriptor &descriptor)
Definition: ClTransposeWorkload.cpp:17
armnn::LayerType::InstanceNormalization
@ InstanceNormalization
armnn::ClSplitterWorkloadValidate
arm_compute::Status ClSplitterWorkloadValidate(const TensorInfo &input, const std::vector< std::reference_wrapper< TensorInfo >> &outputs, unsigned int splitAxis)
Definition: ClSplitterWorkload.cpp:31
armnn::ClLogSoftmaxWorkloadValidate
arm_compute::Status ClLogSoftmaxWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const LogSoftmaxDescriptor &descriptor)
Definition: ClLogSoftmaxWorkload.cpp:17
ClL2NormalizationFloatWorkload.hpp
armnn::SetValueChecked
void SetValueChecked(Optional< T & > optionalRef, V &&val)
Definition: LayerSupportCommon.hpp:17
armnn::BatchToSpaceNdDescriptor
A BatchToSpaceNdDescriptor for the BatchToSpaceNdLayer.
Definition: Descriptors.hpp:843
armnn::ClMaximumWorkloadValidate
arm_compute::Status ClMaximumWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output)
Definition: ClMaximumWorkload.cpp:24
armnn::ComputeSplitAxis
std::set< unsigned int > ComputeSplitAxis(const armnn::SplitterDescriptor &desc, const TensorShape &input)
Definition: ArmComputeUtils.hpp:244
armnn::OptionalReferenceSwitch< std::is_reference< T >::value, T >::value
const T & value() const
Definition: Optional.hpp:146
armnn::ClGatherWorkloadValidate
arm_compute::Status ClGatherWorkloadValidate(const TensorInfo &input, const TensorInfo &indices, const TensorInfo &output, const GatherDescriptor &descriptor)
Definition: ClGatherWorkload.cpp:15
ClStackWorkload.hpp
ClMultiplicationWorkload.hpp
ClLayerSupport.hpp
armnn::SpaceToDepthDescriptor
A SpaceToDepthDescriptor for the SpaceToDepthLayer.
Definition: Descriptors.hpp:1022
ClNegWorkload.hpp
ClConvolution2dWorkload.hpp
armnn::LayerType::Activation
@ Activation
armnn::ClConcatWorkloadValidate
arm_compute::Status ClConcatWorkloadValidate(const std::vector< const TensorInfo * > &inputs, const TensorInfo &output, const OriginsDescriptor &descriptor)
Definition: ClConcatWorkload.cpp:27
armnn::LayerType::Normalization
@ Normalization
ClResizeWorkload.hpp
ClSpaceToBatchNdWorkload.hpp
armnn::ClLayerSupport::IsAdditionSupported
bool IsAdditionSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: ClLayerSupport.cpp:624
armnn::ClMultiplicationWorkloadValidate
arm_compute::Status ClMultiplicationWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ActivationDescriptor *activationDescriptor)
Definition: ClMultiplicationWorkload.cpp:18
ClActivationWorkload.hpp
armnn::ClTransposeConvolution2dWorkloadValidate
arm_compute::Status ClTransposeConvolution2dWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const TransposeConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases)
Definition: ClTransposeConvolution2dWorkload.cpp:26
armnn::LayerType::Comparison
@ Comparison
armnn::LayerType::Stack
@ Stack
armnn::ClBackendModelContext
The ClBackendModelContext is used to pass in CL specific backend ModelOptions.
Definition: ClBackendModelContext.hpp:28
ClDivisionWorkload.hpp
armnn::ILayerSupport::descriptor
const TensorInfo const ActivationDescriptor & descriptor
Definition: ILayerSupport.hpp:42
armnn::ClSubtractionValidate
arm_compute::Status ClSubtractionValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ActivationDescriptor *activationDescriptor)
Definition: ClSubtractionWorkload.cpp:46
armnn::ClQuantizeWorkloadValidate
arm_compute::Status ClQuantizeWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: ClQuantizeWorkload.cpp:22
armnn::FillDescriptor
A FillDescriptor for the FillLayer.
Definition: Descriptors.hpp:893
armnn::LayerType
LayerType
When adding a new layer, adapt also the LastLayer enum value in the enum class LayerType below.
Definition: Types.hpp:466
armnn::LayerType::Reshape
@ Reshape
ClSpaceToDepthWorkload.hpp
armnn::IBackendInternal::IBackendSpecificModelContextPtr
std::shared_ptr< IBackendModelContext > IBackendSpecificModelContextPtr
Definition: IBackendInternal.hpp:96
armnn::ILayerSupport::previousCellStateIn
const TensorInfo const TensorInfo & previousCellStateIn
Definition: ILayerSupport.hpp:406
armnn::LayerType::Gather
@ Gather
armnn::LayerType::DepthwiseConvolution2d
@ DepthwiseConvolution2d
armnn::LogicalBinaryOperation::LogicalOr
@ LogicalOr
ClMaximumWorkload.hpp
armnn::LayerType::Fill
@ Fill
armnn::ClLayerSupport::IsChannelShuffleSupported
bool IsChannelShuffleSupported(const TensorInfo &input, const TensorInfo &output, const ChannelShuffleDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: ClLayerSupport.cpp:707
armnn::LayerType::Resize
@ Resize
armnn::ClFullyConnectedWorkloadValidate
arm_compute::Status ClFullyConnectedWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const TensorInfo &weights, const Optional< TensorInfo > &biases, const FullyConnectedDescriptor &descriptor, const ActivationDescriptor *activationDescriptor)
Definition: ClFullyConnectedWorkload.cpp:19
ClQuantizedLstmWorkload.hpp
armnn::ILayerSupport::alpha
const TensorInfo & alpha
Definition: ILayerSupport.hpp:392
ClReduceWorkload.hpp
ClQLstmWorkload.hpp
armnn::ClLayerSupport::IsSpaceToDepthSupported
bool IsSpaceToDepthSupported(const TensorInfo &input, const TensorInfo &output, const SpaceToDepthDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: ClLayerSupport.cpp:1375
armnn::ClConvertFp16ToFp32WorkloadValidate
arm_compute::Status ClConvertFp16ToFp32WorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: ClConvertFp16ToFp32Workload.cpp:44
armnn::ClDepthwiseConvolutionWorkloadValidate
arm_compute::Status ClDepthwiseConvolutionWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, const ActivationDescriptor *activationDescriptor)
Definition: ClDepthwiseConvolutionWorkload.cpp:26
armnn::LayerType::Rank
@ Rank
armnn::DepthwiseConvolution2dDescriptor
A DepthwiseConvolution2dDescriptor for the DepthwiseConvolution2dLayer.
Definition: Descriptors.hpp:627
armnn::MeanDescriptor
A MeanDescriptor for the MeanLayer.
Definition: Descriptors.hpp:1119
armnn::TensorInfo::GetNumDimensions
unsigned int GetNumDimensions() const
Definition: Tensor.hpp:195
armnn::ClLayerSupport::IsDequantizeSupported
bool IsDequantizeSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: ClLayerSupport.cpp:868
armnn::TensorInfo::IsTypeSpaceMatch
bool IsTypeSpaceMatch(const TensorInfo &other) const
Check that the types are the same and, if quantize, that the quantization parameters are the same.
Definition: Tensor.cpp:432
ArmComputeTensorUtils.hpp
armnn::ClQuantizedLstmWorkloadValidate
arm_compute::Status ClQuantizedLstmWorkloadValidate(const TensorInfo &input, const TensorInfo &previousCellStateIn, const TensorInfo &previousOutputIn, const TensorInfo &cellStateOut, const TensorInfo &output, const QuantizedLstmInputParamsInfo &paramsInfo)
Definition: ClQuantizedLstmWorkload.cpp:18
ClLogicalAndWorkload.hpp
armnn::LayerType::LogicalBinary
@ LogicalBinary
armnn::LayerType::UnidirectionalSequenceLstm
@ UnidirectionalSequenceLstm
ClMeanWorkload.hpp
armnn::ClPooling3dWorkloadValidate
arm_compute::Status ClPooling3dWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const Pooling3dDescriptor &descriptor)
Definition: ClPooling3dWorkload.cpp:18
armnn::LayerType::Pooling2d
@ Pooling2d
armnn::ClDepthToSpaceWorkloadValidate
arm_compute::Status ClDepthToSpaceWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const DepthToSpaceDescriptor &descriptor)
Definition: ClDepthToSpaceWorkload.cpp:22
InternalTypes.hpp
armnn::L2NormalizationDescriptor
A L2NormalizationDescriptor for the L2NormalizationLayer.
Definition: Descriptors.hpp:777
ClComparisonWorkload.hpp
armnn::ClNormalizationWorkloadValidate
arm_compute::Status ClNormalizationWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const NormalizationDescriptor &descriptor)
Definition: ClNormalizationFloatWorkload.cpp:19
armnn::ClActivationWorkloadValidate
arm_compute::Status ClActivationWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const ActivationDescriptor &descriptor)
Definition: ClActivationWorkload.cpp:17
armnn::ClLogWorkloadValidate
arm_compute::Status ClLogWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: ClLogWorkload.cpp:18
armnn::ILayerSupport::input1
const TensorInfo & input1
Definition: ILayerSupport.hpp:48
armnn::ClLayerSupport::IsInstanceNormalizationSupported
bool IsInstanceNormalizationSupported(const TensorInfo &input, const TensorInfo &output, const InstanceNormalizationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: ClLayerSupport.cpp:1061
armnn::ChannelShuffleDescriptor
A ChannelShuffleDescriptor for the ChannelShuffle operator.
Definition: Descriptors.hpp:1509
armnn::Convolution3dDescriptor
A Convolution3dDescriptor for the Convolution3dLayer.
Definition: Descriptors.hpp:556
ClLogWorkload.hpp
armnn::ClLayerSupport::IsOutputSupported
bool IsOutputSupported(const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: ClLayerSupport.cpp:1206
armnn::LayerType::GatherNd
@ GatherNd
armnn::ClArgMinMaxWorkloadValidate
arm_compute::Status ClArgMinMaxWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const ArgMinMaxDescriptor &descriptor)
Definition: ClArgMinMaxWorkload.cpp:31
armnn::ClLayerSupport::IsDepthToSpaceSupported
bool IsDepthToSpaceSupported(const TensorInfo &input, const TensorInfo &output, const DepthToSpaceDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: ClLayerSupport.cpp:878
ArmComputeUtils.hpp
armnn::TensorInfo
Definition: Tensor.hpp:152
armnn::ILayerSupport::gamma
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo & gamma
Definition: ILayerSupport.hpp:66
armnn::LayerType::Minimum
@ Minimum
armnn::LayerType::Constant
@ Constant
armnn::ClLogicalOrWorkloadValidate
arm_compute::Status ClLogicalOrWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output)
Definition: ClLogicalOrWorkload.cpp:20
armnn::ILayerSupport::var
const TensorInfo const TensorInfo const TensorInfo & var
Definition: ILayerSupport.hpp:64
armnn::ClLayerSupport::IsQuantizedLstmSupported
bool IsQuantizedLstmSupported(const TensorInfo &input, const TensorInfo &previousCellStateIn, const TensorInfo &previousOutputIn, const TensorInfo &cellStateOut, const TensorInfo &output, const QuantizedLstmInputParamsInfo &paramsInfo, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: ClLayerSupport.cpp:1290
armnn::Convolution2dDescriptor
A Convolution2dDescriptor for the Convolution2dLayer.
Definition: Descriptors.hpp:502
ClPermuteWorkload.hpp
armnn::LayerType::Lstm
@ Lstm
armnn::ClLayerSupport::IsGatherNdSupported
bool IsGatherNdSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported) const
Definition: ClLayerSupport.cpp:1043
armnn::ClLayerSupport::IsL2NormalizationSupported
bool IsL2NormalizationSupported(const TensorInfo &input, const TensorInfo &output, const L2NormalizationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: ClLayerSupport.cpp:1073
armnn::BatchNormalizationDescriptor
A BatchNormalizationDescriptor for the BatchNormalizationLayer.
Definition: Descriptors.hpp:796
ClArgMinMaxWorkload.hpp
armnn::ClStridedSliceWorkloadValidate
arm_compute::Status ClStridedSliceWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const StridedSliceDescriptor &descriptor)
Definition: ClStridedSliceWorkload.cpp:27
armnn::QLstmDescriptor
A QLstmDescriptor for the QLstmLayer.
Definition: Descriptors.hpp:1327
LayerSupportCommon.hpp
armnn::LayerType::ElementwiseUnary
@ ElementwiseUnary
armnn::ClNegWorkloadValidate
arm_compute::Status ClNegWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: ClNegWorkload.cpp:18
ClRsqrtWorkload.hpp
armnn::EmptyOptional
EmptyOptional is used to initialize the Optional class in case we want to have default value for an O...
Definition: Optional.hpp:32
armnn::Status
Status
Definition: Types.hpp:42
armnn::LayerType::SpaceToDepth
@ SpaceToDepth
ClConvolution3dWorkload.hpp
ClSliceWorkload.hpp
armnn::ClLayerSupport::IsInputSupported
bool IsInputSupported(const TensorInfo &input, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: ClLayerSupport.cpp:1055
armnn::TensorInfo::GetShape
const TensorShape & GetShape() const
Definition: Tensor.hpp:191
armnn::ILayerSupport::beta
const TensorInfo const TensorInfo const TensorInfo const TensorInfo & beta
Definition: ILayerSupport.hpp:65
ClStridedSliceWorkload.hpp
armnn::ClLayerSupport::IsConvolution2dSupported
bool IsConvolution2dSupported(const TensorInfo &input, const TensorInfo &output, const Convolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: ClLayerSupport.cpp:802
armnn::ClInstanceNormalizationWorkloadValidate
arm_compute::Status ClInstanceNormalizationWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const InstanceNormalizationDescriptor &descriptor)
Definition: ClInstanceNormalizationWorkload.cpp:18
ClSinWorkload.hpp
ClExpWorkload.hpp
armnn::ClConstantWorkloadValidate
arm_compute::Status ClConstantWorkloadValidate(const TensorInfo &output)
Definition: ClConstantWorkload.cpp:18
BackendRegistry.hpp
armnn::ClLayerSupport::IsPermuteSupported
bool IsPermuteSupported(const TensorInfo &input, const TensorInfo &output, const PermuteDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: ClLayerSupport.cpp:1224
ClDepthToSpaceWorkload.hpp
armnn::ClPooling2dWorkloadValidate
arm_compute::Status ClPooling2dWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const Pooling2dDescriptor &descriptor)
Definition: ClPooling2dWorkload.cpp:18
armnn::ILayerSupport::weights
const TensorInfo const Convolution2dDescriptor const TensorInfo & weights
Definition: ILayerSupport.hpp:127
armnn::UnaryOperation::Abs
@ Abs
armnn::LayerType::StridedSlice
@ StridedSlice
armnn::ILayerSupport::cellStateIn
const TensorInfo const TensorInfo & cellStateIn
Definition: ILayerSupport.hpp:287
armnn::LayerType::Unmap
@ Unmap
armnn::ClCastValidate
arm_compute::Status ClCastValidate(const TensorInfo &input, const TensorInfo &output)
Definition: ClCastWorkload.cpp:20
armnn::ILayerSupport::biases
const TensorInfo const Convolution2dDescriptor const TensorInfo const Optional< TensorInfo > & biases
Definition: ILayerSupport.hpp:128
armnn::ClUnidirectionalSequenceLstmFloatWorkloadValidate
arm_compute::Status ClUnidirectionalSequenceLstmFloatWorkloadValidate(const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &output, const Optional< TensorInfo > &hiddenStateOutput, const Optional< TensorInfo > &cellStateOutput, const UnidirectionalSequenceLstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo)
Definition: ClUnidirectionalSequenceLstmFloatWorkload.cpp:508
armnn::LayerType::Mean
@ Mean
armnn::ClLayerSupport::IsConvertFp32ToFp16Supported
bool IsConvertFp32ToFp16Supported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: ClLayerSupport.cpp:792
armnn::BaseDescriptor
Base class for all descriptors.
Definition: Descriptors.hpp:22
armnn::ClExpWorkloadValidate
arm_compute::Status ClExpWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: ClExpWorkload.cpp:18
armnn::ClLayerSupport::IsPreluSupported
bool IsPreluSupported(const TensorInfo &input, const TensorInfo &alpha, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: ClLayerSupport.cpp:1248
armnn::OriginsDescriptor
An OriginsDescriptor for the ConcatLayer.
Definition: Descriptors.hpp:181
armnn::ReshapeDescriptor
A ReshapeDescriptor for the ReshapeLayer.
Definition: Descriptors.hpp:970
armnn::ClResizeWorkloadValidate
arm_compute::Status ClResizeWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const ResizeDescriptor &descriptor)
Definition: ClResizeWorkload.cpp:22
armnn::ClConvolution3dWorkloadValidate
arm_compute::Status ClConvolution3dWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const Convolution3dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, bool isFastMathEnabled, const ActivationDescriptor *activationDescriptor)
Definition: ClConvolution3dWorkload.cpp:23
armnn::LayerType::BatchToSpaceNd
@ BatchToSpaceNd
armnn::ClBackendModelContext::IsFastMathEnabled
bool IsFastMathEnabled() const
Definition: ClBackendModelContext.cpp:66
armnn::DataType
DataType
Definition: Types.hpp:48
armnn::PermuteDescriptor
A PermuteDescriptor for the PermuteLayer.
Definition: Descriptors.hpp:129
armnn::TransposeConvolution2dDescriptor
A TransposeConvolution2dDescriptor for the TransposeConvolution2dLayer.
Definition: Descriptors.hpp:1387
armnn::IsSupportedForDataTypeGeneric
bool IsSupportedForDataTypeGeneric(Optional< std::string & > reasonIfUnsupported, DataType dataType, Float16Func float16FuncPtr, Float32Func float32FuncPtr, Uint8Func uint8FuncPtr, Int32Func int32FuncPtr, BooleanFunc booleanFuncPtr, Params &&... params)
Definition: LayerSupportCommon.hpp:27
armnn::LayerType::DepthToSpace
@ DepthToSpace
ClBatchNormalizationFloatWorkload.hpp
ClInstanceNormalizationWorkload.hpp
armnn::ClLayerSupport::IsReshapeSupported
bool IsReshapeSupported(const TensorInfo &input, const TensorInfo &output, const ReshapeDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: ClLayerSupport.cpp:1330
armnn::ILayerSupport::outputs
const std::vector< std::reference_wrapper< TensorInfo > > & outputs
Definition: ILayerSupport.hpp:488
armnn::ClLayerSupport::IsFloorSupported
bool IsFloorSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: ClLayerSupport.cpp:1002
armnn::Pooling2dDescriptor
A Pooling2dDescriptor for the Pooling2dLayer.
Definition: Descriptors.hpp:339
armnn::LogicalBinaryDescriptor
A LogicalBinaryDescriptor for the LogicalBinaryLayer.
Definition: Descriptors.hpp:1465
ClBatchMatMulWorkload.hpp
armnn::UnaryOperation::LogicalNot
@ LogicalNot
armnn::UnaryOperation::Sin
@ Sin
armnn::ClLayerSupport::IsBatchNormalizationSupported
bool IsBatchNormalizationSupported(const TensorInfo &input, const TensorInfo &output, const TensorInfo &mean, const TensorInfo &var, const TensorInfo &beta, const TensorInfo &gamma, const BatchNormalizationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: ClLayerSupport.cpp:664
armnn::Optional
Definition: Optional.hpp:270
ClPadWorkload.hpp
armnn::ClLayerSupport::IsMultiplicationSupported
bool IsMultiplicationSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: ClLayerSupport.cpp:1185
armnn::PolymorphicDowncast
DestType PolymorphicDowncast(SourceType *value)
Polymorphic downcast for build in pointers only.
Definition: PolymorphicDowncast.hpp:74
armnn::ClLayerSupport::IsFullyConnectedSupported
bool IsFullyConnectedSupported(const TensorInfo &input, const TensorInfo &output, const TensorInfo &weights, const TensorInfo &biases, const FullyConnectedDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: ClLayerSupport.cpp:1012
armnn::LayerType::Concat
@ Concat
armnn::ClLayerSupport::IsMeanSupported
bool IsMeanSupported(const TensorInfo &input, const TensorInfo &output, const MeanDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: ClLayerSupport.cpp:1161
armnn::ArgMinMaxDescriptor
An ArgMinMaxDescriptor for ArgMinMaxLayer.
Definition: Descriptors.hpp:67
armnn::UnaryOperation::Rsqrt
@ Rsqrt
armnn::DataType::QSymmS16
@ QSymmS16
armnn::ClLayerSupport::IsConvertFp16ToFp32Supported
bool IsConvertFp16ToFp32Supported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: ClLayerSupport.cpp:782
armnn::ClLayerSupport::IsArgMinMaxSupported
bool IsArgMinMaxSupported(const TensorInfo &input, const TensorInfo &output, const ArgMinMaxDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: ClLayerSupport.cpp:637
armnn::LayerType::Cast
@ Cast
armnn::ClConvolution2dWorkloadValidate
arm_compute::Status ClConvolution2dWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const Convolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, bool isFastMathEnabled, const ActivationDescriptor *activationDescriptor)
Definition: ClConvolution2dWorkload.cpp:23
ClGatherNdWorkload.hpp
IgnoreUnused.hpp
armnn::ClSpaceToDepthWorkloadValidate
arm_compute::Status ClSpaceToDepthWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const SpaceToDepthDescriptor &descriptor)
Definition: ClSpaceToDepthWorkload.cpp:54
armnn::LayerType::BatchMatMul
@ BatchMatMul
armnn::ClStackWorkloadValidate
arm_compute::Status ClStackWorkloadValidate(const std::vector< const TensorInfo * > &inputs, const TensorInfo &output, const StackDescriptor &descriptor)
Definition: ClStackWorkload.cpp:29
armnn::ClLayerSupport::IsLogicalBinarySupported
bool IsLogicalBinarySupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const LogicalBinaryDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported) const override
Definition: ClLayerSupport.cpp:1085
armnn::LayerType::Convolution3d
@ Convolution3d
armnn::LayerType::Splitter
@ Splitter
armnn::QuantizedLstmInputParamsInfo
Definition: QuantizedLstmParams.hpp:119
ClPooling2dWorkload.hpp
armnn::ILayerSupport::output
const TensorInfo & output
Definition: ILayerSupport.hpp:41
armnn::ClAdditionValidate
arm_compute::Status ClAdditionValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ActivationDescriptor *activationDescriptor)
Definition: ClAdditionWorkload.cpp:45
armnn::ClLayerSupport::IsMaximumSupported
bool IsMaximumSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: ClLayerSupport.cpp:1149
armnn::LayerType::LogSoftmax
@ LogSoftmax
armnn::ClLayerSupport::IsBatchToSpaceNdSupported
bool IsBatchToSpaceNdSupported(const TensorInfo &input, const TensorInfo &output, const BatchToSpaceNdDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: ClLayerSupport.cpp:685
armnn::ClLayerSupport::IsPooling2dSupported
bool IsPooling2dSupported(const TensorInfo &input, const TensorInfo &output, const Pooling2dDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: ClLayerSupport.cpp:1232
armnn::ClLayerSupport::IsSpaceToBatchNdSupported
bool IsSpaceToBatchNdSupported(const TensorInfo &input, const TensorInfo &output, const SpaceToBatchNdDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: ClLayerSupport.cpp:1363
armnn::LayerType::Output
@ Output
ClQuantizeWorkload.hpp
armnn::ClLayerSupport::IsNormalizationSupported
bool IsNormalizationSupported(const TensorInfo &input, const TensorInfo &output, const NormalizationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: ClLayerSupport.cpp:1198
armnn::ClLayerSupport::IsLogSoftmaxSupported
bool IsLogSoftmaxSupported(const TensorInfo &input, const TensorInfo &output, const LogSoftmaxDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: ClLayerSupport.cpp:1113
armnn::ClLayerSupport::IsReduceSupported
bool IsReduceSupported(const TensorInfo &input, const TensorInfo &output, const ReduceDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: ClLayerSupport.cpp:1318
armnn::ClLayerSupport::IsStackSupported
bool IsStackSupported(const std::vector< const TensorInfo * > &inputs, const TensorInfo &output, const StackDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: ClLayerSupport.cpp:1420
armnn::InvalidArgumentException
Definition: Exceptions.hpp:80
armnn::LayerType::Multiplication
@ Multiplication
armnn::LayerType::MemImport
@ MemImport
ClUnidirectionalSequenceLstmFloatWorkload.hpp
armnn::ClLayerSupport::IsQuantizeSupported
bool IsQuantizeSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: ClLayerSupport.cpp:1308
armnn::LayerType::Prelu
@ Prelu
armnn::ClSinWorkloadValidate
arm_compute::Status ClSinWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: ClSinWorkload.cpp:18
armnn::ILayerSupport::outputStateOut
const TensorInfo const TensorInfo const TensorInfo const TensorInfo & outputStateOut
Definition: ILayerSupport.hpp:289
armnn::ClLayerSupport::IsConvolution3dSupported
bool IsConvolution3dSupported(const TensorInfo &input, const TensorInfo &output, const Convolution3dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: ClLayerSupport.cpp:835
armnn::InstanceNormalizationDescriptor
An InstanceNormalizationDescriptor for InstanceNormalizationLayer.
Definition: Descriptors.hpp:815
armnn::ClRsqrtWorkloadValidate
arm_compute::Status ClRsqrtWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: ClRsqrtWorkload.cpp:18
armnn::ClLayerSupport::IsMinimumSupported
bool IsMinimumSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: ClLayerSupport.cpp:1173
armnn::ClLayerSupport::IsComparisonSupported
bool IsComparisonSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &ouput, const ComparisonDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: ClLayerSupport.cpp:719
armnn::ClMinimumWorkloadValidate
arm_compute::Status ClMinimumWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output)
Definition: ClMinimumWorkload.cpp:24
armnn::ILayerSupport::cellStateOut
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo & cellStateOut
Definition: ILayerSupport.hpp:290
armnn::ClPadValidate
arm_compute::Status ClPadValidate(const TensorInfo &input, const TensorInfo &output, const PadDescriptor &descriptor)
Definition: ClPadWorkload.cpp:62
armnn::LayerType::Dequantize
@ Dequantize
armnn::ClLayerSupport::IsStridedSliceSupported
bool IsStridedSliceSupported(const TensorInfo &input, const TensorInfo &output, const StridedSliceDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: ClLayerSupport.cpp:1432
armnn::ClLayerSupport::IsBatchMatMulSupported
bool IsBatchMatMulSupported(const TensorInfo &inputX, const TensorInfo &inputY, const TensorInfo &output, const BatchMatMulDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:650
armnn::LayerSupportBase::IsMemCopySupported
bool IsMemCopySupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: LayerSupportBase.cpp:390
armnn::ClLstmFloatWorkloadValidate
arm_compute::Status ClLstmFloatWorkloadValidate(const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &scratchBuffer, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const LstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo)
Definition: ClLstmFloatWorkload.cpp:244
ClBackendId.hpp
armnn::TensorInfo::GetDataType
DataType GetDataType() const
Definition: Tensor.hpp:198
armnn::UnaryOperation::Log
@ Log
armnn::ClLayerSupport::IsQLstmSupported
bool IsQLstmSupported(const TensorInfo &input, const TensorInfo &previousOutputIn, const TensorInfo &previousCellStateIn, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const QLstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: ClLayerSupport.cpp:1256
armnn::SpaceToBatchNdDescriptor
A SpaceToBatchNdDescriptor for the SpaceToBatchNdLayer.
Definition: Descriptors.hpp:990
armnn::ClLayerSupport::IsTransposeConvolution2dSupported
bool IsTransposeConvolution2dSupported(const TensorInfo &input, const TensorInfo &output, const TransposeConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: ClLayerSupport.cpp:1457
armnn::ClLayerSupport::IsDivisionSupported
bool IsDivisionSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: ClLayerSupport.cpp:925
armnn::ClQLstmWorkloadValidate
arm_compute::Status ClQLstmWorkloadValidate(const TensorInfo &input, const TensorInfo &cellStateIn, const TensorInfo &outputStateIn, const TensorInfo &cellStateOut, const TensorInfo &outputStateOut, const TensorInfo &output, const QLstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo)
Definition: ClQLstmWorkload.cpp:247