ArmNN 23.05
ClLayerSupport.cpp
//
// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "ClLayerSupport.hpp"
#include "ClBackendId.hpp"
#include "ClBackendModelContext.hpp"

#include <InternalTypes.hpp>
#include <LayerSupportCommon.hpp>

#include <armnn/utility/IgnoreUnused.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>

#if defined(ARMCOMPUTECL_ENABLED)
#include <aclCommon/ArmComputeUtils.hpp>
#include <aclCommon/ArmComputeTensorUtils.hpp>
#endif


namespace armnn
{

namespace
{

template<unsigned int FilterSize>
bool IsMatchingSize2d(const TensorInfo& weightInfo)
{
    // Width & Height must match.
    return (weightInfo.GetShape()[3] == FilterSize) && (weightInfo.GetShape()[2] == FilterSize);
}

template<uint32_t ValidStride>
bool IsMatchingStride(uint32_t actualStride)
{
    return ValidStride == actualStride;
}

template<uint32_t FirstStride, uint32_t SecondStride, uint32_t... ValidStrides>
bool IsMatchingStride(uint32_t actualStride)
{
    return IsMatchingStride<FirstStride>(actualStride) || IsMatchingStride<SecondStride, ValidStrides...>(actualStride);
}
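
// Illustrative sketch only (not one of the upstream helpers): how the variadic
// IsMatchingStride overload above expands. The whitelist of strides 1, 2 and 3 is an
// arbitrary example and unrolls into a chain of equality checks.
inline bool ExampleIsStride1To3(uint32_t actualStride)
{
    // Equivalent to: actualStride == 1 || actualStride == 2 || actualStride == 3
    return IsMatchingStride<1, 2, 3>(actualStride);
}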

template<typename ... Args>
bool IsClBackendSupported(Optional<std::string&> reasonIfUnsupported, Args... args)
{
    IgnoreUnused(reasonIfUnsupported, (args)...);
#if defined(ARMCOMPUTECL_ENABLED)
    return true;
#else
    if (reasonIfUnsupported)
    {
        reasonIfUnsupported.value() = "The armnn library has been built without CL support";
    }
    return false;
#endif
}

#if defined(ARMCOMPUTECL_ENABLED)
#define FORWARD_CL_LAYER_SUPPORT_FUNC(expr) (expr)
#else
#define FORWARD_CL_LAYER_SUPPORT_FUNC(expr) IsClBackendSupported(reasonIfUnsupported)
#endif

#if defined(ARMCOMPUTECL_ENABLED)
template<class FuncType, class... Args>
inline bool IsWorkloadSupported(FuncType&& func, Optional<std::string&> reasonIfUnsupported, Args&&... args)
{
    arm_compute::Status aclStatus = func(std::forward<Args>(args)...);
    const bool supported = (aclStatus.error_code() == arm_compute::ErrorCode::OK);
    if (!supported && reasonIfUnsupported)
    {
        reasonIfUnsupported.value() = aclStatus.error_description();
    }
    return supported;
}

#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported, ...) \
    return IsWorkloadSupported(func, reasonIfUnsupported, __VA_ARGS__);
#else
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported, ...) \
    return IsClBackendSupported(reasonIfUnsupported, __VA_ARGS__);
#endif
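
// Sketch of how FORWARD_WORKLOAD_VALIDATE_FUNC is used by the Is*Supported methods
// further down. "ClFooWorkloadValidate" is a hypothetical validate function, not a real
// one; in a CL-enabled build
//
//     bool IsFooSupported(const TensorInfo& input, Optional<std::string&> reason)
//     {
//         FORWARD_WORKLOAD_VALIDATE_FUNC(ClFooWorkloadValidate, reason, input);
//     }
//
// expands to "return IsWorkloadSupported(ClFooWorkloadValidate, reason, input);", so
// the macro both runs the arm_compute validation and returns from the caller, copying
// the failure description into "reason" when the status is not OK.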

template<typename FloatFunc, typename Uint8Func, typename ... Params>
bool IsSupportedForDataTypeCl(Optional<std::string&> reasonIfUnsupported,
                              DataType dataType,
                              FloatFunc floatFuncPtr,
                              Uint8Func uint8FuncPtr,
                              Params&&... params)
{
    return IsClBackendSupported(reasonIfUnsupported) &&
        IsSupportedForDataTypeGeneric(reasonIfUnsupported,
                                      dataType,
                                      floatFuncPtr,
                                      floatFuncPtr,
                                      uint8FuncPtr,
                                      &FalseFunc<>,
                                      &FalseFunc<>,
                                      std::forward<Params>(params)...);
}
} // anonymous namespace
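
// Note on IsSupportedForDataTypeCl above: IsSupportedForDataTypeGeneric dispatches on
// dataType, taking separate predicates for float16, float32, quantized-uint8 and the
// remaining data types; passing floatFuncPtr twice makes Float16 and Float32 share one
// predicate, while the two trailing FalseFunc<> arguments reject everything else.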

ClLayerSupport::ClLayerSupport(const IBackendInternal::IBackendSpecificModelContextPtr& modelContextPtr)
    : m_ModelContextPtr(modelContextPtr)
{
}

ClLayerSupport::ClLayerSupport()
    : m_ModelContextPtr(nullptr)
{
}

bool ClLayerSupport::IsLayerSupported(const LayerType& type,
                                      const std::vector<TensorInfo>& infos,
                                      const BaseDescriptor& descriptor,
                                      const Optional<LstmInputParamsInfo>& lstmParamsInfo,
                                      const Optional<QuantizedLstmInputParamsInfo>& quantizedLstmParamsInfo,
                                      Optional<std::string&> reasonIfUnsupported) const
{
    switch (type)
    {
        case LayerType::Activation:
            return IsActivationSupported(infos[0],
                                         infos[1],
                                         *(PolymorphicDowncast<const ActivationDescriptor*>(&descriptor)),
                                         reasonIfUnsupported);
        case LayerType::Addition:
            ARMNN_NO_DEPRECATE_WARN_BEGIN
            return IsAdditionSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
            ARMNN_NO_DEPRECATE_WARN_END
        case LayerType::ArgMinMax:
            return IsArgMinMaxSupported(infos[0],
                                        infos[1],
                                        *(PolymorphicDowncast<const ArgMinMaxDescriptor*>(&descriptor)),
                                        reasonIfUnsupported);
        case LayerType::BatchMatMul:
            return IsBatchMatMulSupported(infos[0],
                                          infos[1],
                                          infos[2],
                                          *(PolymorphicDowncast<const BatchMatMulDescriptor*>(&descriptor)),
                                          reasonIfUnsupported);
        case LayerType::BatchNormalization:
            return IsBatchNormalizationSupported(infos[0],
                                                 infos[1],
                                                 infos[2],
                                                 infos[3],
                                                 infos[4],
                                                 infos[5],
                                                 *(PolymorphicDowncast<const BatchNormalizationDescriptor*>
                                                     (&descriptor)),
                                                 reasonIfUnsupported);
        case LayerType::BatchToSpaceNd:
            return IsBatchToSpaceNdSupported(infos[0],
                                             infos[1],
                                             *(PolymorphicDowncast<const BatchToSpaceNdDescriptor*>(&descriptor)),
                                             reasonIfUnsupported);
        case LayerType::Cast:
            return IsCastSupported(infos[0], infos[1], reasonIfUnsupported);
        case LayerType::ChannelShuffle:
            return IsChannelShuffleSupported(infos[0],
                                             infos[1],
                                             *(PolymorphicDowncast<const ChannelShuffleDescriptor*>(&descriptor)),
                                             reasonIfUnsupported);
        case LayerType::Comparison:
            return IsComparisonSupported(infos[0],
                                         infos[1],
                                         infos[2],
                                         *(PolymorphicDowncast<const ComparisonDescriptor*>(&descriptor)),
                                         reasonIfUnsupported);
        case LayerType::Concat:
        {
            std::vector<const TensorInfo*> inputInfos;
            for (uint32_t i = 0; i < (infos.size() - 1); i++)
            {
                inputInfos.push_back(&infos[i]);
            }
            return IsConcatSupported(inputInfos,
                                     infos[infos.size() - 1],
                                     *(PolymorphicDowncast<const OriginsDescriptor*>(&descriptor)),
                                     reasonIfUnsupported);
        }
        case LayerType::Constant:
            return IsConstantSupported(infos[0], reasonIfUnsupported);
        case LayerType::ConvertFp16ToFp32:
            return IsConvertFp16ToFp32Supported(infos[0], infos[1], reasonIfUnsupported);
        case LayerType::ConvertFp32ToFp16:
            return IsConvertFp32ToFp16Supported(infos[0], infos[1], reasonIfUnsupported);
        case LayerType::Convolution2d:
        {
            if (infos.size() != 4)
            {
                throw InvalidArgumentException("Invalid number of Convolution2d TensorInfos. "
                                               "TensorInfos should be of format: {input, output, weights, biases}.");
            }

            auto desc = *(PolymorphicDowncast<const Convolution2dDescriptor*>(&descriptor));
            if (infos[3] == TensorInfo())
            {
                return IsConvolution2dSupported(infos[0],
                                                infos[1],
                                                desc,
                                                infos[2],
                                                EmptyOptional(),
                                                reasonIfUnsupported);
            }
            else
            {
                return IsConvolution2dSupported(infos[0],
                                                infos[1],
                                                desc,
                                                infos[2],
                                                infos[3],
                                                reasonIfUnsupported);
            }
        }
        case LayerType::Convolution3d:
        {
            if (infos.size() != 4)
            {
                throw InvalidArgumentException("Invalid number of Convolution3d TensorInfos. "
                                               "TensorInfos should be of format: {input, output, weights, biases}.");
            }

            auto desc = *(PolymorphicDowncast<const Convolution3dDescriptor*>(&descriptor));
            if (infos[3] == TensorInfo())
            {
                return IsConvolution3dSupported(infos[0],
                                                infos[1],
                                                desc,
                                                infos[2],
                                                EmptyOptional(),
                                                reasonIfUnsupported);
            }
            else
            {
                return IsConvolution3dSupported(infos[0],
                                                infos[1],
                                                desc,
                                                infos[2],
                                                infos[3],
                                                reasonIfUnsupported);
            }
        }
        case LayerType::DepthToSpace:
            return IsDepthToSpaceSupported(infos[0],
                                           infos[1],
                                           *(PolymorphicDowncast<const DepthToSpaceDescriptor*>(&descriptor)),
                                           reasonIfUnsupported);
        case LayerType::DepthwiseConvolution2d:
        {
            if (infos.size() != 4)
            {
                throw InvalidArgumentException("Invalid number of DepthwiseConvolution2d TensorInfos. "
                                               "TensorInfos should be of format: {input, output, weights, biases}.");
            }

            auto desc = *(PolymorphicDowncast<const DepthwiseConvolution2dDescriptor*>(&descriptor));
            if (infos[3] == TensorInfo())
            {
                return IsDepthwiseConvolutionSupported(infos[0],
                                                       infos[1],
                                                       desc,
                                                       infos[2],
                                                       EmptyOptional(),
                                                       reasonIfUnsupported);
            }
            else
            {
                return IsDepthwiseConvolutionSupported(infos[0],
                                                       infos[1],
                                                       desc,
                                                       infos[2],
                                                       infos[3],
                                                       reasonIfUnsupported);
            }
        }
        case LayerType::Dequantize:
            return IsDequantizeSupported(infos[0], infos[1], reasonIfUnsupported);
        case LayerType::Division:
            ARMNN_NO_DEPRECATE_WARN_BEGIN
            return IsDivisionSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
            ARMNN_NO_DEPRECATE_WARN_END
        case LayerType::ElementwiseBinary:
        {
            auto desc = *(PolymorphicDowncast<const ElementwiseBinaryDescriptor *>(&descriptor));

            switch (desc.m_Operation)
            {
                case BinaryOperation::Add:
                    FORWARD_WORKLOAD_VALIDATE_FUNC(ClAdditionValidate,
                                                   reasonIfUnsupported,
                                                   infos[0],
                                                   infos[1],
                                                   infos[2],
                                                   nullptr);
                case BinaryOperation::Div:
                    FORWARD_WORKLOAD_VALIDATE_FUNC(ClDivisionWorkloadValidate,
                                                   reasonIfUnsupported,
                                                   infos[0],
                                                   infos[1],
                                                   infos[2],
                                                   nullptr);
                case BinaryOperation::Maximum:
                    FORWARD_WORKLOAD_VALIDATE_FUNC(ClMaximumWorkloadValidate,
                                                   reasonIfUnsupported,
                                                   infos[0],
                                                   infos[1],
                                                   infos[2]);
                case BinaryOperation::Minimum:
                    FORWARD_WORKLOAD_VALIDATE_FUNC(ClMinimumWorkloadValidate,
                                                   reasonIfUnsupported,
                                                   infos[0],
                                                   infos[1],
                                                   infos[2]);
                case BinaryOperation::Mul:
                    FORWARD_WORKLOAD_VALIDATE_FUNC(ClMultiplicationWorkloadValidate,
                                                   reasonIfUnsupported,
                                                   infos[0],
                                                   infos[1],
                                                   infos[2],
                                                   nullptr);
                case BinaryOperation::Sub:
                    FORWARD_WORKLOAD_VALIDATE_FUNC(ClSubtractionValidate,
                                                   reasonIfUnsupported,
                                                   infos[0],
                                                   infos[1],
                                                   infos[2],
                                                   nullptr);
                default:
                    return false;
            }
        }
        case LayerType::ElementwiseUnary:
            return IsElementwiseUnarySupported(infos[0],
                                               infos[1],
                                               *(PolymorphicDowncast<const ElementwiseUnaryDescriptor*>(&descriptor)),
                                               reasonIfUnsupported);
        case LayerType::Fill:
            return IsFillSupported(infos[0],
                                   infos[1],
                                   *(PolymorphicDowncast<const FillDescriptor*>(&descriptor)),
                                   reasonIfUnsupported);
        case LayerType::Floor:
            return IsFloorSupported(infos[0], infos[1], reasonIfUnsupported);
        case LayerType::FullyConnected:
            return IsFullyConnectedSupported(infos[0],
                                             infos[1],
                                             infos[2],
                                             infos[3],
                                             *(PolymorphicDowncast<const FullyConnectedDescriptor*>(&descriptor)),
                                             reasonIfUnsupported);
        case LayerType::Gather:
            return IsGatherSupported(infos[0],
                                     infos[1],
                                     infos[2],
                                     *(PolymorphicDowncast<const GatherDescriptor*>(&descriptor)),
                                     reasonIfUnsupported);
        case LayerType::GatherNd:
            return IsGatherNdSupported(infos[0],
                                       infos[1],
                                       infos[2],
                                       reasonIfUnsupported);
        case LayerType::Input:
            return IsInputSupported(infos[0], reasonIfUnsupported);
        case LayerType::InstanceNormalization:
            return IsInstanceNormalizationSupported(infos[0],
                                                    infos[1],
                                                    *(PolymorphicDowncast<const InstanceNormalizationDescriptor*>
                                                        (&descriptor)),
                                                    reasonIfUnsupported);
        case LayerType::L2Normalization:
            return IsL2NormalizationSupported(infos[0],
                                              infos[1],
                                              *(PolymorphicDowncast<const L2NormalizationDescriptor*>(&descriptor)),
                                              reasonIfUnsupported);
        case LayerType::LogicalBinary:
            return IsLogicalBinarySupported(infos[0],
                                            infos[1],
                                            infos[2],
                                            *(PolymorphicDowncast<const LogicalBinaryDescriptor*>(&descriptor)),
                                            reasonIfUnsupported);
        case LayerType::LogSoftmax:
            return IsLogSoftmaxSupported(infos[0],
                                         infos[1],
                                         *(PolymorphicDowncast<const LogSoftmaxDescriptor*>(&descriptor)),
                                         reasonIfUnsupported);
        case LayerType::Lstm:
            return IsLstmSupported(infos[0],
                                   infos[1],
                                   infos[2],
                                   infos[3],
                                   infos[4],
                                   infos[5],
                                   infos[6],
                                   *(PolymorphicDowncast<const LstmDescriptor*>(&descriptor)),
                                   lstmParamsInfo.value(),
                                   reasonIfUnsupported);
        case LayerType::Map:
            return true;
        case LayerType::MemCopy:
            return LayerSupportBase::IsMemCopySupported(infos[0], infos[1], reasonIfUnsupported);
        case LayerType::MemImport:
            return LayerSupportBase::IsMemImportSupported(infos[0], infos[1], reasonIfUnsupported);
        case LayerType::Merge:
            return LayerSupportBase::IsMergeSupported(infos[0],
                                                      infos[1],
                                                      infos[2],
                                                      reasonIfUnsupported);
        case LayerType::Maximum:
            ARMNN_NO_DEPRECATE_WARN_BEGIN
            return IsMaximumSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
            ARMNN_NO_DEPRECATE_WARN_END
        case LayerType::Mean:
            return IsMeanSupported(infos[0],
                                   infos[1],
                                   *(PolymorphicDowncast<const MeanDescriptor*>(&descriptor)),
                                   reasonIfUnsupported);
        case LayerType::Minimum:
            ARMNN_NO_DEPRECATE_WARN_BEGIN
            return IsMinimumSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
            ARMNN_NO_DEPRECATE_WARN_END
        case LayerType::Multiplication:
            ARMNN_NO_DEPRECATE_WARN_BEGIN
            return IsMultiplicationSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
            ARMNN_NO_DEPRECATE_WARN_END
        case LayerType::Normalization:
            return IsNormalizationSupported(infos[0],
                                            infos[1],
                                            *(PolymorphicDowncast<const NormalizationDescriptor*>(&descriptor)),
                                            reasonIfUnsupported);
        case LayerType::Output:
            return IsOutputSupported(infos[0], reasonIfUnsupported);
        case LayerType::Pad:
            return IsPadSupported(infos[0],
                                  infos[1],
                                  *(PolymorphicDowncast<const PadDescriptor*>(&descriptor)),
                                  reasonIfUnsupported);
        case LayerType::Permute:
            return IsPermuteSupported(infos[0],
                                      infos[1],
                                      *(PolymorphicDowncast<const PermuteDescriptor*>(&descriptor)),
                                      reasonIfUnsupported);
        case LayerType::Pooling2d:
            return IsPooling2dSupported(infos[0],
                                        infos[1],
                                        *(PolymorphicDowncast<const Pooling2dDescriptor*>(&descriptor)),
                                        reasonIfUnsupported);
        case LayerType::Pooling3d:
            return IsPooling3dSupported(infos[0],
                                        infos[1],
                                        *(PolymorphicDowncast<const Pooling3dDescriptor*>(&descriptor)),
                                        reasonIfUnsupported);
        case LayerType::Prelu:
            return IsPreluSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
        case LayerType::QLstm:
            return IsQLstmSupported(infos[0],
                                    infos[1],
                                    infos[2],
                                    infos[3],
                                    infos[4],
                                    infos[5],
                                    *(PolymorphicDowncast<const QLstmDescriptor*>(&descriptor)),
                                    lstmParamsInfo.value(),
                                    reasonIfUnsupported);
        case LayerType::Quantize:
            return IsQuantizeSupported(infos[0], infos[1], reasonIfUnsupported);
        case LayerType::QuantizedLstm:
            return IsQuantizedLstmSupported(infos[0],
                                            infos[1],
                                            infos[2],
                                            infos[3],
                                            infos[4],
                                            quantizedLstmParamsInfo.value(),
                                            reasonIfUnsupported);
        case LayerType::Rank:
            return true;
        case LayerType::Reduce:
            return IsReduceSupported(infos[0],
                                     infos[1],
                                     *(PolymorphicDowncast<const ReduceDescriptor*>(&descriptor)),
                                     reasonIfUnsupported);
        case LayerType::Reshape:
            return IsReshapeSupported(infos[0],
                                      infos[1],
                                      *(PolymorphicDowncast<const ReshapeDescriptor*>(&descriptor)),
                                      reasonIfUnsupported);
        case LayerType::Resize:
            return IsResizeSupported(infos[0],
                                     infos[1],
                                     *(PolymorphicDowncast<const ResizeDescriptor*>(&descriptor)),
                                     reasonIfUnsupported);
        case LayerType::Shape:
            return LayerSupportBase::IsShapeSupported(infos[0],
                                                      infos[1],
                                                      reasonIfUnsupported);
        case LayerType::Slice:
            return IsSliceSupported(infos[0],
                                    infos[1],
                                    *(PolymorphicDowncast<const SliceDescriptor*>(&descriptor)),
                                    reasonIfUnsupported);
        case LayerType::Softmax:
            return IsSoftmaxSupported(infos[0],
                                      infos[1],
                                      *(PolymorphicDowncast<const SoftmaxDescriptor*>(&descriptor)),
                                      reasonIfUnsupported);
        case LayerType::SpaceToBatchNd:
            return IsSpaceToBatchNdSupported(infos[0],
                                             infos[1],
                                             *(PolymorphicDowncast<const SpaceToBatchNdDescriptor*>(&descriptor)),
                                             reasonIfUnsupported);
        case LayerType::SpaceToDepth:
            return IsSpaceToDepthSupported(infos[0],
                                           infos[1],
                                           *(PolymorphicDowncast<const SpaceToDepthDescriptor*>(&descriptor)),
                                           reasonIfUnsupported);
        case LayerType::Splitter:
        {
            std::vector<TensorInfo> outputInfos;
            for (uint32_t i = 1; i < infos.size(); i++)
            {
                outputInfos.push_back(infos[i]);
            }
            return IsSplitterSupported(infos[0],
                                       {outputInfos.begin(), outputInfos.end()},
                                       *(PolymorphicDowncast<const ViewsDescriptor*>(&descriptor)),
                                       reasonIfUnsupported);
        }
        case LayerType::Stack:
        {
            std::vector<const TensorInfo*> inputInfos;
            for (uint32_t i = 0; i < infos.size() - 1; i++)
            {
                inputInfos.push_back(&infos[i]);
            }
            return IsStackSupported(inputInfos,
                                    infos[infos.size() - 1],
                                    *(PolymorphicDowncast<const StackDescriptor*>(&descriptor)),
                                    reasonIfUnsupported);
        }
        case LayerType::StridedSlice:
            return IsStridedSliceSupported(infos[0],
                                           infos[1],
                                           *(PolymorphicDowncast<const StridedSliceDescriptor*>(&descriptor)),
                                           reasonIfUnsupported);
        case LayerType::Subtraction:
            ARMNN_NO_DEPRECATE_WARN_BEGIN
            return IsSubtractionSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
            ARMNN_NO_DEPRECATE_WARN_END
        case LayerType::Transpose:
            return IsTransposeSupported(infos[0],
                                        infos[1],
                                        *(PolymorphicDowncast<const TransposeDescriptor*>(&descriptor)),
                                        reasonIfUnsupported);
        case LayerType::TransposeConvolution2d:
        {
            if (infos.size() != 4)
            {
                throw InvalidArgumentException("Invalid number of TransposeConvolution2d TensorInfos. "
                                               "TensorInfos should be of format: {input, output, weights, biases}.");
            }

            auto desc = *(PolymorphicDowncast<const TransposeConvolution2dDescriptor*>(&descriptor));
            if (infos[3] == TensorInfo())
            {
                return IsTransposeConvolution2dSupported(infos[0],
                                                         infos[1],
                                                         desc,
                                                         infos[2],
                                                         EmptyOptional(),
                                                         reasonIfUnsupported);
            }
            else
            {
                return IsTransposeConvolution2dSupported(infos[0],
                                                         infos[1],
                                                         desc,
                                                         infos[2],
                                                         infos[3],
                                                         reasonIfUnsupported);
            }
        }
        case LayerType::UnidirectionalSequenceLstm:
            return IsUnidirectionalSequenceLstmSupported(infos[0],
                                                         infos[1],
                                                         infos[2],
                                                         infos[3],
                                                         infos[4],
                                                         infos[5],
                                                         *(PolymorphicDowncast<const
                                                             UnidirectionalSequenceLstmDescriptor*>(&descriptor)),
                                                         lstmParamsInfo.value(),
                                                         reasonIfUnsupported);
        case LayerType::Unmap:
            return true;
        default:
            // layers not supported in cl by default:
            // debug, detectionpostprocess, fakequantization,
            // precompiled, standin, switch, pooling3d
            return false;
    }
}
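
// Example (illustrative sketch, not part of the backend): querying Activation support
// through the generic entry point above. The tensor shape, data type and activation
// function are arbitrary assumptions chosen for the example.
namespace
{
inline bool ExampleIsReluSupportedOnCl(std::string& reasonIfUnsupported)
{
    ClLayerSupport layerSupport;
    const TensorInfo tensorInfo({1, 16, 8, 8}, DataType::Float32);
    ActivationDescriptor activationDesc;
    activationDesc.m_Function = ActivationFunction::ReLu;

    // infos = {input, output}; no (quantized) LSTM parameters are needed for Activation.
    return layerSupport.IsLayerSupported(LayerType::Activation,
                                         {tensorInfo, tensorInfo},
                                         activationDesc,
                                         EmptyOptional(),
                                         EmptyOptional(),
                                         Optional<std::string&>(reasonIfUnsupported));
}
} // anonymous namespace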

bool ClLayerSupport::IsActivationSupported(const TensorInfo& input,
                                           const TensorInfo& output,
                                           const ActivationDescriptor& descriptor,
                                           Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClActivationWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}

bool ClLayerSupport::IsAdditionSupported(const TensorInfo& input0,
                                         const TensorInfo& input1,
                                         const TensorInfo& output,
                                         Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClAdditionValidate,
                                   reasonIfUnsupported,
                                   input0,
                                   input1,
                                   output,
                                   nullptr);
}

bool ClLayerSupport::IsArgMinMaxSupported(const TensorInfo& input,
                                          const TensorInfo& output,
                                          const ArgMinMaxDescriptor& descriptor,
                                          Optional<std::string&> reasonIfUnsupported) const
{

    FORWARD_WORKLOAD_VALIDATE_FUNC(ClArgMinMaxWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}

bool ClLayerSupport::IsBatchMatMulSupported(const TensorInfo& inputX,
                                            const TensorInfo& inputY,
                                            const TensorInfo& output,
                                            const BatchMatMulDescriptor& descriptor,
                                            Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClBatchMatMulValidate,
                                   reasonIfUnsupported,
                                   inputX,
                                   inputY,
                                   output,
                                   descriptor,
                                   nullptr);
}

bool ClLayerSupport::IsBatchNormalizationSupported(const TensorInfo& input,
                                                   const TensorInfo& output,
                                                   const TensorInfo& mean,
                                                   const TensorInfo& var,
                                                   const TensorInfo& beta,
                                                   const TensorInfo& gamma,
                                                   const BatchNormalizationDescriptor& descriptor,
                                                   Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClBatchNormalizationValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   mean,
                                   var,
                                   beta,
                                   gamma,
                                   descriptor,
                                   nullptr);
}

bool ClLayerSupport::IsBatchToSpaceNdSupported(const TensorInfo& input,
                                               const TensorInfo& output,
                                               const BatchToSpaceNdDescriptor& descriptor,
                                               Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClBatchToSpaceNdWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}

bool ClLayerSupport::IsCastSupported(const TensorInfo& input,
                                     const TensorInfo& output,
                                     Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClCastValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output);
}

bool ClLayerSupport::IsChannelShuffleSupported(const TensorInfo& input,
                                               const TensorInfo& output,
                                               const ChannelShuffleDescriptor& descriptor,
                                               Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClChannelShuffleValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}

bool ClLayerSupport::IsComparisonSupported(const TensorInfo& input0,
                                           const TensorInfo& input1,
                                           const TensorInfo& output,
                                           const ComparisonDescriptor& descriptor,
                                           Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClComparisonWorkloadValidate,
                                   reasonIfUnsupported,
                                   input0,
                                   input1,
                                   output,
                                   descriptor);
}

bool ClLayerSupport::IsConcatSupported(const std::vector<const TensorInfo*> inputs,
                                       const TensorInfo& output,
                                       const OriginsDescriptor& descriptor,
                                       Optional<std::string&> reasonIfUnsupported) const
{
    if (descriptor.GetNumDimensions() <= descriptor.GetConcatAxis())
    {
        SetValueChecked(reasonIfUnsupported, "Cl Concat: Concat axis > Number of dimensions.");
        return false;
    }

    unsigned int concatInnerAxis = (descriptor.GetNumDimensions() - descriptor.GetConcatAxis()) - 1;
    if(concatInnerAxis < 3) // Width, height, or channels
    {
        FORWARD_WORKLOAD_VALIDATE_FUNC(ClConcatWorkloadValidate,
                                       reasonIfUnsupported,
                                       inputs,
                                       output,
                                       descriptor);
    }
    else if (concatInnerAxis == 3)
    {
        // We rely on the sub-tensor optimization to handle the batch dimension for 4D tensors. If we can't use
        // sub-tensors for this then we can't support it. Here is where we check that the sub-tensors will work.
        for (auto& input : inputs)
        {
            if (input && !output.IsTypeSpaceMatch(*input)) // Cannot use sub-tensors if the types are not same space
            {
                SetValueChecked(reasonIfUnsupported, "Cl Concat: Types and quantization parameters must match.");
                return false;
            }
        }
        return true; // Sub-tensors support concat along batch
    }
    else // > 4 dimensions not supported.
    {
        SetValueChecked(reasonIfUnsupported, "Cl Concat: Maximum of 4 dimensions supported.");
        return false;
    }
}
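
// Worked example (illustrative only): how the inner-axis index above is derived.
// For a 4D concat with GetNumDimensions() == 4 and GetConcatAxis() == 0 (the batch
// axis), concatInnerAxis = (4 - 0) - 1 = 3, which is the sub-tensor path; concatenating
// over channels (axis 1) gives (4 - 1) - 1 = 2 and stays on the ClConcatWorkloadValidate
// path.
constexpr unsigned int ExampleConcatInnerAxis(unsigned int numDimensions, unsigned int concatAxis)
{
    return (numDimensions - concatAxis) - 1;
}
static_assert(ExampleConcatInnerAxis(4, 0) == 3, "batch concat takes the sub-tensor path");
static_assert(ExampleConcatInnerAxis(4, 1) == 2, "channel concat uses ClConcatWorkloadValidate");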

bool ClLayerSupport::IsConstantSupported(const TensorInfo& output,
                                         Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClConstantWorkloadValidate,
                                   reasonIfUnsupported,
                                   output);
}

bool ClLayerSupport::IsConvertFp16ToFp32Supported(const TensorInfo& input,
                                                  const TensorInfo& output,
                                                  Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClConvertFp16ToFp32WorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output);
}

bool ClLayerSupport::IsConvertFp32ToFp16Supported(const TensorInfo& input,
                                                  const TensorInfo& output,
                                                  Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClConvertFp32ToFp16WorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output);
}

bool ClLayerSupport::IsConvolution2dSupported(const TensorInfo& input,
                                              const TensorInfo& output,
                                              const Convolution2dDescriptor& descriptor,
                                              const TensorInfo& weights,
                                              const Optional<TensorInfo>& biases,
                                              Optional<std::string&> reasonIfUnsupported) const
{
    bool isFastMathEnabled = false;
#if defined(ARMCOMPUTECL_ENABLED)
    if (m_ModelContextPtr)
    {
        if (m_ModelContextPtr.get() != nullptr)
        {
            auto modelOptions = dynamic_cast<ClBackendModelContext*>(m_ModelContextPtr.get());
            if (modelOptions)
            {
                isFastMathEnabled = modelOptions->IsFastMathEnabled();
            }
        }
    }
#endif

    FORWARD_WORKLOAD_VALIDATE_FUNC(ClConvolution2dWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor,
                                   weights,
                                   biases,
                                   isFastMathEnabled,
                                   nullptr);
}

bool ClLayerSupport::IsConvolution3dSupported(const TensorInfo& input,
                                              const TensorInfo& output,
                                              const Convolution3dDescriptor& descriptor,
                                              const TensorInfo& weights,
                                              const Optional<TensorInfo>& biases,
                                              Optional<std::string&> reasonIfUnsupported) const
{
    bool isFastMathEnabled = false;
#if defined(ARMCOMPUTECL_ENABLED)
    if (m_ModelContextPtr)
    {
        if (m_ModelContextPtr.get() != nullptr)
        {
            auto modelOptions = dynamic_cast<ClBackendModelContext*>(m_ModelContextPtr.get());
            if (modelOptions)
            {
                isFastMathEnabled = modelOptions->IsFastMathEnabled();
            }
        }
    }
#endif

    FORWARD_WORKLOAD_VALIDATE_FUNC(ClConvolution3dWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor,
                                   weights,
                                   biases,
                                   isFastMathEnabled,
                                   nullptr);
}
932  const TensorInfo& output,
933  Optional<std::string&> reasonIfUnsupported) const
934 {
937  input,
938  output);
939 }
940 
942  const TensorInfo& output,
943  const DepthToSpaceDescriptor& descriptor,
944  Optional<std::string&> reasonIfUnsupported) const
945 {
948  input,
949  output,
950  descriptor);
951 }
952 
954  const TensorInfo& output,
955  const DepthwiseConvolution2dDescriptor& descriptor,
956  const TensorInfo& weights,
957  const Optional<TensorInfo>& biases,
958  Optional<std::string&> reasonIfUnsupported) const
959 {
962  input,
963  output,
964  descriptor,
965  weights,
966  biases,
967  nullptr);
968 }
969 
971  const TensorInfo& output,
972  const DepthwiseConvolution2dDescriptor& descriptor,
973  const TensorInfo& weights,
974  const Optional<TensorInfo>& biases,
975  Optional<std::string&> reasonIfUnsupported) const
976 {
979  input,
980  output,
981  descriptor,
982  weights,
983  biases,
984  nullptr);
985 }
986 
987 
989  const TensorInfo& input1,
990  const TensorInfo& output,
991  Optional<std::string&> reasonIfUnsupported) const
992 {
995  input0,
996  input1,
997  output,
998  nullptr);
999 }
1000 
1002  const TensorInfo& output,
1003  const ElementwiseUnaryDescriptor& descriptor,
1004  Optional<std::string&> reasonIfUnsupported) const
1005 {
1006  switch(descriptor.m_Operation)
1007  {
1008  case UnaryOperation::Abs:
1011  input,
1012  output);
1013  case UnaryOperation::Exp:
1016  input,
1017  output);
1018  case UnaryOperation::Log:
1021  input,
1022  output);
1026  input,
1027  output);
1028  case UnaryOperation::Neg:
1031  input,
1032  output);
1033  case UnaryOperation::Rsqrt:
1036  input,
1037  output);
1038  case UnaryOperation::Sin:
1041  input,
1042  output);
1043  case UnaryOperation::Sqrt:
1046  input,
1047  output);
1048  default:
1049  return false;
1050  }
1051 }

bool ClLayerSupport::IsFillSupported(const TensorInfo& input,
                                     const TensorInfo& output,
                                     const FillDescriptor& descriptor,
                                     Optional<std::string&> reasonIfUnsupported) const
{
    armnn::IgnoreUnused(input);
    armnn::IgnoreUnused(output);
    armnn::IgnoreUnused(descriptor);

    return IsClBackendSupported(reasonIfUnsupported);
}

bool ClLayerSupport::IsFloorSupported(const TensorInfo& input,
                                      const TensorInfo& output,
                                      Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClFloorWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output);
}

bool ClLayerSupport::IsFullyConnectedSupported(const TensorInfo& input,
                                               const TensorInfo& output,
                                               const TensorInfo& weights,
                                               const TensorInfo& biases,
                                               const FullyConnectedDescriptor& descriptor,
                                               Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClFullyConnectedWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   weights,
                                   biases,
                                   descriptor,
                                   nullptr);
}

bool ClLayerSupport::IsGatherSupported(const TensorInfo& input0,
                                       const TensorInfo& input1,
                                       const TensorInfo& output,
                                       const GatherDescriptor& descriptor,
                                       Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClGatherWorkloadValidate,
                                   reasonIfUnsupported,
                                   input0,
                                   input1,
                                   output,
                                   descriptor);
}

bool ClLayerSupport::IsGatherNdSupported(const TensorInfo& input0,
                                         const TensorInfo& input1,
                                         const TensorInfo& output,
                                         Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClGatherNdWorkloadValidate,
                                   reasonIfUnsupported,
                                   input0,
                                   input1,
                                   output);
}

bool ClLayerSupport::IsInputSupported(const TensorInfo& input,
                                      Optional<std::string&> reasonIfUnsupported) const
{
    return IsClBackendSupported(reasonIfUnsupported, input);
}

bool ClLayerSupport::IsInstanceNormalizationSupported(const TensorInfo& input,
                                                      const TensorInfo& output,
                                                      const InstanceNormalizationDescriptor& descriptor,
                                                      Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClInstanceNormalizationWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}

bool ClLayerSupport::IsL2NormalizationSupported(const TensorInfo& input,
                                                const TensorInfo& output,
                                                const L2NormalizationDescriptor& descriptor,
                                                Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClL2NormalizationWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}

bool ClLayerSupport::IsLogicalBinarySupported(const TensorInfo& input0,
                                              const TensorInfo& input1,
                                              const TensorInfo& output,
                                              const LogicalBinaryDescriptor& descriptor,
                                              Optional<std::string&> reasonIfUnsupported) const
{
    IgnoreUnused(output);

    switch(descriptor.m_Operation)
    {
        case LogicalBinaryOperation::LogicalAnd:
            FORWARD_WORKLOAD_VALIDATE_FUNC(ClLogicalAndWorkloadValidate,
                                           reasonIfUnsupported,
                                           input0,
                                           input1,
                                           output);
        case LogicalBinaryOperation::LogicalOr:
            FORWARD_WORKLOAD_VALIDATE_FUNC(ClLogicalOrWorkloadValidate,
                                           reasonIfUnsupported,
                                           input0,
                                           input1,
                                           output);
        default:
            return false;
    }
}


bool ClLayerSupport::IsLogSoftmaxSupported(const TensorInfo& input,
                                           const TensorInfo& output,
                                           const LogSoftmaxDescriptor& descriptor,
                                           Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClLogSoftmaxWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}

bool ClLayerSupport::IsLstmSupported(const TensorInfo& input,
                                     const TensorInfo& outputStateIn,
                                     const TensorInfo& cellStateIn,
                                     const TensorInfo& scratchBuffer,
                                     const TensorInfo& outputStateOut,
                                     const TensorInfo& cellStateOut,
                                     const TensorInfo& output,
                                     const LstmDescriptor& descriptor,
                                     const LstmInputParamsInfo& paramsInfo,
                                     Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClLstmFloatWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   outputStateIn,
                                   cellStateIn,
                                   scratchBuffer,
                                   outputStateOut,
                                   cellStateOut,
                                   output,
                                   descriptor,
                                   paramsInfo);
}

bool ClLayerSupport::IsMaximumSupported(const TensorInfo& input0,
                                        const TensorInfo& input1,
                                        const TensorInfo& output,
                                        Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClMaximumWorkloadValidate,
                                   reasonIfUnsupported,
                                   input0,
                                   input1,
                                   output);
}

bool ClLayerSupport::IsMeanSupported(const TensorInfo& input,
                                     const TensorInfo& output,
                                     const MeanDescriptor& descriptor,
                                     Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClMeanValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}

bool ClLayerSupport::IsMinimumSupported(const TensorInfo& input0,
                                        const TensorInfo& input1,
                                        const TensorInfo& output,
                                        Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClMinimumWorkloadValidate,
                                   reasonIfUnsupported,
                                   input0,
                                   input1,
                                   output);
}

bool ClLayerSupport::IsMultiplicationSupported(const TensorInfo& input0,
                                               const TensorInfo& input1,
                                               const TensorInfo& output,
                                               Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClMultiplicationWorkloadValidate,
                                   reasonIfUnsupported,
                                   input0,
                                   input1,
                                   output,
                                   nullptr);
}

bool ClLayerSupport::IsNormalizationSupported(const TensorInfo& input,
                                              const TensorInfo& output,
                                              const NormalizationDescriptor& descriptor,
                                              Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClNormalizationWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
}

bool ClLayerSupport::IsOutputSupported(const TensorInfo& output,
                                       Optional<std::string&> reasonIfUnsupported) const
{
    return IsClBackendSupported(reasonIfUnsupported, output);
}

bool ClLayerSupport::IsPadSupported(const TensorInfo& input,
                                    const TensorInfo& output,
                                    const PadDescriptor& descriptor,
                                    Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClPadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}

bool ClLayerSupport::IsPermuteSupported(const TensorInfo& input,
                                        const TensorInfo& output,
                                        const PermuteDescriptor& descriptor,
                                        Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClPermuteWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
}

bool ClLayerSupport::IsPooling2dSupported(const TensorInfo& input,
                                          const TensorInfo& output,
                                          const Pooling2dDescriptor& descriptor,
                                          Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClPooling2dWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
}

bool ClLayerSupport::IsPooling3dSupported(const TensorInfo& input,
                                          const TensorInfo& output,
                                          const Pooling3dDescriptor& descriptor,
                                          Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClPooling3dWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
}

bool ClLayerSupport::IsPreluSupported(const armnn::TensorInfo &input,
                                      const armnn::TensorInfo &alpha,
                                      const armnn::TensorInfo &output,
                                      armnn::Optional<std::string &> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClPreluWorkloadValidate, reasonIfUnsupported, input, alpha, output);
}

bool ClLayerSupport::IsQLstmSupported(const TensorInfo& input,
                                      const TensorInfo& previousOutputIn,
                                      const TensorInfo& previousCellStateIn,
                                      const TensorInfo& outputStateOut,
                                      const TensorInfo& cellStateOut,
                                      const TensorInfo& output,
                                      const QLstmDescriptor& descriptor,
                                      const LstmInputParamsInfo& paramsInfo,
                                      Optional<std::string&> reasonIfUnsupported) const
{
    if (input.GetDataType() == armnn::DataType::QAsymmS8 &&
        previousOutputIn.GetDataType() == armnn::DataType::QAsymmS8 &&
        previousCellStateIn.GetDataType() == armnn::DataType::QSymmS16 &&
        outputStateOut.GetDataType() == armnn::DataType::QAsymmS8 &&
        cellStateOut.GetDataType() == armnn::DataType::QSymmS16 &&
        output.GetDataType() == armnn::DataType::QAsymmS8)
    {
        FORWARD_WORKLOAD_VALIDATE_FUNC(ClQLstmWorkloadValidate,
                                       reasonIfUnsupported,
                                       input,
                                       previousCellStateIn,
                                       previousOutputIn,
                                       cellStateOut,
                                       outputStateOut,
                                       output,
                                       descriptor,
                                       paramsInfo);
    }
    else
    {
        return false;
    }
}

bool ClLayerSupport::IsQuantizedLstmSupported(const TensorInfo& input,
                                              const TensorInfo& previousCellStateIn,
                                              const TensorInfo& previousOutputIn,
                                              const TensorInfo& cellStateOut,
                                              const TensorInfo& output,
                                              const QuantizedLstmInputParamsInfo& paramsInfo,
                                              Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClQuantizedLstmWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   previousCellStateIn,
                                   previousOutputIn,
                                   cellStateOut,
                                   output,
                                   paramsInfo);
}

bool ClLayerSupport::IsQuantizeSupported(const TensorInfo& input,
                                         const TensorInfo& output,
                                         Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClQuantizeWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output);
}

bool ClLayerSupport::IsReduceSupported(const TensorInfo& input,
                                       const TensorInfo& output,
                                       const ReduceDescriptor& descriptor,
                                       Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClReduceWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}

bool ClLayerSupport::IsReshapeSupported(const TensorInfo& input,
                                        const TensorInfo& output,
                                        const ReshapeDescriptor& descriptor,
                                        Optional<std::string&> reasonIfUnsupported) const
{
    IgnoreUnused(descriptor);
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClReshapeWorkloadValidate, reasonIfUnsupported, input, output);
}

bool ClLayerSupport::IsResizeSupported(const TensorInfo& input,
                                       const TensorInfo& output,
                                       const ResizeDescriptor& descriptor,
                                       Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClResizeWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
}

bool ClLayerSupport::IsSliceSupported(const TensorInfo& input,
                                      const TensorInfo& output,
                                      const SliceDescriptor& descriptor,
                                      Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClSliceWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
}

bool ClLayerSupport::IsSoftmaxSupported(const TensorInfo& input,
                                        const TensorInfo& output,
                                        const SoftmaxDescriptor& descriptor,
                                        Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClSoftmaxWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
}

bool ClLayerSupport::IsSpaceToBatchNdSupported(const TensorInfo& input,
                                               const TensorInfo& output,
                                               const SpaceToBatchNdDescriptor& descriptor,
                                               Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClSpaceToBatchNdWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}

bool ClLayerSupport::IsSpaceToDepthSupported(const TensorInfo& input,
                                             const TensorInfo& output,
                                             const SpaceToDepthDescriptor& descriptor,
                                             Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClSpaceToDepthWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}

bool ClLayerSupport::IsSplitterSupported(const TensorInfo& input,
                                         const std::vector<std::reference_wrapper<TensorInfo>>& outputs,
                                         const ViewsDescriptor& descriptor,
                                         Optional<std::string&> reasonIfUnsupported) const
{
#if defined(ARMCOMPUTECL_ENABLED)
    // Split along the last dimension, cannot use sub-tensors
    // as width and height of the sub-tensors do not match
    // the width and height of the parent tensor
    // in case of input with more than 2D.
    std::set<unsigned int> splitAxis = ComputeSplitAxis(descriptor, input.GetShape());
    if (descriptor.GetNumDimensions() > 2 && splitAxis.size() == 1 &&
        *splitAxis.begin() == descriptor.GetNumDimensions() - 1 )
    {
        FORWARD_WORKLOAD_VALIDATE_FUNC(ClSplitterWorkloadValidate,
                                       reasonIfUnsupported,
                                       input,
                                       outputs,
                                       *splitAxis.begin());
    }
#endif
    IgnoreUnused(descriptor);
    for (auto output : outputs)
    {
        if (!input.IsTypeSpaceMatch(output)) // Cannot use sub-tensors if the types are not same space
        {
            SetValueChecked(reasonIfUnsupported, "Cl Splitter: Types and quantization parameters must match.");
            return false;
        }
    }
    return true;
}
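
// Worked example (illustrative only): for a ViewsDescriptor that splits a 3D tensor of
// shape {2, 2, 4} into two {2, 2, 2} views, ComputeSplitAxis returns {2}; that is the
// last dimension of a 3D input, so the ClSplitterWorkloadValidate path above is taken.
// Splitting the same tensor along axis 0 instead skips that path and only performs the
// type and quantization compatibility check, relying on sub-tensors at runtime.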

bool ClLayerSupport::IsStackSupported(const std::vector<const TensorInfo*>& inputs,
                                      const TensorInfo& output,
                                      const StackDescriptor& descriptor,
                                      Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClStackWorkloadValidate,
                                   reasonIfUnsupported,
                                   inputs,
                                   output,
                                   descriptor);
}

bool ClLayerSupport::IsStridedSliceSupported(const TensorInfo& input,
                                             const TensorInfo& output,
                                             const StridedSliceDescriptor& descriptor,
                                             Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClStridedSliceWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}

bool ClLayerSupport::IsSubtractionSupported(const TensorInfo& input0,
                                            const TensorInfo& input1,
                                            const TensorInfo& output,
                                            Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClSubtractionValidate,
                                   reasonIfUnsupported,
                                   input0,
                                   input1,
                                   output,
                                   nullptr);
}

bool ClLayerSupport::IsTransposeConvolution2dSupported(const TensorInfo& input,
                                                       const TensorInfo& output,
                                                       const TransposeConvolution2dDescriptor& descriptor,
                                                       const TensorInfo& weights,
                                                       const Optional<TensorInfo>& biases,
                                                       Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClTransposeConvolution2dWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor,
                                   weights,
                                   biases);
}

bool ClLayerSupport::IsTransposeSupported(const TensorInfo& input,
                                          const TensorInfo& output,
                                          const TransposeDescriptor& descriptor,
                                          Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClTransposeWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
}

bool ClLayerSupport::IsUnidirectionalSequenceLstmSupported(const TensorInfo& input,
                                                           const TensorInfo& outputStateIn,
                                                           const TensorInfo& cellStateIn,
                                                           const TensorInfo& outputStateOut,
                                                           const TensorInfo& cellStateOut,
                                                           const TensorInfo& output,
                                                           const UnidirectionalSequenceLstmDescriptor& descriptor,
                                                           const LstmInputParamsInfo& paramsInfo,
                                                           Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClUnidirectionalSequenceLstmFloatWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   outputStateIn,
                                   cellStateIn,
                                   outputStateOut,
                                   cellStateOut,
                                   output,
                                   descriptor,
                                   paramsInfo);
}

} // namespace armnn
armnn::ClLayerSupport::IsSubtractionSupported
bool IsSubtractionSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: ClLayerSupport.cpp:1507
armnn::ClDivisionWorkloadValidate
arm_compute::Status ClDivisionWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ActivationDescriptor *activationDescriptor)
Definition: ClDivisionWorkload.cpp:18
ClConstantWorkload.hpp
armnn::LayerSupportBase::IsMemImportSupported
bool IsMemImportSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: LayerSupportBase.cpp:397
armnn::LayerType::Floor
@ Floor
ClConcatWorkload.hpp
armnn::ClReduceWorkloadValidate
arm_compute::Status ClReduceWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const ReduceDescriptor &descriptor)
Definition: ClReduceWorkload.cpp:18
armnn::ClSoftmaxWorkloadValidate
arm_compute::Status ClSoftmaxWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const SoftmaxDescriptor &descriptor)
Definition: ClSoftmaxWorkload.cpp:17
armnn::ClLayerSupport::IsCastSupported
bool IsCastSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: ClLayerSupport.cpp:760
armnn::LayerType::MemCopy
@ MemCopy
ClDequantizeWorkload.hpp
ClSplitterWorkload.hpp
armnn::LayerType::Softmax
@ Softmax
armnn::LayerType::Pooling3d
@ Pooling3d
armnn::LayerType::FullyConnected
@ FullyConnected
armnn::ILayerSupport::outputStateIn
const TensorInfo & outputStateIn
Definition: ILayerSupport.hpp:286
armnn::LayerType::Transpose
@ Transpose
ClFillWorkload.hpp
armnn::ClLayerSupport::IsConcatSupported
bool IsConcatSupported(const std::vector< const TensorInfo * > inputs, const TensorInfo &output, const OriginsDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: ClLayerSupport.cpp:796
ClSoftmaxWorkload.hpp
armnn::LstmInputParamsInfo
Definition: LstmParams.hpp:63
armnn::ClDequantizeWorkloadValidate
arm_compute::Status ClDequantizeWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: ClDequantizeWorkload.cpp:22
armnn::LayerType::ChannelShuffle
@ ChannelShuffle
armnn::ILayerSupport::paramsInfo
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const LstmDescriptor const LstmInputParamsInfo & paramsInfo
Definition: ILayerSupport.hpp:293
armnn::ClLayerSupport::IsSliceSupported
bool IsSliceSupported(const TensorInfo &input, const TensorInfo &output, const SliceDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: ClLayerSupport.cpp:1410
armnn::GatherDescriptor
A GatherDescriptor for the GatherLayer.
Definition: Descriptors.hpp:932
armnn::NormalizationDescriptor
A NormalizationDescriptor for the NormalizationLayer.
Definition: Descriptors.hpp:757
armnn::TransposeDescriptor
A TransposeDescriptor for the TransposeLayer.
Definition: Descriptors.hpp:1457
ClLogicalNotWorkload.hpp
ClCastWorkload.hpp
ClBatchToSpaceNdWorkload.hpp
armnn::ClL2NormalizationWorkloadValidate
arm_compute::Status ClL2NormalizationWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const L2NormalizationDescriptor &descriptor)
Definition: ClL2NormalizationFloatWorkload.cpp:17
armnn::ClLayerSupport::IsPadSupported
bool IsPadSupported(const TensorInfo &input, const TensorInfo &output, const PadDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: ClLayerSupport.cpp:1275
armnn::ElementwiseUnaryDescriptor
A ElementwiseUnaryDescriptor for the ElementwiseUnaryLayer.
Definition: Descriptors.hpp:129
armnn::PadDescriptor
A PadDescriptor for the PadLayer.
Definition: Descriptors.hpp:1163
armnn::ClLayerSupport::IsTransposeSupported
bool IsTransposeSupported(const TensorInfo &input, const TensorInfo &output, const TransposeDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: ClLayerSupport.cpp:1536
armnn::SoftmaxDescriptor
A SoftmaxDescriptor for the SoftmaxLayer.
Definition: Descriptors.hpp:177
ClPooling3dWorkload.hpp
armnn::ClReshapeWorkloadValidate
arm_compute::Status ClReshapeWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: ClReshapeWorkload.cpp:15
armnn::ClLayerSupport::IsSoftmaxSupported
bool IsSoftmaxSupported(const TensorInfo &input, const TensorInfo &output, const SoftmaxDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: ClLayerSupport.cpp:1418
armnn::LayerType::ConvertFp32ToFp16
@ ConvertFp32ToFp16
armnn::LayerType::L2Normalization
@ L2Normalization
armnn::LogicalBinaryOperation::LogicalAnd
@ LogicalAnd
armnn::ClLayerSupport::IsFillSupported
bool IsFillSupported(const TensorInfo &input, const TensorInfo &output, const FillDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: ClLayerSupport.cpp:1053
armnn::StackDescriptor
A StackDescriptor for the StackLayer.
Definition: Descriptors.hpp:1218
armnn::LayerType::TransposeConvolution2d
@ TransposeConvolution2d
armnn::ClAbsWorkloadValidate
arm_compute::Status ClAbsWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: ClAbsWorkload.cpp:19
armnn::SliceDescriptor
A SliceDescriptor for the SliceLayer.
Definition: Descriptors.hpp:1195
armnn::ILayerSupport::scratchBuffer
const TensorInfo const TensorInfo const TensorInfo & scratchBuffer
Definition: ILayerSupport.hpp:288
armnn::LayerType::Map
@ Map
armnn::LayerType::Input
@ Input
armnn::ClMeanValidate
arm_compute::Status ClMeanValidate(const TensorInfo &input, const TensorInfo &output, const MeanDescriptor &descriptor)
Definition: ClMeanWorkload.cpp:17
armnn::LayerType::Slice
@ Slice
armnn::ActivationDescriptor
An ActivationDescriptor for the ActivationLayer.
Definition: Descriptors.hpp:36
armnn::ILayerSupport::reasonIfUnsupported
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
Definition: ILayerSupport.hpp:43
FORWARD_WORKLOAD_VALIDATE_FUNC
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported,...)
Definition: ClLayerSupport.cpp:148
armnn::ClLayerSupport::IsLayerSupported
bool IsLayerSupported(const LayerType &type, const std::vector< TensorInfo > &infos, const BaseDescriptor &descriptor, const Optional< LstmInputParamsInfo > &lstmParamsInfo, const Optional< QuantizedLstmInputParamsInfo > &quantizedLstmParamsInfo, Optional< std::string & > reasonIfUnsupported) const override
Definition: ClLayerSupport.cpp:184
armnn::LstmDescriptor
An LstmDescriptor for the LstmLayer.
Definition: Descriptors.hpp:1069
armnn::ClBatchToSpaceNdWorkloadValidate
arm_compute::Status ClBatchToSpaceNdWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const BatchToSpaceNdDescriptor &descriptor)
Definition: ClBatchToSpaceNdWorkload.cpp:20
armnn::LayerType::ElementwiseBinary
@ ElementwiseBinary
armnn::LayerType::Maximum
@ Maximum
ClSubtractionWorkload.hpp
armnn::FullyConnectedDescriptor
A FullyConnectedDescriptor for the FullyConnectedLayer.
Definition: Descriptors.hpp:495
ClLogicalOrWorkload.hpp
ClNormalizationFloatWorkload.hpp
ClMinimumWorkload.hpp
armnn::LayerType::Quantize
@ Quantize
armnn::ClBatchMatMulValidate
arm_compute::Status ClBatchMatMulValidate(const TensorInfo &inputInfoX, const TensorInfo &inputInfoY, const TensorInfo &outputInfo, const BatchMatMulDescriptor &descriptor, const ActivationDescriptor *activationDescriptor)
Definition: ClBatchMatMulWorkload.cpp:23
ClConvertFp32ToFp16Workload.hpp
armnn::ClLayerSupport::IsActivationSupported
bool IsActivationSupported(const TensorInfo &input, const TensorInfo &output, const ActivationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: ClLayerSupport.cpp:674
armnn::BatchMatMulDescriptor
A BatchMatMulDescriptor for the BatchMatMul operator.
Definition: Descriptors.hpp:1551
armnn::ClLayerSupport::IsPooling3dSupported
bool IsPooling3dSupported(const TensorInfo &input, const TensorInfo &output, const Pooling3dDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: ClLayerSupport.cpp:1303
armnn::ClLayerSupport::IsDepthwiseConvolutionSupported
bool IsDepthwiseConvolutionSupported(const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: ClLayerSupport.cpp:953
armnn::ResizeDescriptor
A ResizeDescriptor for the ResizeLayer.
Definition: Descriptors.hpp:952
armnn::LayerType::ArgMinMax
@ ArgMinMax
armnn::StridedSliceDescriptor
A StridedSliceDescriptor for the StridedSliceLayer.
Definition: Descriptors.hpp:1270
armnn::LayerType::Subtraction
@ Subtraction
armnn::ClLogicalAndWorkloadValidate
arm_compute::Status ClLogicalAndWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output)
Definition: ClLogicalAndWorkload.cpp:20
armnn::ClLayerSupport::IsConstantSupported
bool IsConstantSupported(const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: ClLayerSupport.cpp:837
armnn::ClPreluWorkloadValidate
arm_compute::Status ClPreluWorkloadValidate(const TensorInfo &input, const TensorInfo &alpha, const TensorInfo &output)
Definition: ClPreluWorkload.cpp:16
armnn::ClLayerSupport::IsUnidirectionalSequenceLstmSupported
bool IsUnidirectionalSequenceLstmSupported(const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const UnidirectionalSequenceLstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string & > reasonIfUnsupported) const override
Definition: ClLayerSupport.cpp:1544
ClSqrtWorkload.hpp
ClLogSoftmaxWorkload.hpp
armnn::LayerType::SpaceToBatchNd
@ SpaceToBatchNd
armnn::LayerType::Convolution2d
@ Convolution2d
ClLstmFloatWorkload.hpp
armnn::UnaryOperation::Exp
@ Exp
armnn::Pooling3dDescriptor
A Pooling3dDescriptor for the Pooling3dLayer.
Definition: Descriptors.hpp:419
ClPreluWorkload.hpp
ClDepthwiseConvolutionWorkload.hpp
armnn::ReduceDescriptor
A ReduceDescriptor for the REDUCE operators.
Definition: Descriptors.hpp:1505
armnn::ClFloorWorkloadValidate
arm_compute::Status ClFloorWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: ClFloorFloatWorkload.cpp:14
ClTransposeWorkload.hpp
PolymorphicDowncast.hpp
armnn::LayerType::Shape
@ Shape
armnn::ILayerSupport::previousOutputIn
const TensorInfo & previousOutputIn
Definition: ILayerSupport.hpp:405
ClReshapeWorkload.hpp
armnn::ClSliceWorkloadValidate
arm_compute::Status ClSliceWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const SliceDescriptor &descriptor)
Definition: ClSliceWorkload.cpp:18
armnn::ComparisonDescriptor
A ComparisonDescriptor for the ComparisonLayer.
Definition: Descriptors.hpp:89
armnn::IgnoreUnused
void IgnoreUnused(Ts &&...)
Definition: IgnoreUnused.hpp:14
armnn::UnaryOperation::Sqrt
@ Sqrt
armnn::ClBatchNormalizationValidate
arm_compute::Status ClBatchNormalizationValidate(const TensorInfo &input, const TensorInfo &output, const TensorInfo &mean, const TensorInfo &var, const TensorInfo &beta, const TensorInfo &gamma, const BatchNormalizationDescriptor &descriptor, const ActivationDescriptor *activationDescriptor)
Definition: ClBatchNormalizationFloatWorkload.cpp:19
armnn::ClPermuteWorkloadValidate
arm_compute::Status ClPermuteWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const PermuteDescriptor &descriptor)
Definition: ClPermuteWorkload.cpp:17
armnn::UnaryOperation::Neg
@ Neg
armnn::ClGatherNdWorkloadValidate
arm_compute::Status ClGatherNdWorkloadValidate(const TensorInfo &paramsInfo, const TensorInfo &indicesInfo, const TensorInfo &outputInfo)
Definition: ClGatherNdWorkload.cpp:16
armnn::ILayerSupport::mean
const TensorInfo const TensorInfo & mean
Definition: ILayerSupport.hpp:63
armnn::LayerSupportBase::IsMergeSupported
bool IsMergeSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: LayerSupportBase.cpp:404
armnn::ClComparisonWorkloadValidate
arm_compute::Status ClComparisonWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ComparisonDescriptor &descriptor)
Definition: ClComparisonWorkload.cpp:24
armnn::LayerType::Merge
@ Merge
armnn::LayerSupportBase::IsShapeSupported
bool IsShapeSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: LayerSupportBase.cpp:551
armnn::ViewsDescriptor
A ViewsDescriptor for the SplitterLayer.
Definition: Descriptors.hpp:244
armnn::ClLayerSupport::IsDilatedDepthwiseConvolutionSupported
bool IsDilatedDepthwiseConvolutionSupported(const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reason=EmptyOptional()) const override
Definition: ClLayerSupport.cpp:970
armnn::LayerType::Permute
@ Permute
armnn::ClSqrtWorkloadValidate
arm_compute::Status ClSqrtWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: ClSqrtWorkload.cpp:19
armnn::ClSpaceToBatchNdWorkloadValidate
arm_compute::Status ClSpaceToBatchNdWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const SpaceToBatchNdDescriptor &descriptor)
Definition: ClSpaceToBatchNdWorkload.cpp:23
armnn::LayerType::ConvertFp16ToFp32
@ ConvertFp16ToFp32
ClAdditionWorkload.hpp
armnn::ClLayerSupport::IsGatherSupported
bool IsGatherSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const GatherDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported) const override
Definition: ClLayerSupport.cpp:1092
ClConvertFp16ToFp32Workload.hpp
armnn::LayerType::QLstm
@ QLstm
armnn::LayerType::Pad
@ Pad
armnn::LayerType::Addition
@ Addition
armnn::LayerType::QuantizedLstm
@ QuantizedLstm
armnn::DataType::QAsymmS8
@ QAsymmS8
armnn::LayerType::BatchNormalization
@ BatchNormalization
ClBackendModelContext.hpp
armnn::LayerType::Reduce
@ Reduce
ClFloorFloatWorkload.hpp
armnn::ClLayerSupport::IsLstmSupported
bool IsLstmSupported(const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &scratchBuffer, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const LstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: ClLayerSupport.cpp:1188
ClGatherWorkload.hpp
armnn::ClLayerSupport::ClLayerSupport
ClLayerSupport()
Definition: ClLayerSupport.cpp:179
ClFullyConnectedWorkload.hpp
armnn::ClConvertFp32ToFp16WorkloadValidate
arm_compute::Status ClConvertFp32ToFp16WorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: ClConvertFp32ToFp16Workload.cpp:44
armnn::LayerType::Division
@ Division
ClTransposeConvolution2dWorkload.hpp
armnn::ClLogicalNotWorkloadValidate
arm_compute::Status ClLogicalNotWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: ClLogicalNotWorkload.cpp:20
ClAbsWorkload.hpp
ClChannelShuffleWorkload.hpp
armnn::ClLayerSupport::IsResizeSupported
bool IsResizeSupported(const TensorInfo &input, const TensorInfo &output, const ResizeDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: ClLayerSupport.cpp:1402
armnn::ClChannelShuffleValidate
arm_compute::Status ClChannelShuffleValidate(const TensorInfo &input, const TensorInfo &output, const ChannelShuffleDescriptor &descriptor)
Definition: ClChannelShuffleWorkload.cpp:20
armnn::ClLayerSupport::IsSplitterSupported
bool IsSplitterSupported(const TensorInfo &input, const std::vector< std::reference_wrapper< TensorInfo >> &outputs, const ViewsDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: ClLayerSupport.cpp:1450
armnn
Copyright (c) 2021 ARM Limited and Contributors.
Definition: 01_00_quick_start.dox:6
armnn::ClTransposeWorkloadValidate
arm_compute::Status ClTransposeWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const TransposeDescriptor &descriptor)
Definition: ClTransposeWorkload.cpp:17
armnn::LayerType::InstanceNormalization
@ InstanceNormalization
armnn::ClSplitterWorkloadValidate
arm_compute::Status ClSplitterWorkloadValidate(const TensorInfo &input, const std::vector< std::reference_wrapper< TensorInfo >> &outputs, unsigned int splitAxis)
Definition: ClSplitterWorkload.cpp:31
armnn::ClLogSoftmaxWorkloadValidate
arm_compute::Status ClLogSoftmaxWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const LogSoftmaxDescriptor &descriptor)
Definition: ClLogSoftmaxWorkload.cpp:17
ClL2NormalizationFloatWorkload.hpp
armnn::SetValueChecked
void SetValueChecked(Optional< T & > optionalRef, V &&val)
Definition: LayerSupportCommon.hpp:17
armnn::BatchToSpaceNdDescriptor
A BatchToSpaceNdDescriptor for the BatchToSpaceNdLayer.
Definition: Descriptors.hpp:863
armnn::ClMaximumWorkloadValidate
arm_compute::Status ClMaximumWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output)
Definition: ClMaximumWorkload.cpp:24
armnn::ComputeSplitAxis
std::set< unsigned int > ComputeSplitAxis(const armnn::SplitterDescriptor &desc, const TensorShape &input)
Definition: ArmComputeUtils.hpp:244
armnn::OptionalReferenceSwitch< std::is_reference< T >::value, T >::value
const T & value() const
Definition: Optional.hpp:146
armnn::ClGatherWorkloadValidate
arm_compute::Status ClGatherWorkloadValidate(const TensorInfo &input, const TensorInfo &indices, const TensorInfo &output, const GatherDescriptor &descriptor)
Definition: ClGatherWorkload.cpp:15
ClStackWorkload.hpp
ClMultiplicationWorkload.hpp
ClLayerSupport.hpp
armnn::SpaceToDepthDescriptor
A SpaceToDepthDescriptor for the SpaceToDepthLayer.
Definition: Descriptors.hpp:1042
ClNegWorkload.hpp
ClConvolution2dWorkload.hpp
armnn::LayerType::Activation
@ Activation
armnn::BinaryOperation::Sub
@ Sub
armnn::ClConcatWorkloadValidate
arm_compute::Status ClConcatWorkloadValidate(const std::vector< const TensorInfo * > &inputs, const TensorInfo &output, const OriginsDescriptor &descriptor)
Definition: ClConcatWorkload.cpp:27
armnn::LayerType::Normalization
@ Normalization
ClResizeWorkload.hpp
ClSpaceToBatchNdWorkload.hpp
armnn::ClLayerSupport::IsAdditionSupported
bool IsAdditionSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: ClLayerSupport.cpp:686
armnn::ClMultiplicationWorkloadValidate
arm_compute::Status ClMultiplicationWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ActivationDescriptor *activationDescriptor)
Definition: ClMultiplicationWorkload.cpp:18
ClActivationWorkload.hpp
armnn::ClTransposeConvolution2dWorkloadValidate
arm_compute::Status ClTransposeConvolution2dWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const TransposeConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases)
Definition: ClTransposeConvolution2dWorkload.cpp:26
armnn::LayerType::Comparison
@ Comparison
armnn::LayerType::Stack
@ Stack
armnn::ClBackendModelContext
The ClBackendModelContext is used to pass in CL specific backend ModelOptions.
Definition: ClBackendModelContext.hpp:28
ClDivisionWorkload.hpp
armnn::ILayerSupport::descriptor
const TensorInfo const ActivationDescriptor & descriptor
Definition: ILayerSupport.hpp:42
armnn::ClSubtractionValidate
arm_compute::Status ClSubtractionValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ActivationDescriptor *activationDescriptor)
Definition: ClSubtractionWorkload.cpp:46
armnn::ClQuantizeWorkloadValidate
arm_compute::Status ClQuantizeWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: ClQuantizeWorkload.cpp:22
armnn::FillDescriptor
A FillDescriptor for the FillLayer.
Definition: Descriptors.hpp:913
armnn::LayerType
LayerType
When adding a new layer, adapt also the LastLayer enum value in the enum class LayerType below.
Definition: Types.hpp:479
armnn::LayerType::Reshape
@ Reshape
ClSpaceToDepthWorkload.hpp
armnn::IBackendInternal::IBackendSpecificModelContextPtr
std::shared_ptr< IBackendModelContext > IBackendSpecificModelContextPtr
Definition: IBackendInternal.hpp:96
armnn::ILayerSupport::previousCellStateIn
const TensorInfo const TensorInfo & previousCellStateIn
Definition: ILayerSupport.hpp:406
armnn::LayerType::Gather
@ Gather
armnn::LayerType::DepthwiseConvolution2d
@ DepthwiseConvolution2d
armnn::LogicalBinaryOperation::LogicalOr
@ LogicalOr
ClMaximumWorkload.hpp
armnn::LayerType::Fill
@ Fill
armnn::ClLayerSupport::IsChannelShuffleSupported
bool IsChannelShuffleSupported(const TensorInfo &input, const TensorInfo &output, const ChannelShuffleDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: ClLayerSupport.cpp:770
armnn::LayerType::Resize
@ Resize
armnn::ClFullyConnectedWorkloadValidate
arm_compute::Status ClFullyConnectedWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const TensorInfo &weights, const Optional< TensorInfo > &biases, const FullyConnectedDescriptor &descriptor, const ActivationDescriptor *activationDescriptor)
Definition: ClFullyConnectedWorkload.cpp:19
ClQuantizedLstmWorkload.hpp
armnn::ILayerSupport::alpha
const TensorInfo & alpha
Definition: ILayerSupport.hpp:392
ClReduceWorkload.hpp
ClQLstmWorkload.hpp
armnn::ClLayerSupport::IsSpaceToDepthSupported
bool IsSpaceToDepthSupported(const TensorInfo &input, const TensorInfo &output, const SpaceToDepthDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: ClLayerSupport.cpp:1438
armnn::ClConvertFp16ToFp32WorkloadValidate
arm_compute::Status ClConvertFp16ToFp32WorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: ClConvertFp16ToFp32Workload.cpp:44
armnn::ClDepthwiseConvolutionWorkloadValidate
arm_compute::Status ClDepthwiseConvolutionWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, const ActivationDescriptor *activationDescriptor)
Definition: ClDepthwiseConvolutionWorkload.cpp:26
armnn::LayerType::Rank
@ Rank
armnn::DepthwiseConvolution2dDescriptor
A DepthwiseConvolution2dDescriptor for the DepthwiseConvolution2dLayer.
Definition: Descriptors.hpp:647
armnn::MeanDescriptor
A MeanDescriptor for the MeanLayer.
Definition: Descriptors.hpp:1139
armnn::TensorInfo::GetNumDimensions
unsigned int GetNumDimensions() const
Definition: Tensor.hpp:195
armnn::ClLayerSupport::IsDequantizeSupported
bool IsDequantizeSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: ClLayerSupport.cpp:931
armnn::BinaryOperation::Mul
@ Mul
armnn::TensorInfo::IsTypeSpaceMatch
bool IsTypeSpaceMatch(const TensorInfo &other) const
Check that the types are the same and, if quantize, that the quantization parameters are the same.
Definition: Tensor.cpp:432
ArmComputeTensorUtils.hpp
armnn::ClQuantizedLstmWorkloadValidate
arm_compute::Status ClQuantizedLstmWorkloadValidate(const TensorInfo &input, const TensorInfo &previousCellStateIn, const TensorInfo &previousOutputIn, const TensorInfo &cellStateOut, const TensorInfo &output, const QuantizedLstmInputParamsInfo &paramsInfo)
Definition: ClQuantizedLstmWorkload.cpp:18
ClLogicalAndWorkload.hpp
armnn::LayerType::LogicalBinary
@ LogicalBinary
armnn::LayerType::UnidirectionalSequenceLstm
@ UnidirectionalSequenceLstm
ClMeanWorkload.hpp
armnn::ClPooling3dWorkloadValidate
arm_compute::Status ClPooling3dWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const Pooling3dDescriptor &descriptor)
Definition: ClPooling3dWorkload.cpp:18
armnn::LayerType::Pooling2d
@ Pooling2d
armnn::ClDepthToSpaceWorkloadValidate
arm_compute::Status ClDepthToSpaceWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const DepthToSpaceDescriptor &descriptor)
Definition: ClDepthToSpaceWorkload.cpp:22
InternalTypes.hpp
armnn::L2NormalizationDescriptor
A L2NormalizationDescriptor for the L2NormalizationLayer.
Definition: Descriptors.hpp:797
ClComparisonWorkload.hpp
armnn::ClNormalizationWorkloadValidate
arm_compute::Status ClNormalizationWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const NormalizationDescriptor &descriptor)
Definition: ClNormalizationFloatWorkload.cpp:19
armnn::ClActivationWorkloadValidate
arm_compute::Status ClActivationWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const ActivationDescriptor &descriptor)
Definition: ClActivationWorkload.cpp:17
armnn::ClLogWorkloadValidate
arm_compute::Status ClLogWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: ClLogWorkload.cpp:18
armnn::ILayerSupport::input1
const TensorInfo & input1
Definition: ILayerSupport.hpp:48
armnn::ClLayerSupport::IsInstanceNormalizationSupported
bool IsInstanceNormalizationSupported(const TensorInfo &input, const TensorInfo &output, const InstanceNormalizationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: ClLayerSupport.cpp:1124
armnn::ChannelShuffleDescriptor
A ChannelShuffleDescriptor for the ChannelShuffle operator.
Definition: Descriptors.hpp:1529
armnn::Convolution3dDescriptor
A Convolution3dDescriptor for the Convolution3dLayer.
Definition: Descriptors.hpp:576
ClLogWorkload.hpp
armnn::ClLayerSupport::IsOutputSupported
bool IsOutputSupported(const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: ClLayerSupport.cpp:1269
armnn::LayerType::GatherNd
@ GatherNd
armnn::ClArgMinMaxWorkloadValidate
arm_compute::Status ClArgMinMaxWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const ArgMinMaxDescriptor &descriptor)
Definition: ClArgMinMaxWorkload.cpp:31
armnn::ClLayerSupport::IsDepthToSpaceSupported
bool IsDepthToSpaceSupported(const TensorInfo &input, const TensorInfo &output, const DepthToSpaceDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: ClLayerSupport.cpp:941
ArmComputeUtils.hpp
armnn::TensorInfo
Definition: Tensor.hpp:152
armnn::ILayerSupport::gamma
const TensorInfo & gamma
Definition: ILayerSupport.hpp:66
armnn::LayerType::Minimum
@ Minimum
armnn::LayerType::Constant
@ Constant
armnn::ClLogicalOrWorkloadValidate
arm_compute::Status ClLogicalOrWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output)
Definition: ClLogicalOrWorkload.cpp:20
armnn::ILayerSupport::var
const TensorInfo & var
Definition: ILayerSupport.hpp:64
armnn::ClLayerSupport::IsQuantizedLstmSupported
bool IsQuantizedLstmSupported(const TensorInfo &input, const TensorInfo &previousCellStateIn, const TensorInfo &previousOutputIn, const TensorInfo &cellStateOut, const TensorInfo &output, const QuantizedLstmInputParamsInfo &paramsInfo, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: ClLayerSupport.cpp:1353
armnn::Convolution2dDescriptor
A Convolution2dDescriptor for the Convolution2dLayer.
Definition: Descriptors.hpp:522
ClPermuteWorkload.hpp
armnn::LayerType::Lstm
@ Lstm
armnn::ClLayerSupport::IsGatherNdSupported
bool IsGatherNdSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported) const
Definition: ClLayerSupport.cpp:1106
armnn::ClLayerSupport::IsL2NormalizationSupported
bool IsL2NormalizationSupported(const TensorInfo &input, const TensorInfo &output, const L2NormalizationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: ClLayerSupport.cpp:1136
armnn::BatchNormalizationDescriptor
A BatchNormalizationDescriptor for the BatchNormalizationLayer.
Definition: Descriptors.hpp:816
ClArgMinMaxWorkload.hpp
armnn::ClStridedSliceWorkloadValidate
arm_compute::Status ClStridedSliceWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const StridedSliceDescriptor &descriptor)
Definition: ClStridedSliceWorkload.cpp:27
armnn::QLstmDescriptor
A QLstmDescriptor for the QLstmLayer.
Definition: Descriptors.hpp:1347
LayerSupportCommon.hpp
armnn::LayerType::ElementwiseUnary
@ ElementwiseUnary
armnn::ClNegWorkloadValidate
arm_compute::Status ClNegWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: ClNegWorkload.cpp:18
ClRsqrtWorkload.hpp
armnn::EmptyOptional
EmptyOptional is used to initialize the Optional class in case we want to have default value for an Optional in a function declaration.
Definition: Optional.hpp:32
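All of the Is*Supported queries indexed on this page take an armnn::Optional< std::string & > reasonIfUnsupported that defaults to EmptyOptional(). A minimal caller-side sketch of the two conventions, using only the public Optional header; the RejectWithReason function is purely hypothetical and only mirrors the convention:

#include <armnn/Optional.hpp>
#include <string>
#include <iostream>

// Hypothetical consumer mirroring the reasonIfUnsupported convention:
// write a message only if the caller actually bound a string.
bool RejectWithReason(armnn::Optional<std::string&> reasonIfUnsupported)
{
    if (reasonIfUnsupported)
    {
        reasonIfUnsupported.value() = "illustrative rejection message";
    }
    return false;
}

int main()
{
    // Caller does not care about the reason: pass EmptyOptional().
    RejectWithReason(armnn::EmptyOptional());

    // Caller wants the reason: bind the Optional to a local string.
    std::string reason;
    RejectWithReason(armnn::Optional<std::string&>(reason));
    std::cout << reason << "\n";
    return 0;
}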
armnn::Status
Status
Definition: Types.hpp:42
armnn::LayerType::SpaceToDepth
@ SpaceToDepth
ClConvolution3dWorkload.hpp
armnn::BinaryOperation::Minimum
@ Minimum
ClSliceWorkload.hpp
armnn::ClLayerSupport::IsInputSupported
bool IsInputSupported(const TensorInfo &input, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: ClLayerSupport.cpp:1118
armnn::TensorInfo::GetShape
const TensorShape & GetShape() const
Definition: Tensor.hpp:191
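The TensorInfo accessors referenced in this index (GetShape, GetNumDimensions, GetDataType) can be exercised on their own; a small sketch using only public headers, with purely illustrative shape values:

#include <armnn/Tensor.hpp>
#include <armnn/Types.hpp>
#include <iostream>

int main()
{
    // NHWC-style 4D tensor; the dimensions are chosen only for illustration.
    armnn::TensorInfo info(armnn::TensorShape({1, 16, 16, 8}), armnn::DataType::Float32);

    std::cout << "dims: "     << info.GetNumDimensions() << "\n"; // 4
    std::cout << "channels: " << info.GetShape()[3]      << "\n"; // 8
    std::cout << "float32: "
              << (info.GetDataType() == armnn::DataType::Float32) << "\n";
    return 0;
}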
armnn::ILayerSupport::beta
const TensorInfo & beta
Definition: ILayerSupport.hpp:65
ClStridedSliceWorkload.hpp
armnn::ClLayerSupport::IsConvolution2dSupported
bool IsConvolution2dSupported(const TensorInfo &input, const TensorInfo &output, const Convolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: ClLayerSupport.cpp:865
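A hedged caller-side sketch of the query above. It assumes the backend-internal header ClLayerSupport.hpp (under src/backends/cl, not part of the public API) is reachable on the include path, and uses illustrative NHWC shapes:

#include <armnn/Tensor.hpp>
#include <armnn/Types.hpp>
#include <armnn/Descriptors.hpp>
#include <armnn/Optional.hpp>
#include <cl/ClLayerSupport.hpp>   // backend-internal header; the path is an assumption
#include <iostream>

int main()
{
    using namespace armnn;

    // 1x16x16x8 input, 3x3 kernel, 16 output channels, stride 1, no padding.
    TensorInfo input  (TensorShape({1, 16, 16, 8}),  DataType::Float32);
    TensorInfo output (TensorShape({1, 14, 14, 16}), DataType::Float32);
    TensorInfo weights(TensorShape({16, 3, 3, 8}),   DataType::Float32, 0.0f, 0, true);
    TensorInfo biases (TensorShape({16}),            DataType::Float32, 0.0f, 0, true);

    Convolution2dDescriptor descriptor;
    descriptor.m_StrideX     = 1;
    descriptor.m_StrideY     = 1;
    descriptor.m_BiasEnabled = true;
    descriptor.m_DataLayout  = DataLayout::NHWC;

    ClLayerSupport support;
    std::string reason;
    bool ok = support.IsConvolution2dSupported(input, output, descriptor, weights,
                                               Optional<TensorInfo>(biases),
                                               Optional<std::string&>(reason));
    if (ok) { std::cout << "supported\n"; }
    else    { std::cout << "unsupported: " << reason << "\n"; }
    return 0;
}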
armnn::ClInstanceNormalizationWorkloadValidate
arm_compute::Status ClInstanceNormalizationWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const InstanceNormalizationDescriptor &descriptor)
Definition: ClInstanceNormalizationWorkload.cpp:18
ClSinWorkload.hpp
ClExpWorkload.hpp
armnn::ClConstantWorkloadValidate
arm_compute::Status ClConstantWorkloadValidate(const TensorInfo &output)
Definition: ClConstantWorkload.cpp:18
BackendRegistry.hpp
armnn::ClLayerSupport::IsPermuteSupported
bool IsPermuteSupported(const TensorInfo &input, const TensorInfo &output, const PermuteDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: ClLayerSupport.cpp:1287
ClDepthToSpaceWorkload.hpp
armnn::ClPooling2dWorkloadValidate
arm_compute::Status ClPooling2dWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const Pooling2dDescriptor &descriptor)
Definition: ClPooling2dWorkload.cpp:18
armnn::ILayerSupport::weights
const TensorInfo & weights
Definition: ILayerSupport.hpp:127
armnn::BinaryOperation::Maximum
@ Maximum
armnn::UnaryOperation::Abs
@ Abs
armnn::LayerType::StridedSlice
@ StridedSlice
armnn::ILayerSupport::cellStateIn
const TensorInfo & cellStateIn
Definition: ILayerSupport.hpp:287
armnn::LayerType::Unmap
@ Unmap
armnn::ClCastValidate
arm_compute::Status ClCastValidate(const TensorInfo &input, const TensorInfo &output)
Definition: ClCastWorkload.cpp:20
ARMNN_NO_DEPRECATE_WARN_BEGIN
#define ARMNN_NO_DEPRECATE_WARN_BEGIN
Definition: Deprecated.hpp:33
armnn::ILayerSupport::biases
const Optional< TensorInfo > & biases
Definition: ILayerSupport.hpp:128
armnn::ClUnidirectionalSequenceLstmFloatWorkloadValidate
arm_compute::Status ClUnidirectionalSequenceLstmFloatWorkloadValidate(const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &output, const Optional< TensorInfo > &hiddenStateOutput, const Optional< TensorInfo > &cellStateOutput, const UnidirectionalSequenceLstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo)
Definition: ClUnidirectionalSequenceLstmFloatWorkload.cpp:508
armnn::LayerType::Mean
@ Mean
armnn::ClLayerSupport::IsConvertFp32ToFp16Supported
bool IsConvertFp32ToFp16Supported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: ClLayerSupport.cpp:855
armnn::BaseDescriptor
Base class for all descriptors.
Definition: Descriptors.hpp:22
armnn::ClExpWorkloadValidate
arm_compute::Status ClExpWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: ClExpWorkload.cpp:18
armnn::ClLayerSupport::IsPreluSupported
bool IsPreluSupported(const TensorInfo &input, const TensorInfo &alpha, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: ClLayerSupport.cpp:1311
armnn::OriginsDescriptor
An OriginsDescriptor for the ConcatLayer.
Definition: Descriptors.hpp:201
armnn::ReshapeDescriptor
A ReshapeDescriptor for the ReshapeLayer.
Definition: Descriptors.hpp:990
armnn::ClResizeWorkloadValidate
arm_compute::Status ClResizeWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const ResizeDescriptor &descriptor)
Definition: ClResizeWorkload.cpp:22
armnn::ClConvolution3dWorkloadValidate
arm_compute::Status ClConvolution3dWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const Convolution3dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, bool isFastMathEnabled, const ActivationDescriptor *activationDescriptor)
Definition: ClConvolution3dWorkload.cpp:23
armnn::BinaryOperation::Add
@ Add
armnn::LayerType::BatchToSpaceNd
@ BatchToSpaceNd
armnn::ClBackendModelContext::IsFastMathEnabled
bool IsFastMathEnabled() const
Definition: ClBackendModelContext.cpp:66
armnn::DataType
DataType
Definition: Types.hpp:48
armnn::PermuteDescriptor
A PermuteDescriptor for the PermuteLayer.
Definition: Descriptors.hpp:149
armnn::TransposeConvolution2dDescriptor
A TransposeConvolution2dDescriptor for the TransposeConvolution2dLayer.
Definition: Descriptors.hpp:1407
armnn::IsSupportedForDataTypeGeneric
bool IsSupportedForDataTypeGeneric(Optional< std::string & > reasonIfUnsupported, DataType dataType, Float16Func float16FuncPtr, Float32Func float32FuncPtr, Uint8Func uint8FuncPtr, Int32Func int32FuncPtr, BooleanFunc booleanFuncPtr, Params &&... params)
Definition: LayerSupportCommon.hpp:27
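The generic helper above picks one predicate per DataType. The following standalone mimic (not the armnn template itself; names and the reduced parameter list are illustrative) shows the dispatch idea:

#include <armnn/Types.hpp>

// Simplified stand-in for per-data-type dispatch; the real armnn helper also
// takes Int32/Boolean predicates, forwards extra parameters, and reports a
// reason through an Optional<std::string&>.
template <typename F16Func, typename F32Func, typename U8Func>
bool IsSupportedForDataTypeSketch(armnn::DataType dataType,
                                  F16Func f16, F32Func f32, U8Func u8)
{
    switch (dataType)
    {
        case armnn::DataType::Float16:  return f16();
        case armnn::DataType::Float32:  return f32();
        case armnn::DataType::QAsymmU8: return u8();
        default:                        return false; // all other types rejected here
    }
}

// Usage: a layer implemented for Float32 only.
bool Float32Only(armnn::DataType dataType)
{
    return IsSupportedForDataTypeSketch(dataType,
                                        [] { return false; },
                                        [] { return true;  },
                                        [] { return false; });
}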
armnn::LayerType::DepthToSpace
@ DepthToSpace
ClBatchNormalizationFloatWorkload.hpp
ClInstanceNormalizationWorkload.hpp
armnn::ClLayerSupport::IsReshapeSupported
bool IsReshapeSupported(const TensorInfo &input, const TensorInfo &output, const ReshapeDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: ClLayerSupport.cpp:1393
armnn::ILayerSupport::outputs
const std::vector< std::reference_wrapper< TensorInfo > > & outputs
Definition: ILayerSupport.hpp:488
armnn::ClLayerSupport::IsFloorSupported
bool IsFloorSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: ClLayerSupport.cpp:1065
armnn::Pooling2dDescriptor
A Pooling2dDescriptor for the Pooling2dLayer.
Definition: Descriptors.hpp:359
armnn::LogicalBinaryDescriptor
A LogicalBinaryDescriptor for the LogicalBinaryLayer.
Definition: Descriptors.hpp:1485
ClBatchMatMulWorkload.hpp
armnn::UnaryOperation::LogicalNot
@ LogicalNot
armnn::UnaryOperation::Sin
@ Sin
armnn::ClLayerSupport::IsBatchNormalizationSupported
bool IsBatchNormalizationSupported(const TensorInfo &input, const TensorInfo &output, const TensorInfo &mean, const TensorInfo &var, const TensorInfo &beta, const TensorInfo &gamma, const BatchNormalizationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: ClLayerSupport.cpp:727
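The batch-normalization query takes one TensorInfo per statistic (mean, var, beta, gamma) alongside the descriptor. A hedged sketch, under the same backend-internal include-path assumption as above and with illustrative shapes:

#include <armnn/Tensor.hpp>
#include <armnn/Types.hpp>
#include <armnn/Descriptors.hpp>
#include <armnn/Optional.hpp>
#include <cl/ClLayerSupport.hpp>   // backend-internal header; the path is an assumption

bool QueryBatchNorm()
{
    using namespace armnn;

    TensorInfo data (TensorShape({1, 16, 16, 8}), DataType::Float32); // NHWC activation
    TensorInfo stats(TensorShape({8}),            DataType::Float32); // per-channel statistic

    BatchNormalizationDescriptor descriptor;
    descriptor.m_Eps        = 1e-5f;
    descriptor.m_DataLayout = DataLayout::NHWC;

    ClLayerSupport support;
    return support.IsBatchNormalizationSupported(data, data,   // input, output
                                                 stats, stats, // mean, var
                                                 stats, stats, // beta, gamma
                                                 descriptor,
                                                 EmptyOptional());
}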
armnn::Optional
Definition: Optional.hpp:270
ClPadWorkload.hpp
armnn::ClLayerSupport::IsMultiplicationSupported
bool IsMultiplicationSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: ClLayerSupport.cpp:1248
armnn::PolymorphicDowncast
DestType PolymorphicDowncast(SourceType *value)
Polymorphic downcast for built-in pointers only.
Definition: PolymorphicDowncast.hpp:74
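PolymorphicDowncast is how a BaseDescriptor is narrowed to the concrete descriptor type before one of the Is*Supported overloads in this file is called. A small sketch using public headers only; the helper name is hypothetical and the caller is assumed to already know the dynamic type:

#include <armnn/Descriptors.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>

// Recover the concrete Pooling2dDescriptor from a BaseDescriptor that the
// caller knows describes a 2D pooling layer. This is a downcast, not a checked
// conversion at the call site, so that knowledge is the caller's responsibility.
const armnn::Pooling2dDescriptor& AsPooling2d(const armnn::BaseDescriptor& base)
{
    return *armnn::PolymorphicDowncast<const armnn::Pooling2dDescriptor*>(&base);
}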
armnn::ClLayerSupport::IsFullyConnectedSupported
bool IsFullyConnectedSupported(const TensorInfo &input, const TensorInfo &output, const TensorInfo &weights, const TensorInfo &biases, const FullyConnectedDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: ClLayerSupport.cpp:1075
armnn::LayerType::Concat
@ Concat
armnn::ClLayerSupport::IsMeanSupported
bool IsMeanSupported(const TensorInfo &input, const TensorInfo &output, const MeanDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: ClLayerSupport.cpp:1224
armnn::ArgMinMaxDescriptor
An ArgMinMaxDescriptor for ArgMinMaxLayer.
Definition: Descriptors.hpp:67
armnn::UnaryOperation::Rsqrt
@ Rsqrt
armnn::DataType::QSymmS16
@ QSymmS16
armnn::ClLayerSupport::IsConvertFp16ToFp32Supported
bool IsConvertFp16ToFp32Supported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: ClLayerSupport.cpp:845
armnn::ClLayerSupport::IsArgMinMaxSupported
bool IsArgMinMaxSupported(const TensorInfo &input, const TensorInfo &output, const ArgMinMaxDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: ClLayerSupport.cpp:699
armnn::LayerType::Cast
@ Cast
armnn::ClConvolution2dWorkloadValidate
arm_compute::Status ClConvolution2dWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const Convolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, bool isFastMathEnabled, const ActivationDescriptor *activationDescriptor)
Definition: ClConvolution2dWorkload.cpp:23
ClGatherNdWorkload.hpp
IgnoreUnused.hpp
armnn::ClSpaceToDepthWorkloadValidate
arm_compute::Status ClSpaceToDepthWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const SpaceToDepthDescriptor &descriptor)
Definition: ClSpaceToDepthWorkload.cpp:54
armnn::LayerType::BatchMatMul
@ BatchMatMul
armnn::ClStackWorkloadValidate
arm_compute::Status ClStackWorkloadValidate(const std::vector< const TensorInfo * > &inputs, const TensorInfo &output, const StackDescriptor &descriptor)
Definition: ClStackWorkload.cpp:29
armnn::ClLayerSupport::IsLogicalBinarySupported
bool IsLogicalBinarySupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const LogicalBinaryDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported) const override
Definition: ClLayerSupport.cpp:1148
armnn::LayerType::Convolution3d
@ Convolution3d
armnn::LayerType::Splitter
@ Splitter
armnn::QuantizedLstmInputParamsInfo
Definition: QuantizedLstmParams.hpp:119
ClPooling2dWorkload.hpp
armnn::ILayerSupport::output
const TensorInfo & output
Definition: ILayerSupport.hpp:41
armnn::ClAdditionValidate
arm_compute::Status ClAdditionValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ActivationDescriptor *activationDescriptor)
Definition: ClAdditionWorkload.cpp:45
armnn::ClLayerSupport::IsMaximumSupported
bool IsMaximumSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: ClLayerSupport.cpp:1212
armnn::LayerType::LogSoftmax
@ LogSoftmax
armnn::ClLayerSupport::IsBatchToSpaceNdSupported
bool IsBatchToSpaceNdSupported(const TensorInfo &input, const TensorInfo &output, const BatchToSpaceNdDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: ClLayerSupport.cpp:748
armnn::ClLayerSupport::IsPooling2dSupported
bool IsPooling2dSupported(const TensorInfo &input, const TensorInfo &output, const Pooling2dDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: ClLayerSupport.cpp:1295
armnn::ClLayerSupport::IsSpaceToBatchNdSupported
bool IsSpaceToBatchNdSupported(const TensorInfo &input, const TensorInfo &output, const SpaceToBatchNdDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: ClLayerSupport.cpp:1426
armnn::LayerType::Output
@ Output
ClQuantizeWorkload.hpp
armnn::ClLayerSupport::IsNormalizationSupported
bool IsNormalizationSupported(const TensorInfo &input, const TensorInfo &output, const NormalizationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: ClLayerSupport.cpp:1261
armnn::ClLayerSupport::IsLogSoftmaxSupported
bool IsLogSoftmaxSupported(const TensorInfo &input, const TensorInfo &output, const LogSoftmaxDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: ClLayerSupport.cpp:1176
armnn::ClLayerSupport::IsReduceSupported
bool IsReduceSupported(const TensorInfo &input, const TensorInfo &output, const ReduceDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: ClLayerSupport.cpp:1381
armnn::ClLayerSupport::IsStackSupported
bool IsStackSupported(const std::vector< const TensorInfo * > &inputs, const TensorInfo &output, const StackDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: ClLayerSupport.cpp:1483
armnn::InvalidArgumentException
Definition: Exceptions.hpp:80
armnn::LayerType::Multiplication
@ Multiplication
armnn::LayerType::MemImport
@ MemImport
ClUnidirectionalSequenceLstmFloatWorkload.hpp
armnn::BinaryOperation::Div
@ Div
armnn::ClLayerSupport::IsQuantizeSupported
bool IsQuantizeSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: ClLayerSupport.cpp:1371
armnn::LayerType::Prelu
@ Prelu
armnn::ClSinWorkloadValidate
arm_compute::Status ClSinWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: ClSinWorkload.cpp:18
armnn::ILayerSupport::outputStateOut
const TensorInfo & outputStateOut
Definition: ILayerSupport.hpp:289
armnn::ClLayerSupport::IsConvolution3dSupported
bool IsConvolution3dSupported(const TensorInfo &input, const TensorInfo &output, const Convolution3dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: ClLayerSupport.cpp:898
armnn::InstanceNormalizationDescriptor
An InstanceNormalizationDescriptor for InstanceNormalizationLayer.
Definition: Descriptors.hpp:835
armnn::ClRsqrtWorkloadValidate
arm_compute::Status ClRsqrtWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: ClRsqrtWorkload.cpp:18
armnn::ClLayerSupport::IsMinimumSupported
bool IsMinimumSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: ClLayerSupport.cpp:1236
armnn::ClLayerSupport::IsComparisonSupported
bool IsComparisonSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ComparisonDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: ClLayerSupport.cpp:782
armnn::ClMinimumWorkloadValidate
arm_compute::Status ClMinimumWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output)
Definition: ClMinimumWorkload.cpp:24
armnn::ILayerSupport::cellStateOut
const TensorInfo & cellStateOut
Definition: ILayerSupport.hpp:290
armnn::ClPadValidate
arm_compute::Status ClPadValidate(const TensorInfo &input, const TensorInfo &output, const PadDescriptor &descriptor)
Definition: ClPadWorkload.cpp:62
armnn::LayerType::Dequantize
@ Dequantize
armnn::ClLayerSupport::IsStridedSliceSupported
bool IsStridedSliceSupported(const TensorInfo &input, const TensorInfo &output, const StridedSliceDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: ClLayerSupport.cpp:1495
armnn::ClLayerSupport::IsBatchMatMulSupported
bool IsBatchMatMulSupported(const TensorInfo &inputX, const TensorInfo &inputY, const TensorInfo &output, const BatchMatMulDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:712
armnn::LayerSupportBase::IsMemCopySupported
bool IsMemCopySupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: LayerSupportBase.cpp:390
armnn::ClLstmFloatWorkloadValidate
arm_compute::Status ClLstmFloatWorkloadValidate(const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &scratchBuffer, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const LstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo)
Definition: ClLstmFloatWorkload.cpp:244
ClBackendId.hpp
armnn::TensorInfo::GetDataType
DataType GetDataType() const
Definition: Tensor.hpp:198
armnn::UnaryOperation::Log
@ Log
armnn::ClLayerSupport::IsQLstmSupported
bool IsQLstmSupported(const TensorInfo &input, const TensorInfo &previousOutputIn, const TensorInfo &previousCellStateIn, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const QLstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: ClLayerSupport.cpp:1319
armnn::SpaceToBatchNdDescriptor
A SpaceToBatchNdDescriptor for the SpaceToBatchNdLayer.
Definition: Descriptors.hpp:1010
ARMNN_NO_DEPRECATE_WARN_END
#define ARMNN_NO_DEPRECATE_WARN_END
Definition: Deprecated.hpp:34
armnn::ClLayerSupport::IsTransposeConvolution2dSupported
bool IsTransposeConvolution2dSupported(const TensorInfo &input, const TensorInfo &output, const TransposeConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: ClLayerSupport.cpp:1520
armnn::ClLayerSupport::IsDivisionSupported
bool IsDivisionSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: ClLayerSupport.cpp:988
armnn::ClLayerSupport::IsElementwiseUnarySupported
bool IsElementwiseUnarySupported(const TensorInfo &input, const TensorInfo &output, const ElementwiseUnaryDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: ClLayerSupport.cpp:1001
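The unary operations listed in this index (Abs, Log, Rsqrt, Sin, LogicalNot) are all routed through the single query above, with the operation carried in the descriptor. A hedged sketch, under the same backend-internal include-path assumption as the earlier examples:

#include <armnn/Tensor.hpp>
#include <armnn/Descriptors.hpp>
#include <armnn/Optional.hpp>
#include <cl/ClLayerSupport.hpp>   // backend-internal header; the path is an assumption

bool QueryRsqrt(const armnn::TensorInfo& input, const armnn::TensorInfo& output)
{
    // The operation to test is selected via the descriptor, not the method name.
    armnn::ElementwiseUnaryDescriptor descriptor(armnn::UnaryOperation::Rsqrt);

    armnn::ClLayerSupport support;
    return support.IsElementwiseUnarySupported(input, output, descriptor,
                                               armnn::EmptyOptional());
}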
armnn::ClQLstmWorkloadValidate
arm_compute::Status ClQLstmWorkloadValidate(const TensorInfo &input, const TensorInfo &cellStateIn, const TensorInfo &outputStateIn, const TensorInfo &cellStateOut, const TensorInfo &outputStateOut, const TensorInfo &output, const QLstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo)
Definition: ClQLstmWorkload.cpp:247