ArmNN
 24.05
ClLayerSupport.cpp
Go to the documentation of this file.
1 //
2 // Copyright © 2017-2024 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
6 #include "ClLayerSupport.hpp"
7 #include "ClBackendId.hpp"
9 
11 
12 #include <InternalTypes.hpp>
13 #include <LayerSupportCommon.hpp>
14 
17 
18 #if defined(ARMCOMPUTECL_ENABLED)
91 #endif
92 
93 
94 namespace armnn
95 {
96 
97 namespace
98 {
99 
100 template<unsigned int FilterSize>
101 bool IsMatchingSize2d(const TensorInfo& weightInfo)
102 {
103  // Width & Height must match.
104  return (weightInfo.GetShape()[3] == FilterSize) && (weightInfo.GetShape()[2] == FilterSize);
105 }
106 
template<uint32_t ValidStride>
bool IsMatchingStride(uint32_t actualStride)
{
    // Base case of the variadic overload below: compare against a single
    // candidate stride.
    return actualStride == ValidStride;
}
112 
template<uint32_t FirstStride, uint32_t SecondStride, uint32_t... ValidStrides>
bool IsMatchingStride(uint32_t actualStride)
{
    // Test the candidate strides one at a time: accept as soon as one matches,
    // otherwise recurse over the remainder of the pack (the two-parameter
    // minimum guarantees this overload never collides with the base case).
    if (IsMatchingStride<FirstStride>(actualStride))
    {
        return true;
    }
    return IsMatchingStride<SecondStride, ValidStrides...>(actualStride);
}
118 
// Common guard for every support query on this backend: when ArmNN is built
// with CL support it always succeeds; otherwise it fails and, if the caller
// supplied an Optional reason, fills it in. The trailing arguments exist only
// so call sites can forward their parameters uniformly — they are ignored.
template<typename ... Args>
bool IsClBackendSupported(Optional<std::string&> reasonIfUnsupported, Args... args)
{
    IgnoreUnused(reasonIfUnsupported, (args)...);
#if defined(ARMCOMPUTECL_ENABLED)
    return true;
#else
    if (reasonIfUnsupported)
    {
        reasonIfUnsupported.value() = "The armnn library has been built without CL support";
    }
    return false;
#endif
}
133 
// With CL compiled in, evaluate the given support expression directly.
// Without CL, ignore the expression and report "built without CL support";
// note the fallback expansion references a `reasonIfUnsupported` variable
// that must exist at the call site.
#if defined(ARMCOMPUTECL_ENABLED)
#define FORWARD_CL_LAYER_SUPPORT_FUNC(expr) (expr)
#else
#define FORWARD_CL_LAYER_SUPPORT_FUNC(expr) IsClBackendSupported(reasonIfUnsupported)
#endif
139 
#if defined(ARMCOMPUTECL_ENABLED)
// Runs an Arm Compute Library validate function and adapts its
// arm_compute::Status result to the bool + optional-reason-string convention
// used by the ILayerSupport interface.
template<class FuncType, class... Args>
inline bool IsWorkloadSupported(FuncType&& func, Optional<std::string&> reasonIfUnsupported, Args&&... args)
{
    arm_compute::Status aclStatus = func(std::forward<Args>(args)...);
    const bool supported = (aclStatus.error_code() == arm_compute::ErrorCode::OK);
    if (!supported && reasonIfUnsupported)
    {
        // Surface ACL's own error description as the unsupported reason.
        reasonIfUnsupported.value() = aclStatus.error_description();
    }
    return supported;
}

// Expands to a `return` statement — call sites use this macro as the entire
// function body.
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported, ...) \
    return IsWorkloadSupported(func, reasonIfUnsupported, __VA_ARGS__);
#else
// Without CL, every workload query reports unsupported with an explanatory
// reason (the validate function is never referenced).
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported, ...) \
    return IsClBackendSupported(reasonIfUnsupported, __VA_ARGS__);
#endif
159 
160 template<typename FloatFunc, typename Uint8Func, typename ... Params>
161 bool IsSupportedForDataTypeCl(Optional<std::string&> reasonIfUnsupported,
162  DataType dataType,
163  FloatFunc floatFuncPtr,
164  Uint8Func uint8FuncPtr,
165  Params&&... params)
166 {
167  return IsClBackendSupported(reasonIfUnsupported) &&
168  IsSupportedForDataTypeGeneric(reasonIfUnsupported,
169  dataType,
170  floatFuncPtr,
171  floatFuncPtr,
172  uint8FuncPtr,
173  &FalseFunc<>,
174  &FalseFunc<>,
175  std::forward<Params>(params)...);
176 }
177 } // anonymous namespace
178 
180  : m_ModelContextPtr(modelContextPtr)
181 {
182 }
183 
185  : m_ModelContextPtr(nullptr)
186 {
187 }
188 
190  const std::vector<TensorInfo>& infos,
191  const BaseDescriptor& descriptor,
192  const Optional<LstmInputParamsInfo>& lstmParamsInfo,
193  const Optional<QuantizedLstmInputParamsInfo>& quantizedLstmParamsInfo,
194  Optional<std::string&> reasonIfUnsupported) const
195 {
196  switch (type)
197  {
199  return IsActivationSupported(infos[0],
200  infos[1],
201  *(PolymorphicDowncast<const ActivationDescriptor*>(&descriptor)),
202  reasonIfUnsupported);
203  case LayerType::Addition:
205  return IsAdditionSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
208  return IsArgMinMaxSupported(infos[0],
209  infos[1],
210  *(PolymorphicDowncast<const ArgMinMaxDescriptor*>(&descriptor)),
211  reasonIfUnsupported);
213  return IsBatchMatMulSupported(infos[0],
214  infos[1],
215  infos[2],
216  *(PolymorphicDowncast<const BatchMatMulDescriptor*>(&descriptor)),
217  reasonIfUnsupported);
219  return IsBatchNormalizationSupported(infos[0],
220  infos[1],
221  infos[2],
222  infos[3],
223  infos[4],
224  infos[5],
225  *(PolymorphicDowncast<const BatchNormalizationDescriptor*>
226  (&descriptor)),
227  reasonIfUnsupported);
229  return IsBatchToSpaceNdSupported(infos[0],
230  infos[1],
231  *(PolymorphicDowncast<const BatchToSpaceNdDescriptor*>(&descriptor)),
232  reasonIfUnsupported);
233  case LayerType::Cast:
234  return IsCastSupported(infos[0], infos[1], reasonIfUnsupported);
236  return IsChannelShuffleSupported(infos[0],
237  infos[1],
238  *(PolymorphicDowncast<const ChannelShuffleDescriptor*>(&descriptor)),
239  reasonIfUnsupported);
241  return IsComparisonSupported(infos[0],
242  infos[1],
243  infos[2],
244  *(PolymorphicDowncast<const ComparisonDescriptor*>(&descriptor)),
245  reasonIfUnsupported);
246  case LayerType::Concat:
247  {
248  std::vector<const TensorInfo*> inputInfos;
249  for (uint32_t i = 0; i < (infos.size() - 1); i++)
250  {
251  inputInfos.push_back(&infos[i]);
252  }
253  return IsConcatSupported(inputInfos,
254  infos[infos.size() - 1],
255  *(PolymorphicDowncast<const OriginsDescriptor*>(&descriptor)),
256  reasonIfUnsupported);
257  }
258  case LayerType::Constant:
259  return IsConstantSupported(infos[0], reasonIfUnsupported);
261  return IsConvertFp16ToFp32Supported(infos[0], infos[1], reasonIfUnsupported);
263  return IsConvertFp32ToFp16Supported(infos[0], infos[1], reasonIfUnsupported);
265  {
266  if (infos.size() != 4)
267  {
268  throw InvalidArgumentException("Invalid number of Convolution2d TensorInfos. "
269  "TensorInfos should be of format: {input, output, weights, biases}.");
270  }
271 
272  auto desc = *(PolymorphicDowncast<const Convolution2dDescriptor*>(&descriptor));
273  if (infos[3] == TensorInfo())
274  {
275  return IsConvolution2dSupported(infos[0],
276  infos[1],
277  desc,
278  infos[2],
279  EmptyOptional(),
280  reasonIfUnsupported);
281  }
282  else
283  {
284  return IsConvolution2dSupported(infos[0],
285  infos[1],
286  desc,
287  infos[2],
288  infos[3],
289  reasonIfUnsupported);
290  }
291  }
293  {
294  if (infos.size() != 4)
295  {
296  throw InvalidArgumentException("Invalid number of Convolution3d TensorInfos. "
297  "TensorInfos should be of format: {input, output, weights, biases}.");
298  }
299 
300  auto desc = *(PolymorphicDowncast<const Convolution3dDescriptor*>(&descriptor));
301  if (infos[3] == TensorInfo())
302  {
303  return IsConvolution3dSupported(infos[0],
304  infos[1],
305  desc,
306  infos[2],
307  EmptyOptional(),
308  reasonIfUnsupported);
309  }
310  else
311  {
312  return IsConvolution3dSupported(infos[0],
313  infos[1],
314  desc,
315  infos[2],
316  infos[3],
317  reasonIfUnsupported);
318  }
319  }
321  return IsDepthToSpaceSupported(infos[0],
322  infos[1],
323  *(PolymorphicDowncast<const DepthToSpaceDescriptor*>(&descriptor)),
324  reasonIfUnsupported);
326  {
327  if (infos.size() != 4)
328  {
329  throw InvalidArgumentException("Invalid number of DepthwiseConvolution2d TensorInfos. "
330  "TensorInfos should be of format: {input, output, weights, biases}.");
331  }
332 
333  auto desc = *(PolymorphicDowncast<const DepthwiseConvolution2dDescriptor*>(&descriptor));
334  if (infos[3] == TensorInfo())
335  {
336  return IsDepthwiseConvolutionSupported(infos[0],
337  infos[1],
338  desc,
339  infos[2],
340  EmptyOptional(),
341  reasonIfUnsupported);
342  }
343  else
344  {
345  return IsDepthwiseConvolutionSupported(infos[0],
346  infos[1],
347  desc,
348  infos[2],
349  infos[3],
350  reasonIfUnsupported);
351  }
352  }
354  return IsDequantizeSupported(infos[0], infos[1], reasonIfUnsupported);
355  case LayerType::Division:
357  return IsDivisionSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
360  {
361  auto desc = *(PolymorphicDowncast<const ElementwiseBinaryDescriptor *>(&descriptor));
362 
363  switch (desc.m_Operation)
364  {
367  reasonIfUnsupported,
368  infos[0],
369  infos[1],
370  infos[2],
371  nullptr);
374  reasonIfUnsupported,
375  infos[0],
376  infos[1],
377  infos[2],
378  nullptr);
381  reasonIfUnsupported,
382  infos[0],
383  infos[1],
384  infos[2]);
387  reasonIfUnsupported,
388  infos[0],
389  infos[1],
390  infos[2]);
393  reasonIfUnsupported,
394  infos[0],
395  infos[1],
396  infos[2],
397  nullptr);
401  reasonIfUnsupported,
402  infos[0],
403  infos[1],
404  infos[2],
405  desc,
406  nullptr);
409  reasonIfUnsupported,
410  infos[0],
411  infos[1],
412  infos[2],
413  nullptr);
414  default:
415  return false;
416  }
417  }
419  return IsElementwiseUnarySupported(infos[0],
420  infos[1],
421  *(PolymorphicDowncast<const ElementwiseUnaryDescriptor*>(&descriptor)),
422  reasonIfUnsupported);
423  case LayerType::Fill:
424  return IsFillSupported(infos[0],
425  infos[1],
426  *(PolymorphicDowncast<const FillDescriptor*>(&descriptor)),
427  reasonIfUnsupported);
428  case LayerType::Floor:
429  return IsFloorSupported(infos[0], infos[1], reasonIfUnsupported);
431  return IsFullyConnectedSupported(infos[0],
432  infos[1],
433  infos[2],
434  infos[3],
435  *(PolymorphicDowncast<const FullyConnectedDescriptor*>(&descriptor)),
436  reasonIfUnsupported);
437  case LayerType::Gather:
438  return IsGatherSupported(infos[0],
439  infos[1],
440  infos[2],
441  *(PolymorphicDowncast<const GatherDescriptor*>(&descriptor)),
442  reasonIfUnsupported);
443  case LayerType::GatherNd:
444  return IsGatherNdSupported(infos[0],
445  infos[1],
446  infos[2],
447  reasonIfUnsupported);
448  case LayerType::Input:
449  return IsInputSupported(infos[0], reasonIfUnsupported);
451  return IsInstanceNormalizationSupported(infos[0],
452  infos[1],
453  *(PolymorphicDowncast<const InstanceNormalizationDescriptor*>
454  (&descriptor)),
455  reasonIfUnsupported);
457  return IsL2NormalizationSupported(infos[0],
458  infos[1],
459  *(PolymorphicDowncast<const L2NormalizationDescriptor*>(&descriptor)),
460  reasonIfUnsupported);
462  return IsLogicalBinarySupported(infos[0],
463  infos[1],
464  infos[2],
465  *(PolymorphicDowncast<const LogicalBinaryDescriptor*>(&descriptor)),
466  reasonIfUnsupported);
468  return IsLogSoftmaxSupported(infos[0],
469  infos[1],
470  *(PolymorphicDowncast<const LogSoftmaxDescriptor*>(&descriptor)),
471  reasonIfUnsupported);
472  case LayerType::Lstm:
473  return IsLstmSupported(infos[0],
474  infos[1],
475  infos[2],
476  infos[3],
477  infos[4],
478  infos[5],
479  infos[6],
480  *(PolymorphicDowncast<const LstmDescriptor*>(&descriptor)),
481  lstmParamsInfo.value(),
482  reasonIfUnsupported);
483  case LayerType::Map:
484  return true;
485  case LayerType::MemCopy:
486  return LayerSupportBase::IsMemCopySupported(infos[0], infos[1], reasonIfUnsupported);
488  return LayerSupportBase::IsMemImportSupported(infos[0], infos[1], reasonIfUnsupported);
489  case LayerType::Merge:
490  return LayerSupportBase::IsMergeSupported(infos[0],
491  infos[1],
492  infos[2],
493  reasonIfUnsupported);
494  case LayerType::Maximum:
496  return IsMaximumSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
498  case LayerType::Mean:
499  return IsMeanSupported(infos[0],
500  infos[1],
501  *(PolymorphicDowncast<const MeanDescriptor*>(&descriptor)),
502  reasonIfUnsupported);
503  case LayerType::Minimum:
505  return IsMinimumSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
509  return IsMultiplicationSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
512  return IsNormalizationSupported(infos[0],
513  infos[1],
514  *(PolymorphicDowncast<const NormalizationDescriptor*>(&descriptor)),
515  reasonIfUnsupported);
516  case LayerType::Output:
517  return IsOutputSupported(infos[0], reasonIfUnsupported);
518  case LayerType::Pad:
519  return IsPadSupported(infos[0],
520  infos[1],
521  *(PolymorphicDowncast<const PadDescriptor*>(&descriptor)),
522  reasonIfUnsupported);
523  case LayerType::Permute:
524  return IsPermuteSupported(infos[0],
525  infos[1],
526  *(PolymorphicDowncast<const PermuteDescriptor*>(&descriptor)),
527  reasonIfUnsupported);
529  return IsPooling2dSupported(infos[0],
530  infos[1],
531  *(PolymorphicDowncast<const Pooling2dDescriptor*>(&descriptor)),
532  reasonIfUnsupported);
534  return IsPooling3dSupported(infos[0],
535  infos[1],
536  *(PolymorphicDowncast<const Pooling3dDescriptor*>(&descriptor)),
537  reasonIfUnsupported);
538  case LayerType::Prelu:
539  return IsPreluSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
540  case LayerType::QLstm:
541  return IsQLstmSupported(infos[0],
542  infos[1],
543  infos[2],
544  infos[3],
545  infos[4],
546  infos[5],
547  *(PolymorphicDowncast<const QLstmDescriptor*>(&descriptor)),
548  lstmParamsInfo.value(),
549  reasonIfUnsupported);
550  case LayerType::Quantize:
551  return IsQuantizeSupported(infos[0], infos[1], reasonIfUnsupported);
553  return IsQuantizedLstmSupported(infos[0],
554  infos[1],
555  infos[2],
556  infos[3],
557  infos[4],
558  quantizedLstmParamsInfo.value(),
559  reasonIfUnsupported);
560  case LayerType::Rank:
561  return true;
562  case LayerType::Reduce:
563  return IsReduceSupported(infos[0],
564  infos[1],
565  *(PolymorphicDowncast<const ReduceDescriptor*>(&descriptor)),
566  reasonIfUnsupported);
567  case LayerType::Reshape:
568  return IsReshapeSupported(infos[0],
569  infos[1],
570  *(PolymorphicDowncast<const ReshapeDescriptor*>(&descriptor)),
571  reasonIfUnsupported);
572  case LayerType::Resize:
573  return IsResizeSupported(infos[0],
574  infos[1],
575  *(PolymorphicDowncast<const ResizeDescriptor*>(&descriptor)),
576  reasonIfUnsupported);
578  return IsReverseV2Supported(infos[0],
579  infos[1],
580  infos[2],
581  reasonIfUnsupported);
583  return IsScatterNdSupported(infos[0], // input/shape
584  infos[1], // indices
585  infos[2], // updates
586  infos[3], // output
587  *(PolymorphicDowncast<const ScatterNdDescriptor*>(&descriptor)),
588  reasonIfUnsupported);
589  case LayerType::Shape:
590  return LayerSupportBase::IsShapeSupported(infos[0],
591  infos[1],
592  reasonIfUnsupported);
593  case LayerType::Slice:
594  return IsSliceSupported(infos[0],
595  infos[1],
596  *(PolymorphicDowncast<const SliceDescriptor*>(&descriptor)),
597  reasonIfUnsupported);
598  case LayerType::Softmax:
599  return IsSoftmaxSupported(infos[0],
600  infos[1],
601  *(PolymorphicDowncast<const SoftmaxDescriptor*>(&descriptor)),
602  reasonIfUnsupported);
604  return IsSpaceToBatchNdSupported(infos[0],
605  infos[1],
606  *(PolymorphicDowncast<const SpaceToBatchNdDescriptor*>(&descriptor)),
607  reasonIfUnsupported);
609  return IsSpaceToDepthSupported(infos[0],
610  infos[1],
611  *(PolymorphicDowncast<const SpaceToDepthDescriptor*>(&descriptor)),
612  reasonIfUnsupported);
613  case LayerType::Splitter:
614  {
615  std::vector<TensorInfo> outputInfos;
616  for (uint32_t i = 1; i < infos.size(); i++)
617  {
618  outputInfos.push_back(infos[i]);
619  }
620  return IsSplitterSupported(infos[0],
621  {outputInfos.begin(), outputInfos.end()},
622  *(PolymorphicDowncast<const ViewsDescriptor*>(&descriptor)),
623  reasonIfUnsupported);
624  }
625  case LayerType::Stack:
626  {
627  std::vector<const TensorInfo*> inputInfos;
628  for (uint32_t i = 0; i < infos.size() - 1; i++)
629  {
630  inputInfos.push_back(&infos[i]);
631  }
632  return IsStackSupported(inputInfos,
633  infos[infos.size() - 1],
634  *(PolymorphicDowncast<const StackDescriptor*>(&descriptor)),
635  reasonIfUnsupported);
636  }
638  return IsStridedSliceSupported(infos[0],
639  infos[1],
640  *(PolymorphicDowncast<const StridedSliceDescriptor*>(&descriptor)),
641  reasonIfUnsupported);
644  return IsSubtractionSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
646  case LayerType::Tile:
647  return IsTileSupported(infos[0],
648  infos[1],
649  *(PolymorphicDowncast<const TileDescriptor*>(&descriptor)),
650  reasonIfUnsupported);
652  return IsTransposeSupported(infos[0],
653  infos[1],
654  *(PolymorphicDowncast<const TransposeDescriptor*>(&descriptor)),
655  reasonIfUnsupported);
657  {
658  if (infos.size() != 4)
659  {
660  throw InvalidArgumentException("Invalid number of TransposeConvolution2d TensorInfos. "
661  "TensorInfos should be of format: {input, output, weights, biases}.");
662  }
663 
664  auto desc = *(PolymorphicDowncast<const TransposeConvolution2dDescriptor*>(&descriptor));
665  if (infos[3] == TensorInfo())
666  {
667  return IsTransposeConvolution2dSupported(infos[0],
668  infos[1],
669  desc,
670  infos[2],
671  EmptyOptional(),
672  reasonIfUnsupported);
673  }
674  else
675  {
676  return IsTransposeConvolution2dSupported(infos[0],
677  infos[1],
678  desc,
679  infos[2],
680  infos[3],
681  reasonIfUnsupported);
682  }
683  }
686  infos[1],
687  infos[2],
688  infos[3],
689  infos[4],
690  infos[5],
691  *(PolymorphicDowncast<const
692  UnidirectionalSequenceLstmDescriptor*>(&descriptor)),
693  lstmParamsInfo.value(),
694  reasonIfUnsupported);
695  case LayerType::Unmap:
696  return true;
697  default:
698  // layers not supported in cl by default:
699  // debug, detectionpostprocess, fakequantization,
700  // precompiled, standin, switch, pooling3d, fused
701  return false;
702  }
703 }
704 
706  const TensorInfo& output,
707  const ActivationDescriptor& descriptor,
708  Optional<std::string&> reasonIfUnsupported) const
709 {
711  reasonIfUnsupported,
712  input,
713  output,
714  descriptor);
715 }
716 
718  const TensorInfo& input1,
719  const TensorInfo& output,
720  Optional<std::string&> reasonIfUnsupported) const
721 {
723  reasonIfUnsupported,
724  input0,
725  input1,
726  output,
727  nullptr);
728 }
729 
731  const TensorInfo& output,
732  const ArgMinMaxDescriptor& descriptor,
733  Optional<std::string&> reasonIfUnsupported) const
734 {
735 
737  reasonIfUnsupported,
738  input,
739  output,
740  descriptor);
741 }
742 
744  const TensorInfo& inputY,
745  const TensorInfo& output,
746  const BatchMatMulDescriptor& descriptor,
747  Optional<std::string&> reasonIfUnsupported) const
748 {
750  reasonIfUnsupported,
751  inputX,
752  inputY,
753  output,
754  descriptor,
755  nullptr);
756 }
757 
759  const TensorInfo& output,
760  const TensorInfo& mean,
761  const TensorInfo& var,
762  const TensorInfo& beta,
763  const TensorInfo& gamma,
764  const BatchNormalizationDescriptor& descriptor,
765  Optional<std::string&> reasonIfUnsupported) const
766 {
768  reasonIfUnsupported,
769  input,
770  output,
771  mean,
772  var,
773  beta,
774  gamma,
775  descriptor,
776  nullptr);
777 }
778 
780  const TensorInfo& output,
781  const BatchToSpaceNdDescriptor& descriptor,
782  Optional<std::string&> reasonIfUnsupported) const
783 {
785  reasonIfUnsupported,
786  input,
787  output,
788  descriptor);
789 }
790 
792  const TensorInfo& output,
793  Optional<std::string&> reasonIfUnsupported) const
794 {
796  reasonIfUnsupported,
797  input,
798  output);
799 }
800 
802  const TensorInfo& output,
803  const ChannelShuffleDescriptor& descriptor,
804  Optional<std::string&> reasonIfUnsupported) const
805 {
807  reasonIfUnsupported,
808  input,
809  output,
810  descriptor);
811 }
812 
814  const TensorInfo& input1,
815  const TensorInfo& output,
816  const ComparisonDescriptor& descriptor,
817  Optional<std::string&> reasonIfUnsupported) const
818 {
820  reasonIfUnsupported,
821  input0,
822  input1,
823  output,
824  descriptor);
825 }
826 
// Concat support on CL: the concat axis must lie inside the tensor rank and at
// most 4 dimensions are supported. Concat along the batch axis of a 4D tensor
// is handled via the sub-tensor optimization, which requires every input to
// share the output's type/quantization space.
bool ClLayerSupport::IsConcatSupported(const std::vector<const TensorInfo*> inputs,
                                       const TensorInfo& output,
                                       const OriginsDescriptor& descriptor,
                                       Optional<std::string&> reasonIfUnsupported) const
{
    if (descriptor.GetNumDimensions() <= descriptor.GetConcatAxis())
    {
        SetValueChecked(reasonIfUnsupported, "Cl Concat: Concat axis > Number of dimensions.");
        return false;
    }

    // Axis measured from the innermost dimension: 0 = width, 1 = height,
    // 2 = channels, 3 = batch.
    unsigned int concatInnerAxis = (descriptor.GetNumDimensions() - descriptor.GetConcatAxis()) - 1;
    if(concatInnerAxis < 3) // Width, height, or channels
    {
        // NOTE(review): the validate-call line (a FORWARD_WORKLOAD_VALIDATE_FUNC
        // invocation naming the CL concat workload validator) appears to have
        // been lost from this extract — only its argument list remains below.
        // Confirm against the original source before building.
        reasonIfUnsupported,
        inputs,
        output,
        descriptor);
    }
    else if (concatInnerAxis == 3)
    {
        // We rely on the sub-tensor optimization to handle the batch dimension for 4D tensors. If we can't use
        // sub-tensors for this then we can't support it. Here is where we check that the sub-tensors will work.
        for (auto& input : inputs)
        {
            if (input && !output.IsTypeSpaceMatch(*input)) // Cannot use sub-tensors if the types are not same space
            {
                SetValueChecked(reasonIfUnsupported, "Cl Concat: Types and quantization parameters must match.");
                return false;
            }
        }
        return true; // Sub-tensors support concat along batch
    }
    else // > 4 dimensions not supported.
    {
        SetValueChecked(reasonIfUnsupported, "Cl Concat: Maximum of 4 dimensions supported.");
        return false;
    }
}
867 
869  Optional<std::string&> reasonIfUnsupported) const
870 {
872  reasonIfUnsupported,
873  output);
874 }
875 
877  const TensorInfo& output,
878  Optional<std::string&> reasonIfUnsupported) const
879 {
881  reasonIfUnsupported,
882  input,
883  output);
884 }
885 
887  const TensorInfo& output,
888  Optional<std::string&> reasonIfUnsupported) const
889 {
891  reasonIfUnsupported,
892  input,
893  output);
894 }
895 
897  const TensorInfo& output,
898  const Convolution2dDescriptor& descriptor,
899  const TensorInfo& weights,
900  const Optional<TensorInfo>& biases,
901  Optional<std::string&> reasonIfUnsupported) const
902 {
903  bool isFastMathEnabled = false;
904 #if defined(ARMCOMPUTECL_ENABLED)
905  if (m_ModelContextPtr)
906  {
907  if (m_ModelContextPtr.get() != nullptr)
908  {
909  auto modelOptions = dynamic_cast<ClBackendModelContext*>(m_ModelContextPtr.get());
910  if (modelOptions)
911  {
912  isFastMathEnabled = modelOptions->IsFastMathEnabled();
913  }
914  }
915  }
916 #endif
917 
919  reasonIfUnsupported,
920  input,
921  output,
922  descriptor,
923  weights,
924  biases,
925  isFastMathEnabled,
926  nullptr);
927 }
928 
930  const TensorInfo& output,
931  const Convolution3dDescriptor& descriptor,
932  const TensorInfo& weights,
933  const Optional<TensorInfo>& biases,
934  Optional<std::string&> reasonIfUnsupported) const
935 {
936  bool isFastMathEnabled = false;
937 #if defined(ARMCOMPUTECL_ENABLED)
938  if (m_ModelContextPtr)
939 {
940  if (m_ModelContextPtr.get() != nullptr)
941  {
942  auto modelOptions = dynamic_cast<ClBackendModelContext*>(m_ModelContextPtr.get());
943  if (modelOptions)
944  {
945  isFastMathEnabled = modelOptions->IsFastMathEnabled();
946  }
947  }
948 }
949 #endif
950 
952  reasonIfUnsupported,
953  input,
954  output,
955  descriptor,
956  weights,
957  biases,
958  isFastMathEnabled,
959  nullptr);
960 }
961 
963  const TensorInfo& output,
964  Optional<std::string&> reasonIfUnsupported) const
965 {
967  reasonIfUnsupported,
968  input,
969  output);
970 }
971 
973  const TensorInfo& output,
974  const DepthToSpaceDescriptor& descriptor,
975  Optional<std::string&> reasonIfUnsupported) const
976 {
978  reasonIfUnsupported,
979  input,
980  output,
981  descriptor);
982 }
983 
985  const TensorInfo& output,
986  const DepthwiseConvolution2dDescriptor& descriptor,
987  const TensorInfo& weights,
988  const Optional<TensorInfo>& biases,
989  Optional<std::string&> reasonIfUnsupported) const
990 {
992  reasonIfUnsupported,
993  input,
994  output,
995  descriptor,
996  weights,
997  biases,
998  nullptr);
999 }
1000 
1002  const TensorInfo& output,
1003  const DepthwiseConvolution2dDescriptor& descriptor,
1004  const TensorInfo& weights,
1005  const Optional<TensorInfo>& biases,
1006  Optional<std::string&> reasonIfUnsupported) const
1007 {
1009  reasonIfUnsupported,
1010  input,
1011  output,
1012  descriptor,
1013  weights,
1014  biases,
1015  nullptr);
1016 }
1017 
1018 
1020  const TensorInfo& input1,
1021  const TensorInfo& output,
1022  Optional<std::string&> reasonIfUnsupported) const
1023 {
1025  reasonIfUnsupported,
1026  input0,
1027  input1,
1028  output,
1029  nullptr);
1030 }
1031 
1033  const TensorInfo& output,
1034  const ElementwiseUnaryDescriptor& descriptor,
1035  Optional<std::string&> reasonIfUnsupported) const
1036 {
1037  switch(descriptor.m_Operation)
1038  {
1039  case UnaryOperation::Abs:
1041  reasonIfUnsupported,
1042  input,
1043  output);
1044  case UnaryOperation::Exp:
1046  reasonIfUnsupported,
1047  input,
1048  output);
1049  case UnaryOperation::Log:
1051  reasonIfUnsupported,
1052  input,
1053  output);
1056  reasonIfUnsupported,
1057  input,
1058  output);
1059  case UnaryOperation::Neg:
1061  reasonIfUnsupported,
1062  input,
1063  output);
1064  case UnaryOperation::Rsqrt:
1066  reasonIfUnsupported,
1067  input,
1068  output);
1069  case UnaryOperation::Sin:
1071  reasonIfUnsupported,
1072  input,
1073  output);
1074  case UnaryOperation::Sqrt:
1076  reasonIfUnsupported,
1077  input,
1078  output);
1079  default:
1080  return false;
1081  }
1082 }
1083 
1085  const TensorInfo& output,
1086  const FillDescriptor& descriptor,
1087  Optional<std::string&> reasonIfUnsupported) const
1088 {
1089  armnn::IgnoreUnused(input);
1090  armnn::IgnoreUnused(output);
1091  armnn::IgnoreUnused(descriptor);
1092 
1093  return IsClBackendSupported(reasonIfUnsupported);
1094 }
1095 
1097  const TensorInfo& output,
1098  Optional<std::string&> reasonIfUnsupported) const
1099 {
1101  reasonIfUnsupported,
1102  input,
1103  output);
1104 }
1105 
1107  const TensorInfo& output,
1108  const TensorInfo& weights,
1109  const TensorInfo& biases,
1110  const FullyConnectedDescriptor& descriptor,
1111  Optional<std::string&> reasonIfUnsupported) const
1112 {
1114  reasonIfUnsupported,
1115  input,
1116  output,
1117  weights,
1118  biases,
1119  descriptor,
1120  nullptr);
1121 }
1122 
1124  const TensorInfo& input1,
1125  const TensorInfo& output,
1126  const GatherDescriptor& descriptor,
1127  Optional<std::string&> reasonIfUnsupported) const
1128 {
1130  reasonIfUnsupported,
1131  input0,
1132  input1,
1133  output,
1134  descriptor);
1135 }
1136 
1138  const TensorInfo& input1,
1139  const TensorInfo& output,
1140  Optional<std::string&> reasonIfUnsupported) const
1141 {
1143  reasonIfUnsupported,
1144  input0,
1145  input1,
1146  output);
1147 }
1148 
1150  Optional<std::string&> reasonIfUnsupported) const
1151 {
1152  return IsClBackendSupported(reasonIfUnsupported, input);
1153 }
1154 
1156  const TensorInfo& output,
1157  const InstanceNormalizationDescriptor& descriptor,
1158  Optional<std::string&> reasonIfUnsupported) const
1159 {
1161  reasonIfUnsupported,
1162  input,
1163  output,
1164  descriptor);
1165 }
1166 
1168  const TensorInfo& output,
1169  const L2NormalizationDescriptor& descriptor,
1170  Optional<std::string&> reasonIfUnsupported) const
1171 {
1173  reasonIfUnsupported,
1174  input,
1175  output,
1176  descriptor);
1177 }
1178 
1180  const TensorInfo& input1,
1181  const TensorInfo& output,
1182  const LogicalBinaryDescriptor& descriptor,
1183  Optional<std::string&> reasonIfUnsupported) const
1184 {
1185  IgnoreUnused(output);
1186 
1187  switch(descriptor.m_Operation)
1188  {
1191  reasonIfUnsupported,
1192  input0,
1193  input1,
1194  output);
1197  reasonIfUnsupported,
1198  input0,
1199  input1,
1200  output);
1201  default:
1202  return false;
1203  }
1204 }
1205 
1206 
1208  const TensorInfo& output,
1209  const LogSoftmaxDescriptor& descriptor,
1210  Optional<std::string&> reasonIfUnsupported) const
1211 {
1213  reasonIfUnsupported,
1214  input,
1215  output,
1216  descriptor);
1217 }
1218 
1220  const TensorInfo& outputStateIn,
1221  const TensorInfo& cellStateIn,
1222  const TensorInfo& scratchBuffer,
1223  const TensorInfo& outputStateOut,
1224  const TensorInfo& cellStateOut,
1225  const TensorInfo& output,
1226  const LstmDescriptor& descriptor,
1227  const LstmInputParamsInfo& paramsInfo,
1228  Optional<std::string&> reasonIfUnsupported) const
1229 {
1231  reasonIfUnsupported,
1232  input,
1233  outputStateIn,
1234  cellStateIn,
1235  scratchBuffer,
1236  outputStateOut,
1237  cellStateOut,
1238  output,
1239  descriptor,
1240  paramsInfo);
1241 }
1242 
1244  const TensorInfo& input1,
1245  const TensorInfo& output,
1246  Optional<std::string&> reasonIfUnsupported) const
1247 {
1249  reasonIfUnsupported,
1250  input0,
1251  input1,
1252  output);
1253 }
1254 
1256  const TensorInfo& output,
1257  const MeanDescriptor& descriptor,
1258  Optional<std::string&> reasonIfUnsupported) const
1259 {
1261  reasonIfUnsupported,
1262  input,
1263  output,
1264  descriptor);
1265 }
1266 
1268  const TensorInfo& input1,
1269  const TensorInfo& output,
1270  Optional<std::string&> reasonIfUnsupported) const
1271 {
1273  reasonIfUnsupported,
1274  input0,
1275  input1,
1276  output);
1277 }
1278 
1280  const TensorInfo& input1,
1281  const TensorInfo& output,
1282  Optional<std::string&> reasonIfUnsupported) const
1283 {
1285  reasonIfUnsupported,
1286  input0,
1287  input1,
1288  output,
1289  nullptr);
1290 }
1291 
1293  const TensorInfo& output,
1294  const NormalizationDescriptor& descriptor,
1295  Optional<std::string&> reasonIfUnsupported) const
1296 {
1297  FORWARD_WORKLOAD_VALIDATE_FUNC(ClNormalizationWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
1298 }
1299 
1301  Optional<std::string&> reasonIfUnsupported) const
1302 {
1303  return IsClBackendSupported(reasonIfUnsupported, output);
1304 }
1305 
1307  const TensorInfo& output,
1308  const PadDescriptor& descriptor,
1309  Optional<std::string&> reasonIfUnsupported) const
1310 {
1312  reasonIfUnsupported,
1313  input,
1314  output,
1315  descriptor);
1316 }
1317 
1319  const TensorInfo& output,
1320  const PermuteDescriptor& descriptor,
1321  Optional<std::string&> reasonIfUnsupported) const
1322 {
1323  FORWARD_WORKLOAD_VALIDATE_FUNC(ClPermuteWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
1324 }
1325 
1327  const TensorInfo& output,
1328  const Pooling2dDescriptor& descriptor,
1329  Optional<std::string&> reasonIfUnsupported) const
1330 {
1331  FORWARD_WORKLOAD_VALIDATE_FUNC(ClPooling2dWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
1332 }
1333 
1335  const TensorInfo& output,
1336  const Pooling3dDescriptor& descriptor,
1337  Optional<std::string&> reasonIfUnsupported) const
1338 {
1339  FORWARD_WORKLOAD_VALIDATE_FUNC(ClPooling3dWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
1340 }
1341 
1343  const armnn::TensorInfo &alpha,
1344  const armnn::TensorInfo &output,
1345  armnn::Optional<std::string &> reasonIfUnsupported) const
1346 {
1347  FORWARD_WORKLOAD_VALIDATE_FUNC(ClPreluWorkloadValidate, reasonIfUnsupported, input, alpha, output);
1348 }
1349 
1351  const TensorInfo& previousOutputIn,
1352  const TensorInfo& previousCellStateIn,
1353  const TensorInfo& outputStateOut,
1354  const TensorInfo& cellStateOut,
1355  const TensorInfo& output,
1356  const QLstmDescriptor& descriptor,
1357  const LstmInputParamsInfo& paramsInfo,
1358  Optional<std::string&> reasonIfUnsupported) const
1359 {
1360  if (input.GetDataType() == armnn::DataType::QAsymmS8 &&
1361  previousOutputIn.GetDataType() == armnn::DataType::QAsymmS8 &&
1362  previousCellStateIn.GetDataType() == armnn::DataType::QSymmS16 &&
1363  outputStateOut.GetDataType() == armnn::DataType::QAsymmS8 &&
1364  cellStateOut.GetDataType() == armnn::DataType::QSymmS16 &&
1366  {
1368  reasonIfUnsupported,
1369  input,
1370  previousCellStateIn,
1371  previousOutputIn,
1372  cellStateOut,
1373  outputStateOut,
1374  output,
1375  descriptor,
1376  paramsInfo);
1377  }
1378  else
1379  {
1380  return false;
1381  }
1382 }
1383 
1385  const TensorInfo& previousCellStateIn,
1386  const TensorInfo& previousOutputIn,
1387  const TensorInfo& cellStateOut,
1388  const TensorInfo& output,
1389  const QuantizedLstmInputParamsInfo& paramsInfo,
1390  Optional<std::string&> reasonIfUnsupported) const
1391 {
1393  reasonIfUnsupported,
1394  input,
1395  previousCellStateIn,
1396  previousOutputIn,
1397  cellStateOut,
1398  output,
1399  paramsInfo);
1400 }
1401 
1403  const TensorInfo& output,
1404  Optional<std::string&> reasonIfUnsupported) const
1405 {
1407  reasonIfUnsupported,
1408  input,
1409  output);
1410 }
1411 
1413  const TensorInfo& output,
1414  const ReduceDescriptor& descriptor,
1415  Optional<std::string&> reasonIfUnsupported) const
1416 {
1418  reasonIfUnsupported,
1419  input,
1420  output,
1421  descriptor);
1422 }
1423 
1425  const TensorInfo& output,
1426  const ReshapeDescriptor& descriptor,
1427  Optional<std::string&> reasonIfUnsupported) const
1428 {
1429  IgnoreUnused(descriptor);
1430  FORWARD_WORKLOAD_VALIDATE_FUNC(ClReshapeWorkloadValidate, reasonIfUnsupported, input, output);
1431 }
1432 
1434  const TensorInfo& output,
1435  const ResizeDescriptor& descriptor,
1436  Optional<std::string&> reasonIfUnsupported) const
1437 {
1438  FORWARD_WORKLOAD_VALIDATE_FUNC(ClResizeWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
1439 }
1440 
1442  const TensorInfo& axis,
1443  const TensorInfo& output,
1444  Optional<std::string&> reasonIfUnsupported) const
1445 {
1447  reasonIfUnsupported,
1448  input,
1449  axis,
1450  output);
1451 }
1452 
1454  const TensorInfo& indices,
1455  const TensorInfo& updates,
1456  const TensorInfo& output,
1457  const ScatterNdDescriptor& descriptor,
1458  Optional<std::string&> reasonIfUnsupported) const
1459 {
1461  reasonIfUnsupported,
1462  input,
1463  indices,
1464  updates,
1465  output,
1466  descriptor);
1467 }
1468 
1470  const TensorInfo& output,
1471  const SliceDescriptor& descriptor,
1472  Optional<std::string&> reasonIfUnsupported) const
1473 {
1474  FORWARD_WORKLOAD_VALIDATE_FUNC(ClSliceWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
1475 }
1476 
1478  const TensorInfo& output,
1479  const SoftmaxDescriptor& descriptor,
1480  Optional<std::string&> reasonIfUnsupported) const
1481 {
1482  FORWARD_WORKLOAD_VALIDATE_FUNC(ClSoftmaxWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
1483 }
1484 
1486  const TensorInfo& output,
1487  const SpaceToBatchNdDescriptor& descriptor,
1488  Optional<std::string&> reasonIfUnsupported) const
1489 {
1491  reasonIfUnsupported,
1492  input,
1493  output,
1494  descriptor);
1495 }
1496 
1498  const TensorInfo& output,
1499  const SpaceToDepthDescriptor& descriptor,
1500  Optional<std::string&> reasonIfUnsupported) const
1501 {
1503  reasonIfUnsupported,
1504  input,
1505  output,
1506  descriptor);
1507 }
1508 
1510  const std::vector<std::reference_wrapper<TensorInfo>>& outputs,
1511  const ViewsDescriptor& descriptor,
1512  Optional<std::string&> reasonIfUnsupported) const
1513 {
1514 #if defined(ARMCOMPUTECL_ENABLED)
1515  // Split along the last dimension, cannot use sub-tensors
1516  // as width and height of the sub-tensors do not match
1517  // the width and height of the parent tensor
1518  // in case of input with more than 2D.
1519  std::set<unsigned int> splitAxis = ComputeSplitAxis(descriptor, input.GetShape());
1520  if (descriptor.GetNumDimensions() > 2 && splitAxis.size() == 1 &&
1521  *splitAxis.begin() == descriptor.GetNumDimensions() - 1 )
1522  {
1524  reasonIfUnsupported,
1525  input,
1526  outputs,
1527  *splitAxis.begin());
1528  }
1529 #endif
1530  IgnoreUnused(descriptor);
1531  for (auto output : outputs)
1532  {
1533  if (!input.IsTypeSpaceMatch(output)) // Cannot use sub-tensors if the types are not same space
1534  {
1535  SetValueChecked(reasonIfUnsupported, "Cl Splitter: Types and quantization parameters must match.");
1536  return false;
1537  }
1538  }
1539  return true;
1540 }
1541 
1542 bool ClLayerSupport::IsStackSupported(const std::vector<const TensorInfo*>& inputs,
1543  const TensorInfo& output,
1544  const StackDescriptor& descriptor,
1545  Optional<std::string&> reasonIfUnsupported) const
1546 {
1548  reasonIfUnsupported,
1549  inputs,
1550  output,
1551  descriptor);
1552 }
1553 
1555  const TensorInfo& output,
1556  const StridedSliceDescriptor& descriptor,
1557  Optional<std::string&> reasonIfUnsupported) const
1558 {
1560  reasonIfUnsupported,
1561  input,
1562  output,
1563  descriptor);
1564 }
1565 
1567  const TensorInfo& input1,
1568  const TensorInfo& output,
1569  Optional<std::string&> reasonIfUnsupported) const
1570 {
1572  reasonIfUnsupported,
1573  input0,
1574  input1,
1575  output,
1576  nullptr);
1577 }
1578 
1580  const TensorInfo& output,
1581  const TileDescriptor& descriptor,
1582  Optional<std::string&> reasonIfUnsupported) const
1583 {
1585  reasonIfUnsupported,
1586  input,
1587  output,
1588  descriptor);
1589 }
1590 
1592  const TensorInfo& output,
1593  const TransposeConvolution2dDescriptor& descriptor,
1594  const TensorInfo& weights,
1595  const Optional<TensorInfo>& biases,
1596  Optional<std::string&> reasonIfUnsupported) const
1597 {
1599  reasonIfUnsupported,
1600  input,
1601  output,
1602  descriptor,
1603  weights,
1604  biases);
1605 }
1606 
1608  const TensorInfo& output,
1609  const TransposeDescriptor& descriptor,
1610  Optional<std::string&> reasonIfUnsupported) const
1611 {
1612  FORWARD_WORKLOAD_VALIDATE_FUNC(ClTransposeWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
1613 }
1614 
1616  const TensorInfo& outputStateIn,
1617  const TensorInfo& cellStateIn,
1618  const TensorInfo& outputStateOut,
1619  const TensorInfo& cellStateOut,
1620  const TensorInfo& output,
1621  const UnidirectionalSequenceLstmDescriptor& descriptor,
1622  const LstmInputParamsInfo& paramsInfo,
1623  Optional<std::string&> reasonIfUnsupported) const
1624 {
1626  reasonIfUnsupported,
1627  input,
1628  outputStateIn,
1629  cellStateIn,
1630  outputStateOut,
1631  cellStateOut,
1632  output,
1633  descriptor,
1634  paramsInfo);
1635 }
1636 
1637 } // namespace armnn
armnn::BatchNormalizationDescriptor
A BatchNormalizationDescriptor for the BatchNormalizationLayer.
Definition: Descriptors.hpp:828
armnn::LayerType::SpaceToDepth
@ SpaceToDepth
armnn::ClLayerSupport::IsPooling2dSupported
bool IsPooling2dSupported(const TensorInfo &input, const TensorInfo &output, const Pooling2dDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:1326
ClPooling3dWorkload.hpp
armnn::ClLayerSupport::IsBatchNormalizationSupported
bool IsBatchNormalizationSupported(const TensorInfo &input, const TensorInfo &output, const TensorInfo &mean, const TensorInfo &var, const TensorInfo &beta, const TensorInfo &gamma, const BatchNormalizationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:758
armnn::OriginsDescriptor::GetConcatAxis
unsigned int GetConcatAxis() const
Get the concatenation axis value.
Definition: Descriptors.cpp:162
armnn::ClLogWorkloadValidate
arm_compute::Status ClLogWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: ClLogWorkload.cpp:18
armnn::BinaryOperation::Mul
@ Mul
armnn::ViewsDescriptor
A ViewsDescriptor for the SplitterLayer.
Definition: Descriptors.hpp:244
armnn::ClRsqrtWorkloadValidate
arm_compute::Status ClRsqrtWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: ClRsqrtWorkload.cpp:18
armnn::LayerType::Permute
@ Permute
armnn::ActivationDescriptor
An ActivationDescriptor for the ActivationLayer.
Definition: Descriptors.hpp:36
armnn::FullyConnectedDescriptor
A FullyConnectedDescriptor for the FullyConnectedLayer.
Definition: Descriptors.hpp:507
armnn::BinaryOperation::Add
@ Add
ClConstantWorkload.hpp
armnn::ClLayerSupport::IsMinimumSupported
bool IsMinimumSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:1267
ClDepthwiseConvolutionWorkload.hpp
armnn::LayerType::Splitter
@ Splitter
ClArgMinMaxWorkload.hpp
armnn::ClFloorWorkloadValidate
arm_compute::Status ClFloorWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: ClFloorFloatWorkload.cpp:14
armnn::LayerType::BatchNormalization
@ BatchNormalization
armnn::QLstmDescriptor
A QLstmDescriptor for the QLstmLayer.
Definition: Descriptors.hpp:1380
armnn::ClLayerSupport::IsPreluSupported
bool IsPreluSupported(const TensorInfo &input, const TensorInfo &alpha, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:1342
armnn::ClSubtractionValidate
arm_compute::Status ClSubtractionValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ActivationDescriptor *activationDescriptor)
Definition: ClSubtractionWorkload.cpp:46
armnn::Optional
Definition: Optional.hpp:270
armnn::ClLayerSupport::IsConvolution2dSupported
bool IsConvolution2dSupported(const TensorInfo &input, const TensorInfo &output, const Convolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:896
armnn::ClLayerSupport::IsLstmSupported
bool IsLstmSupported(const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &scratchBuffer, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const LstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:1219
ClLogWorkload.hpp
armnn::ClLayerSupport::IsConcatSupported
bool IsConcatSupported(const std::vector< const TensorInfo * > inputs, const TensorInfo &output, const OriginsDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:827
armnn::ClQuantizeWorkloadValidate
arm_compute::Status ClQuantizeWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: ClQuantizeWorkload.cpp:22
ClElementwiseBinaryWorkload.hpp
armnn::ClTransposeConvolution2dWorkloadValidate
arm_compute::Status ClTransposeConvolution2dWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const TransposeConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases)
Definition: ClTransposeConvolution2dWorkload.cpp:26
ClLogSoftmaxWorkload.hpp
armnn::ClCastValidate
arm_compute::Status ClCastValidate(const TensorInfo &input, const TensorInfo &output)
Definition: ClCastWorkload.cpp:20
FORWARD_WORKLOAD_VALIDATE_FUNC
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported,...)
Definition: ClLayerSupport.cpp:153
ClDepthToSpaceWorkload.hpp
armnn::ClStridedSliceWorkloadValidate
arm_compute::Status ClStridedSliceWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const StridedSliceDescriptor &descriptor)
Definition: ClStridedSliceWorkload.cpp:27
armnn::Pooling3dDescriptor
A Pooling3dDescriptor for the Pooling3dLayer.
Definition: Descriptors.hpp:431
WorkloadUtils.hpp
armnn::ClExpWorkloadValidate
arm_compute::Status ClExpWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: ClExpWorkload.cpp:18
armnn::ResizeDescriptor
A ResizeDescriptor for the ResizeLayer.
Definition: Descriptors.hpp:985
armnn::ArgMinMaxDescriptor
An ArgMinMaxDescriptor for ArgMinMaxLayer.
Definition: Descriptors.hpp:67
armnn::ClMaximumWorkloadValidate
arm_compute::Status ClMaximumWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output)
Definition: ClMaximumWorkload.cpp:24
armnn::ClLayerSupport::IsAdditionSupported
bool IsAdditionSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:717
armnn::InstanceNormalizationDescriptor
An InstanceNormalizationDescriptor for InstanceNormalizationLayer.
Definition: Descriptors.hpp:847
armnn::ClLayerSupport::IsBatchMatMulSupported
bool IsBatchMatMulSupported(const TensorInfo &inputX, const TensorInfo &inputY, const TensorInfo &output, const BatchMatMulDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:743
armnn::ClMultiplicationWorkloadValidate
arm_compute::Status ClMultiplicationWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ActivationDescriptor *activationDescriptor)
Definition: ClMultiplicationWorkload.cpp:18
ClQuantizedLstmWorkload.hpp
armnn::GatherDescriptor
A GatherDescriptor for the GatherLayer.
Definition: Descriptors.hpp:965
armnn::ClGatherNdWorkloadValidate
arm_compute::Status ClGatherNdWorkloadValidate(const TensorInfo &paramsInfo, const TensorInfo &indicesInfo, const TensorInfo &outputInfo)
Definition: ClGatherNdWorkload.cpp:16
armnn::ClSpaceToDepthWorkloadValidate
arm_compute::Status ClSpaceToDepthWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const SpaceToDepthDescriptor &descriptor)
Definition: ClSpaceToDepthWorkload.cpp:54
armnn::LayerType::InstanceNormalization
@ InstanceNormalization
armnn::LayerType::ConvertFp16ToFp32
@ ConvertFp16ToFp32
armnn::ClLayerSupport::IsMaximumSupported
bool IsMaximumSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:1243
armnn::ClSplitterWorkloadValidate
arm_compute::Status ClSplitterWorkloadValidate(const TensorInfo &input, const std::vector< std::reference_wrapper< TensorInfo >> &outputs, unsigned int splitAxis)
Definition: ClSplitterWorkload.cpp:32
armnn::ClLayerSupport::IsActivationSupported
bool IsActivationSupported(const TensorInfo &input, const TensorInfo &output, const ActivationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:705
armnn::TensorInfo
Definition: Tensor.hpp:152
armnn::LayerType::Floor
@ Floor
armnn::L2NormalizationDescriptor
A L2NormalizationDescriptor for the L2NormalizationLayer.
Definition: Descriptors.hpp:809
ClUnidirectionalSequenceLstmFloatWorkload.hpp
ClNormalizationFloatWorkload.hpp
armnn::ClUnidirectionalSequenceLstmFloatWorkloadValidate
arm_compute::Status ClUnidirectionalSequenceLstmFloatWorkloadValidate(const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const UnidirectionalSequenceLstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo)
Definition: ClUnidirectionalSequenceLstmFloatWorkload.cpp:508
armnn::ClBatchToSpaceNdWorkloadValidate
arm_compute::Status ClBatchToSpaceNdWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const BatchToSpaceNdDescriptor &descriptor)
Definition: ClBatchToSpaceNdWorkload.cpp:17
ClLayerSupport.hpp
armnn::OriginsDescriptor::GetNumDimensions
uint32_t GetNumDimensions() const
Get the number of dimensions.
Definition: Descriptors.cpp:192
armnn::BinaryOperation::Sub
@ Sub
ClReverseV2Workload.hpp
armnn::LayerType::Transpose
@ Transpose
armnn::NormalizationDescriptor
A NormalizationDescriptor for the NormalizationLayer.
Definition: Descriptors.hpp:769
armnn::ClLayerSupport::IsOutputSupported
bool IsOutputSupported(const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:1300
armnn::ClLogicalAndWorkloadValidate
arm_compute::Status ClLogicalAndWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output)
Definition: ClLogicalAndWorkload.cpp:20
armnn::LayerType::Comparison
@ Comparison
armnn::ClLayerSupport::IsDepthwiseConvolutionSupported
bool IsDepthwiseConvolutionSupported(const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:984
armnn::ClLayerSupport::IsPadSupported
bool IsPadSupported(const TensorInfo &input, const TensorInfo &output, const PadDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:1306
armnn::ClLayerSupport::IsFullyConnectedSupported
bool IsFullyConnectedSupported(const TensorInfo &input, const TensorInfo &output, const TensorInfo &weights, const TensorInfo &biases, const FullyConnectedDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:1106
armnn::LayerType::StridedSlice
@ StridedSlice
armnn::ClLayerSupport::IsLogicalBinarySupported
bool IsLogicalBinarySupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const LogicalBinaryDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported) const
Definition: ClLayerSupport.cpp:1179
armnn::ChannelShuffleDescriptor
A ChannelShuffleDescriptor for the ChannelShuffle operator.
Definition: Descriptors.hpp:1562
ClPooling2dWorkload.hpp
armnn::ClDepthToSpaceWorkloadValidate
arm_compute::Status ClDepthToSpaceWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const DepthToSpaceDescriptor &descriptor)
Definition: ClDepthToSpaceWorkload.cpp:22
armnn::ClActivationWorkloadValidate
arm_compute::Status ClActivationWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const ActivationDescriptor &descriptor)
Definition: ClActivationWorkload.cpp:17
armnn::ClMeanValidate
arm_compute::Status ClMeanValidate(const TensorInfo &input, const TensorInfo &output, const MeanDescriptor &descriptor)
Definition: ClMeanWorkload.cpp:17
armnn::ClElementwiseBinaryValidate
arm_compute::Status ClElementwiseBinaryValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ElementwiseBinaryDescriptor &descriptor, const ActivationDescriptor *activationDescriptor)
Definition: ClElementwiseBinaryWorkload.cpp:64
armnn::LogicalBinaryOperation::LogicalOr
@ LogicalOr
ClReduceWorkload.hpp
ClMeanWorkload.hpp
ARMNN_NO_DEPRECATE_WARN_BEGIN
#define ARMNN_NO_DEPRECATE_WARN_BEGIN
Definition: Deprecated.hpp:33
ClSinWorkload.hpp
armnn::ClChannelShuffleValidate
arm_compute::Status ClChannelShuffleValidate(const TensorInfo &input, const TensorInfo &output, const ChannelShuffleDescriptor &descriptor)
Definition: ClChannelShuffleWorkload.cpp:20
armnn::LayerType::Tile
@ Tile
ClResizeWorkload.hpp
ClComparisonWorkload.hpp
ClPreluWorkload.hpp
armnn::ClLayerSupport::IsNormalizationSupported
bool IsNormalizationSupported(const TensorInfo &input, const TensorInfo &output, const NormalizationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:1292
armnn::ClLayerSupport::IsReduceSupported
bool IsReduceSupported(const TensorInfo &input, const TensorInfo &output, const ReduceDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:1412
armnn::LayerType::Stack
@ Stack
BackendRegistry.hpp
armnn::ClFullyConnectedWorkloadValidate
arm_compute::Status ClFullyConnectedWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const TensorInfo &weights, const Optional< TensorInfo > &biases, const FullyConnectedDescriptor &descriptor, const ActivationDescriptor *activationDescriptor)
Definition: ClFullyConnectedWorkload.cpp:19
armnn::StackDescriptor
A StackDescriptor for the StackLayer.
Definition: Descriptors.hpp:1251
IgnoreUnused.hpp
armnn::ClConvolution2dWorkloadValidate
arm_compute::Status ClConvolution2dWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const Convolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, bool isFastMathEnabled, const ActivationDescriptor *activationDescriptor)
Definition: ClConvolution2dWorkload.cpp:23
ClCastWorkload.hpp
armnn::LayerType::Normalization
@ Normalization
armnn::ClLayerSupport::IsSliceSupported
bool IsSliceSupported(const TensorInfo &input, const TensorInfo &output, const SliceDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:1469
armnn::LayerType::QuantizedLstm
@ QuantizedLstm
armnn::UnaryOperation::Neg
@ Neg
armnn::LayerType::Reduce
@ Reduce
armnn::ClLayerSupport::ClLayerSupport
ClLayerSupport()
Definition: ClLayerSupport.cpp:184
armnn::LayerType::ElementwiseUnary
@ ElementwiseUnary
armnn::DataType::QSymmS16
@ QSymmS16
armnn::ClLayerSupport::IsSoftmaxSupported
bool IsSoftmaxSupported(const TensorInfo &input, const TensorInfo &output, const SoftmaxDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:1477
ClBatchToSpaceNdWorkload.hpp
armnn::LayerType::GatherNd
@ GatherNd
armnn::ClLayerSupport::IsMeanSupported
bool IsMeanSupported(const TensorInfo &input, const TensorInfo &output, const MeanDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:1255
armnn::LayerType::ElementwiseBinary
@ ElementwiseBinary
armnn::ClL2NormalizationWorkloadValidate
arm_compute::Status ClL2NormalizationWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const L2NormalizationDescriptor &descriptor)
Definition: ClL2NormalizationFloatWorkload.cpp:17
armnn::ClLayerSupport::IsChannelShuffleSupported
bool IsChannelShuffleSupported(const TensorInfo &input, const TensorInfo &output, const ChannelShuffleDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:801
armnn::ClLayerSupport::IsConvertFp16ToFp32Supported
bool IsConvertFp16ToFp32Supported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:876
armnn::ClLayerSupport::IsFloorSupported
bool IsFloorSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:1096
armnn::QuantizedLstmInputParamsInfo
Definition: QuantizedLstmParams.hpp:119
armnn::LayerType::ConvertFp32ToFp16
@ ConvertFp32ToFp16
ClDivisionWorkload.hpp
ClGatherNdWorkload.hpp
armnn::ClAdditionValidate
arm_compute::Status ClAdditionValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ActivationDescriptor *activationDescriptor)
Definition: ClAdditionWorkload.cpp:45
armnn::ClNegWorkloadValidate
arm_compute::Status ClNegWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: ClNegWorkload.cpp:18
ClBackendModelContext.hpp
LayerSupportCommon.hpp
ClGatherWorkload.hpp
armnn::ClLayerSupport::IsQuantizeSupported
bool IsQuantizeSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:1402
armnn::ClQLstmWorkloadValidate
arm_compute::Status ClQLstmWorkloadValidate(const TensorInfo &input, const TensorInfo &cellStateIn, const TensorInfo &outputStateIn, const TensorInfo &cellStateOut, const TensorInfo &outputStateOut, const TensorInfo &output, const QLstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo)
Definition: ClQLstmWorkload.cpp:247
armnn::ClReduceWorkloadValidate
arm_compute::Status ClReduceWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const ReduceDescriptor &descriptor)
Definition: ClReduceWorkload.cpp:18
armnn::LayerType::Slice
@ Slice
ClExpWorkload.hpp
armnn::ClLayerSupport::IsMultiplicationSupported
bool IsMultiplicationSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:1279
armnn::ClBackendModelContext
The ClBackendModelContext is used to pass in CL specific backend ModelOptions.
Definition: ClBackendModelContext.hpp:28
armnn::ClLayerSupport::IsTransposeConvolution2dSupported
bool IsTransposeConvolution2dSupported(const TensorInfo &input, const TensorInfo &output, const TransposeConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:1591
ClInstanceNormalizationWorkload.hpp
armnn::ClConstantWorkloadValidate
arm_compute::Status ClConstantWorkloadValidate(const TensorInfo &output)
Definition: ClConstantWorkload.cpp:18
armnn::BinaryOperation::Maximum
@ Maximum
armnn::ClPermuteWorkloadValidate
arm_compute::Status ClPermuteWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const PermuteDescriptor &descriptor)
Definition: ClPermuteWorkload.cpp:17
armnn::ClLayerSupport::IsInstanceNormalizationSupported
bool IsInstanceNormalizationSupported(const TensorInfo &input, const TensorInfo &output, const InstanceNormalizationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:1155
armnn::LayerType::ChannelShuffle
@ ChannelShuffle
ClActivationWorkload.hpp
ClTransposeConvolution2dWorkload.hpp
ClDequantizeWorkload.hpp
armnn::BinaryOperation::SqDiff
@ SqDiff
armnn::UnaryOperation::Rsqrt
@ Rsqrt
armnn::ClLayerSupport::IsSpaceToDepthSupported
bool IsSpaceToDepthSupported(const TensorInfo &input, const TensorInfo &output, const SpaceToDepthDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:1497
armnn::ClLayerSupport::IsReshapeSupported
bool IsReshapeSupported(const TensorInfo &input, const TensorInfo &output, const ReshapeDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:1424
ClSpaceToBatchNdWorkload.hpp
armnn::UnaryOperation::Sqrt
@ Sqrt
armnn::UnaryOperation::LogicalNot
@ LogicalNot
armnn::LayerType::Subtraction
@ Subtraction
armnn::LayerType::Prelu
@ Prelu
armnn::LayerType::ScatterNd
@ ScatterNd
armnn::ClLayerSupport::IsLayerSupported
bool IsLayerSupported(const LayerType &type, const std::vector< TensorInfo > &infos, const BaseDescriptor &descriptor, const Optional< LstmInputParamsInfo > &lstmParamsInfo, const Optional< QuantizedLstmInputParamsInfo > &quantizedLstmParamsInfo, Optional< std::string & > reasonIfUnsupported) const
Default implementation of the ILayerSupport interface, Backends should implement this as a switch sta...
Definition: ClLayerSupport.cpp:189
armnn::ClResizeWorkloadValidate
arm_compute::Status ClResizeWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const ResizeDescriptor &descriptor)
Definition: ClResizeWorkload.cpp:22
armnn::LayerType::LogicalBinary
@ LogicalBinary
armnn::LogicalBinaryDescriptor::m_Operation
LogicalBinaryOperation m_Operation
Specifies the logical operation to execute.
Definition: Descriptors.hpp:1534
armnn::LayerType::Concat
@ Concat
armnn::PadDescriptor
A PadDescriptor for the PadLayer.
Definition: Descriptors.hpp:1196
armnn::UnaryOperation::Exp
@ Exp
armnn::ClPooling3dWorkloadValidate
arm_compute::Status ClPooling3dWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const Pooling3dDescriptor &descriptor)
Definition: ClPooling3dWorkload.cpp:18
armnn::ClStackWorkloadValidate
arm_compute::Status ClStackWorkloadValidate(const std::vector< const TensorInfo * > &inputs, const TensorInfo &output, const StackDescriptor &descriptor)
Definition: ClStackWorkload.cpp:29
armnn::TransposeDescriptor
A TransposeDescriptor for the TransposeLayer.
Definition: Descriptors.hpp:1490
ClSliceWorkload.hpp
ClTransposeWorkload.hpp
armnn::LayerType::TransposeConvolution2d
@ TransposeConvolution2d
armnn::LayerType::Merge
@ Merge
PolymorphicDowncast.hpp
armnn::EmptyOptional
EmptyOptional is used to initialize the Optional class in case we want to have default value for an O...
Definition: Optional.hpp:32
ClStridedSliceWorkload.hpp
armnn::SliceDescriptor
A SliceDescriptor for the SliceLayer.
Definition: Descriptors.hpp:1228
armnn::DataType
DataType
Definition: Types.hpp:48
armnn::LayerType::Softmax
@ Softmax
ClL2NormalizationFloatWorkload.hpp
armnn::PolymorphicDowncast
DestType PolymorphicDowncast(SourceType *value)
Polymorphic downcast for build in pointers only.
Definition: PolymorphicDowncast.hpp:74
armnn::TensorInfo::IsTypeSpaceMatch
bool IsTypeSpaceMatch(const TensorInfo &other) const
Check that the types are the same and, if quantize, that the quantization parameters are the same.
Definition: Tensor.cpp:432
armnn::ClLayerSupport::IsUnidirectionalSequenceLstmSupported
bool IsUnidirectionalSequenceLstmSupported(const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const UnidirectionalSequenceLstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string & > reasonIfUnsupported) const
Definition: ClLayerSupport.cpp:1615
armnn::ClComparisonWorkloadValidate
arm_compute::Status ClComparisonWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ComparisonDescriptor &descriptor)
Definition: ClComparisonWorkload.cpp:24
armnn::ClTileWorkloadValidate
arm_compute::Status ClTileWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const TileDescriptor &descriptor)
Definition: ClTileWorkload.cpp:16
armnn::ReshapeDescriptor
A ReshapeDescriptor for the ReshapeLayer.
Definition: Descriptors.hpp:1023
armnn::LayerSupportBase::IsMergeSupported
bool IsMergeSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: LayerSupportBase.cpp:112
armnn::ClLayerSupport::IsComparisonSupported
bool IsComparisonSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ComparisonDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:813
armnn::InvalidArgumentException
Definition: Exceptions.hpp:80
armnn::UnaryOperation::Sin
@ Sin
armnn::LayerType::Quantize
@ Quantize
ClConcatWorkload.hpp
armnn::ClLayerSupport::IsPooling3dSupported
bool IsPooling3dSupported(const TensorInfo &input, const TensorInfo &output, const Pooling3dDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:1334
armnn::LayerSupportBase::IsMemImportSupported
bool IsMemImportSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: LayerSupportBase.cpp:105
ClConvertFp16ToFp32Workload.hpp
ClFullyConnectedWorkload.hpp
armnn::ClConvertFp32ToFp16WorkloadValidate
arm_compute::Status ClConvertFp32ToFp16WorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: ClConvertFp32ToFp16Workload.cpp:44
armnn::ClTransposeWorkloadValidate
arm_compute::Status ClTransposeWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const TransposeDescriptor &descriptor)
Definition: ClTransposeWorkload.cpp:17
armnn::ClScatterNdWorkloadValidate
arm_compute::Status ClScatterNdWorkloadValidate(const TensorInfo &inputInfo, const TensorInfo &indicesInfo, const TensorInfo &updatesInfo, const TensorInfo &outputInfo, const ScatterNdDescriptor &descriptor)
Definition: ClScatterNdWorkload.cpp:20
armnn::LayerType::Multiplication
@ Multiplication
armnn::PermuteDescriptor
A PermuteDescriptor for the PermuteLayer.
Definition: Descriptors.hpp:149
armnn::BatchMatMulDescriptor
A BatchMatMulDescriptor for the BatchMatMul operator.
Definition: Descriptors.hpp:1584
ClSpaceToDepthWorkload.hpp
armnn::ClSoftmaxWorkloadValidate
arm_compute::Status ClSoftmaxWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const SoftmaxDescriptor &descriptor)
Definition: ClSoftmaxWorkload.cpp:17
armnn::ClReverseV2WorkloadValidate
arm_compute::Status ClReverseV2WorkloadValidate(const TensorInfo &input, const TensorInfo &axis, const TensorInfo &output)
Definition: ClReverseV2Workload.cpp:16
armnn::LayerType::Addition
@ Addition
armnn::ClPadValidate
arm_compute::Status ClPadValidate(const TensorInfo &input, const TensorInfo &output, const PadDescriptor &descriptor)
Definition: ClPadWorkload.cpp:62
ClNegWorkload.hpp
armnn::ClLayerSupport::IsTransposeSupported
bool IsTransposeSupported(const TensorInfo &input, const TensorInfo &output, const TransposeDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:1607
armnn::ClLayerSupport::IsTileSupported
bool IsTileSupported(const TensorInfo &input, const TensorInfo &output, const TileDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:1579
ArmComputeUtils.hpp
armnn::ClInstanceNormalizationWorkloadValidate
arm_compute::Status ClInstanceNormalizationWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const InstanceNormalizationDescriptor &descriptor)
Definition: ClInstanceNormalizationWorkload.cpp:18
armnn::ClGatherWorkloadValidate
arm_compute::Status ClGatherWorkloadValidate(const TensorInfo &input, const TensorInfo &indices, const TensorInfo &output, const GatherDescriptor &descriptor)
Definition: ClGatherWorkload.cpp:15
armnn::SpaceToBatchNdDescriptor
A SpaceToBatchNdDescriptor for the SpaceToBatchNdLayer.
Definition: Descriptors.hpp:1043
ClLogicalNotWorkload.hpp
ClLstmFloatWorkload.hpp
armnn::ClSliceWorkloadValidate
arm_compute::Status ClSliceWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const SliceDescriptor &descriptor)
Definition: ClSliceWorkload.cpp:18
armnn::Convolution3dDescriptor
A Convolution3dDescriptor for the Convolution3dLayer.
Definition: Descriptors.hpp:588
armnn::LayerType::DepthToSpace
@ DepthToSpace
armnn::ClBatchMatMulValidate
arm_compute::Status ClBatchMatMulValidate(const TensorInfo &inputInfoX, const TensorInfo &inputInfoY, const TensorInfo &outputInfo, const BatchMatMulDescriptor &descriptor, const ActivationDescriptor *activationDescriptor)
Definition: ClBatchMatMulWorkload.cpp:24
armnn::BaseDescriptor
Base class for all descriptors.
Definition: Descriptors.hpp:22
armnn::ClLstmFloatWorkloadValidate
arm_compute::Status ClLstmFloatWorkloadValidate(const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &scratchBuffer, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const LstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo)
Definition: ClLstmFloatWorkload.cpp:244
armnn::BinaryOperation::Power
@ Power
armnn::ClLayerSupport::IsSpaceToBatchNdSupported
bool IsSpaceToBatchNdSupported(const TensorInfo &input, const TensorInfo &output, const SpaceToBatchNdDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:1485
armnn::LayerType::MemImport
@ MemImport
armnn::LayerType::Pooling2d
@ Pooling2d
ClQuantizeWorkload.hpp
armnn::ClLayerSupport::IsL2NormalizationSupported
bool IsL2NormalizationSupported(const TensorInfo &input, const TensorInfo &output, const L2NormalizationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:1167
armnn::ClLayerSupport::IsCastSupported
bool IsCastSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:791
ClBatchNormalizationFloatWorkload.hpp
armnn::TensorInfo::GetDataType
DataType GetDataType() const
Definition: Tensor.hpp:200
armnn::ClSqrtWorkloadValidate
arm_compute::Status ClSqrtWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: ClSqrtWorkload.cpp:19
armnn::LayerType::Division
@ Division
armnn::LayerType::Shape
@ Shape
armnn::ClNormalizationWorkloadValidate
arm_compute::Status ClNormalizationWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const NormalizationDescriptor &descriptor)
Definition: ClNormalizationFloatWorkload.cpp:19
armnn::ClLayerSupport::IsDivisionSupported
bool IsDivisionSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:1019
ClLogicalAndWorkload.hpp
armnn::BatchToSpaceNdDescriptor
A BatchToSpaceNdDescriptor for the BatchToSpaceNdLayer.
Definition: Descriptors.hpp:875
armnn::Convolution2dDescriptor
A Convolution2dDescriptor for the Convolution2dLayer.
Definition: Descriptors.hpp:534
armnn::ComparisonDescriptor
A ComparisonDescriptor for the ComparisonLayer.
Definition: Descriptors.hpp:89
armnn::FillDescriptor
A FillDescriptor for the FillLayer.
Definition: Descriptors.hpp:925
armnn::DataType::QAsymmS8
@ QAsymmS8
armnn::ClLayerSupport::IsInputSupported
bool IsInputSupported(const TensorInfo &input, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:1149
armnn::ClLayerSupport::IsDepthToSpaceSupported
bool IsDepthToSpaceSupported(const TensorInfo &input, const TensorInfo &output, const DepthToSpaceDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:972
armnn::ElementwiseUnaryDescriptor::m_Operation
UnaryOperation m_Operation
Specifies the elementwiseUnary operation to execute.
Definition: Descriptors.hpp:145
ClPadWorkload.hpp
ClConvolution2dWorkload.hpp
ClRsqrtWorkload.hpp
armnn::LayerType::FullyConnected
@ FullyConnected
armnn::LayerType::Gather
@ Gather
armnn::ClLayerSupport::IsArgMinMaxSupported
bool IsArgMinMaxSupported(const TensorInfo &input, const TensorInfo &output, const ArgMinMaxDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:730
armnn::LayerType::Pooling3d
@ Pooling3d
armnn::ClLayerSupport::IsBatchToSpaceNdSupported
bool IsBatchToSpaceNdSupported(const TensorInfo &input, const TensorInfo &output, const BatchToSpaceNdDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:779
ClConvertFp32ToFp16Workload.hpp
ClBackendId.hpp
armnn::UnaryOperation::Log
@ Log
armnn::LayerType::LogSoftmax
@ LogSoftmax
armnn::LayerType::BatchMatMul
@ BatchMatMul
armnn::ClLayerSupport::IsElementwiseUnarySupported
bool IsElementwiseUnarySupported(const TensorInfo &input, const TensorInfo &output, const ElementwiseUnaryDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:1032
ClAdditionWorkload.hpp
armnn::LogicalBinaryOperation::LogicalAnd
@ LogicalAnd
ClConvolution3dWorkload.hpp
armnn::LayerType::DepthwiseConvolution2d
@ DepthwiseConvolution2d
armnn::LayerType::Cast
@ Cast
armnn::ClLayerSupport::IsResizeSupported
bool IsResizeSupported(const TensorInfo &input, const TensorInfo &output, const ResizeDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:1433
armnn::LayerSupportBase::IsShapeSupported
bool IsShapeSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: LayerSupportBase.cpp:131
armnn::LayerType::BatchToSpaceNd
@ BatchToSpaceNd
armnn::ClPooling2dWorkloadValidate
arm_compute::Status ClPooling2dWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const Pooling2dDescriptor &descriptor)
Definition: ClPooling2dWorkload.cpp:18
armnn::ClLayerSupport::IsFillSupported
bool IsFillSupported(const TensorInfo &input, const TensorInfo &output, const FillDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:1084
armnn::ClLayerSupport::IsConvolution3dSupported
bool IsConvolution3dSupported(const TensorInfo &input, const TensorInfo &output, const Convolution3dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:929
armnn::LstmDescriptor
An LstmDescriptor for the LstmLayer.
Definition: Descriptors.hpp:1102
armnn::StridedSliceDescriptor
A StridedSliceDescriptor for the StridedSliceLayer.
Definition: Descriptors.hpp:1303
armnn::ClMinimumWorkloadValidate
arm_compute::Status ClMinimumWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output)
Definition: ClMinimumWorkload.cpp:24
armnn::ClSpaceToBatchNdWorkloadValidate
arm_compute::Status ClSpaceToBatchNdWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const SpaceToBatchNdDescriptor &descriptor)
Definition: ClSpaceToBatchNdWorkload.cpp:16
armnn::Status
Status
Definition: Types.hpp:42
armnn::ClLayerSupport::IsQuantizedLstmSupported
bool IsQuantizedLstmSupported(const TensorInfo &input, const TensorInfo &previousCellStateIn, const TensorInfo &previousOutputIn, const TensorInfo &cellStateOut, const TensorInfo &output, const QuantizedLstmInputParamsInfo &paramsInfo, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:1384
ClMinimumWorkload.hpp
armnn::LogicalBinaryDescriptor
A LogicalBinaryDescriptor for the LogicalBinaryLayer.
Definition: Descriptors.hpp:1518
armnn::ClLayerSupport::IsSubtractionSupported
bool IsSubtractionSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:1566
ClStackWorkload.hpp
armnn::LayerType::Reshape
@ Reshape
armnn::TensorInfo::GetShape
const TensorShape & GetShape() const
Definition: Tensor.hpp:193
ARMNN_NO_DEPRECATE_WARN_END
#define ARMNN_NO_DEPRECATE_WARN_END
Definition: Deprecated.hpp:34
armnn::ClLayerSupport::IsPermuteSupported
bool IsPermuteSupported(const TensorInfo &input, const TensorInfo &output, const PermuteDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:1318
armnn::LayerType::SpaceToBatchNd
@ SpaceToBatchNd
armnn::ClConvolution3dWorkloadValidate
arm_compute::Status ClConvolution3dWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const Convolution3dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, bool isFastMathEnabled, const ActivationDescriptor *activationDescriptor)
Definition: ClConvolution3dWorkload.cpp:23
armnn::ClLayerSupport::IsGatherNdSupported
bool IsGatherNdSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported) const
Definition: ClLayerSupport.cpp:1137
armnn::LayerType::Fill
@ Fill
armnn::ClBatchNormalizationValidate
arm_compute::Status ClBatchNormalizationValidate(const TensorInfo &input, const TensorInfo &output, const TensorInfo &mean, const TensorInfo &var, const TensorInfo &beta, const TensorInfo &gamma, const BatchNormalizationDescriptor &descriptor, const ActivationDescriptor *activationDescriptor)
Definition: ClBatchNormalizationFloatWorkload.cpp:19
armnn::LayerType::L2Normalization
@ L2Normalization
armnn::IgnoreUnused
void IgnoreUnused(Ts &&...)
Definition: IgnoreUnused.hpp:14
armnn::ViewsDescriptor::GetNumDimensions
uint32_t GetNumDimensions() const
Get the number of dimensions.
Definition: Descriptors.cpp:307
armnn::ClQuantizedLstmWorkloadValidate
arm_compute::Status ClQuantizedLstmWorkloadValidate(const TensorInfo &input, const TensorInfo &previousCellStateIn, const TensorInfo &previousOutputIn, const TensorInfo &cellStateOut, const TensorInfo &output, const QuantizedLstmInputParamsInfo &paramsInfo)
Definition: ClQuantizedLstmWorkload.cpp:18
armnn::ClLogSoftmaxWorkloadValidate
arm_compute::Status ClLogSoftmaxWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const LogSoftmaxDescriptor &descriptor)
Definition: ClLogSoftmaxWorkload.cpp:17
armnn::LayerType::Minimum
@ Minimum
ClSplitterWorkload.hpp
armnn::ClDivisionWorkloadValidate
arm_compute::Status ClDivisionWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ActivationDescriptor *activationDescriptor)
Definition: ClDivisionWorkload.cpp:18
ClBatchMatMulWorkload.hpp
armnn::ClConcatWorkloadValidate
arm_compute::Status ClConcatWorkloadValidate(const std::vector< const TensorInfo * > &inputs, const TensorInfo &output, const OriginsDescriptor &descriptor)
Definition: ClConcatWorkload.cpp:27
armnn::IsSupportedForDataTypeGeneric
bool IsSupportedForDataTypeGeneric(Optional< std::string & > reasonIfUnsupported, DataType dataType, Float16Func float16FuncPtr, Float32Func float32FuncPtr, Uint8Func uint8FuncPtr, Int32Func int32FuncPtr, BooleanFunc booleanFuncPtr, Params &&... params)
Definition: LayerSupportCommon.hpp:27
armnn::ClLayerSupport::IsSplitterSupported
bool IsSplitterSupported(const TensorInfo &input, const std::vector< std::reference_wrapper< TensorInfo >> &outputs, const ViewsDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:1509
ClMaximumWorkload.hpp
armnn::LayerType::UnidirectionalSequenceLstm
@ UnidirectionalSequenceLstm
armnn::BinaryOperation::Minimum
@ Minimum
armnn::LayerType::Map
@ Map
armnn::LayerType::ReverseV2
@ ReverseV2
armnn::OriginsDescriptor
An OriginsDescriptor for the ConcatLayer.
Definition: Descriptors.hpp:201
ClSubtractionWorkload.hpp
armnn::ClLayerSupport::IsDequantizeSupported
bool IsDequantizeSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:962
armnn::LayerType::MemCopy
@ MemCopy
armnn
Copyright (c) 2021 ARM Limited and Contributors.
Definition: 01_00_quick_start.dox:6
armnn::ElementwiseUnaryDescriptor
A ElementwiseUnaryDescriptor for the ElementwiseUnaryLayer.
Definition: Descriptors.hpp:129
armnn::TransposeConvolution2dDescriptor
A TransposeConvolution2dDescriptor for the TransposeConvolution2dLayer.
Definition: Descriptors.hpp:1440
armnn::LayerType::ArgMinMax
@ ArgMinMax
armnn::ClPreluWorkloadValidate
arm_compute::Status ClPreluWorkloadValidate(const TensorInfo &input, const TensorInfo &alpha, const TensorInfo &output)
Definition: ClPreluWorkload.cpp:16
armnn::ClLayerSupport::IsGatherSupported
bool IsGatherSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const GatherDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported) const
Definition: ClLayerSupport.cpp:1123
armnn::LayerType::Pad
@ Pad
armnn::ComputeSplitAxis
std::set< unsigned int > ComputeSplitAxis(const armnn::SplitterDescriptor &desc, const TensorShape &input)
Calculates the axis values for split operation.
Definition: WorkloadUtils.cpp:377
armnn::LstmInputParamsInfo
Definition: LstmParams.hpp:63
armnn::ClSinWorkloadValidate
arm_compute::Status ClSinWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: ClSinWorkload.cpp:18
armnn::LayerType::Rank
@ Rank
armnn::ClAbsWorkloadValidate
arm_compute::Status ClAbsWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: ClAbsWorkload.cpp:19
armnn::LayerType::Mean
@ Mean
ArmComputeTensorUtils.hpp
armnn::UnaryOperation::Abs
@ Abs
InternalTypes.hpp
armnn::ClLayerSupport::IsStridedSliceSupported
bool IsStridedSliceSupported(const TensorInfo &input, const TensorInfo &output, const StridedSliceDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:1554
ClMultiplicationWorkload.hpp
armnn::ClLayerSupport::IsStackSupported
bool IsStackSupported(const std::vector< const TensorInfo * > &inputs, const TensorInfo &output, const StackDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:1542
ClAbsWorkload.hpp
ClTileWorkload.hpp
armnn::LayerType::Input
@ Input
ClReshapeWorkload.hpp
armnn::ClConvertFp16ToFp32WorkloadValidate
arm_compute::Status ClConvertFp16ToFp32WorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: ClConvertFp16ToFp32Workload.cpp:44
armnn::LayerType::Resize
@ Resize
armnn::ClLayerSupport::IsLogSoftmaxSupported
bool IsLogSoftmaxSupported(const TensorInfo &input, const TensorInfo &output, const LogSoftmaxDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:1207
armnn::SetValueChecked
void SetValueChecked(Optional< T & > optionalRef, V &&val)
Definition: LayerSupportCommon.hpp:17
ClSoftmaxWorkload.hpp
ClFillWorkload.hpp
ClFloorFloatWorkload.hpp
armnn::BinaryOperation::Div
@ Div
armnn::ClLayerSupport::IsConstantSupported
bool IsConstantSupported(const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:868
armnn::LayerType::Convolution2d
@ Convolution2d
armnn::Pooling2dDescriptor
A Pooling2dDescriptor for the Pooling2dLayer.
Definition: Descriptors.hpp:371
armnn::LayerType::Maximum
@ Maximum
armnn::LayerType::Activation
@ Activation
armnn::DepthwiseConvolution2dDescriptor
A DepthwiseConvolution2dDescriptor for the DepthwiseConvolution2dLayer.
Definition: Descriptors.hpp:659
armnn::ClLogicalOrWorkloadValidate
arm_compute::Status ClLogicalOrWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output)
Definition: ClLogicalOrWorkload.cpp:20
armnn::ClDequantizeWorkloadValidate
arm_compute::Status ClDequantizeWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: ClDequantizeWorkload.cpp:22
armnn::LayerType::Lstm
@ Lstm
armnn::LayerSupportBase::IsMemCopySupported
bool IsMemCopySupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: LayerSupportBase.cpp:98
ClLogicalOrWorkload.hpp
armnn::LayerType::Dequantize
@ Dequantize
armnn::ScatterNdDescriptor
A ScatterNdDescriptor for the ScatterNdLayer.
Definition: Descriptors.hpp:1679
armnn::LayerType::Convolution3d
@ Convolution3d
armnn::ReduceDescriptor
A ReduceDescriptor for the REDUCE operators.
Definition: Descriptors.hpp:1538
armnn::ClReshapeWorkloadValidate
arm_compute::Status ClReshapeWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: ClReshapeWorkload.cpp:15
armnn::LayerType
LayerType
When adding a new layer, adapt also the LastLayer enum value in the enum class LayerType below.
Definition: Types.hpp:491
armnn::ClLayerSupport::IsScatterNdSupported
bool IsScatterNdSupported(const TensorInfo &input, const TensorInfo &indices, const TensorInfo &updates, const TensorInfo &output, const ScatterNdDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:1453
armnn::ClDepthwiseConvolutionWorkloadValidate
arm_compute::Status ClDepthwiseConvolutionWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, const ActivationDescriptor *activationDescriptor)
Definition: ClDepthwiseConvolutionWorkload.cpp:26
ClScatterNdWorkload.hpp
armnn::LayerType::Unmap
@ Unmap
ClPermuteWorkload.hpp
armnn::MeanDescriptor
A MeanDescriptor for the MeanLayer.
Definition: Descriptors.hpp:1172
armnn::LayerType::QLstm
@ QLstm
ClSqrtWorkload.hpp
armnn::OptionalReferenceSwitch< std::is_reference< T >::value, T >::value
const T & value() const
Definition: Optional.hpp:146
armnn::TileDescriptor
Definition: Descriptors.hpp:1640
armnn::ClArgMinMaxWorkloadValidate
arm_compute::Status ClArgMinMaxWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const ArgMinMaxDescriptor &descriptor)
Definition: ClArgMinMaxWorkload.cpp:31
ClQLstmWorkload.hpp
armnn::SoftmaxDescriptor
A SoftmaxDescriptor for the SoftmaxLayer.
Definition: Descriptors.hpp:177
armnn::ClLayerSupport::IsReverseV2Supported
bool IsReverseV2Supported(const TensorInfo &input, const TensorInfo &axis, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported) const
Definition: ClLayerSupport.cpp:1441
armnn::ClLayerSupport::IsDilatedDepthwiseConvolutionSupported
bool IsDilatedDepthwiseConvolutionSupported(const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reason=EmptyOptional()) const
Definition: ClLayerSupport.cpp:1001
armnn::ClLogicalNotWorkloadValidate
arm_compute::Status ClLogicalNotWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: ClLogicalNotWorkload.cpp:20
armnn::IBackendInternal::IBackendSpecificModelContextPtr
std::shared_ptr< IBackendModelContext > IBackendSpecificModelContextPtr
Definition: IBackendInternal.hpp:96
armnn::SpaceToDepthDescriptor
A SpaceToDepthDescriptor for the SpaceToDepthLayer.
Definition: Descriptors.hpp:1075
armnn::LayerType::Output
@ Output
armnn::LayerType::Constant
@ Constant
armnn::ClLayerSupport::IsConvertFp32ToFp16Supported
bool IsConvertFp32ToFp16Supported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:886
armnn::ClBackendModelContext::IsFastMathEnabled
bool IsFastMathEnabled() const
Definition: ClBackendModelContext.cpp:66
armnn::ClLayerSupport::IsQLstmSupported
bool IsQLstmSupported(const TensorInfo &input, const TensorInfo &previousOutputIn, const TensorInfo &previousCellStateIn, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const QLstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:1350
ClChannelShuffleWorkload.hpp